File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
new file mode 100644
index 0000000..8550df1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -0,0 +1,193 @@
+#
+# Broadcom device configuration
+#
+
+config NET_VENDOR_BROADCOM
+	bool "Broadcom devices"
+	default y
+	depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+		   SIBYTE_SB1xxx_SOC
+	---help---
+	  If you have a network (Ethernet) chipset belonging to this class,
+	  say Y.
+
+	  Note that the answer to this question does not directly affect
+	  the kernel: saying N will just cause the configurator to skip all
+	  the questions regarding Broadcom chipsets. If you say Y, you will be
+	  asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_BROADCOM
+
+config B44
+	tristate "Broadcom 440x/47xx ethernet support"
+	depends on SSB_POSSIBLE && HAS_DMA
+	select SSB
+	select MII
+	select PHYLIB
+	---help---
+	  If you have a network (Ethernet) controller of this type, say Y
+	  or M here.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called b44.
+
+# Auto-select SSB PCI-HOST support, if possible
+config B44_PCI_AUTOSELECT
+	bool
+	depends on B44 && SSB_PCIHOST_POSSIBLE
+	select SSB_PCIHOST
+	default y
+
+# Auto-select SSB PCICORE driver, if possible
+config B44_PCICORE_AUTOSELECT
+	bool
+	depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
+	select SSB_DRIVER_PCICORE
+	default y
+
+config B44_PCI
+	bool
+	depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
+	default y
+
+config BCM63XX_ENET
+	tristate "Broadcom 63xx internal mac support"
+	depends on BCM63XX
+	select MII
+	select PHYLIB
+	help
+	  This driver supports the ethernet MACs in the Broadcom 63xx
+	  MIPS chipset family (BCM63XX).
+
+config BCMGENET
+	tristate "Broadcom GENET internal MAC support"
+	select MII
+	select PHYLIB
+	select FIXED_PHY
+	select BCM7XXX_PHY
+	help
+	  This driver supports the built-in Ethernet MACs found in the
+	  Broadcom BCM7xxx Set Top Box family chipset.
+
+config BNX2
+	tristate "QLogic bnx2 support"
+	depends on PCI
+	select CRC32
+	select FW_LOADER
+	---help---
+	  This driver supports QLogic bnx2 gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called bnx2.  This is recommended.
+
+config CNIC
+	tristate "QLogic CNIC support"
+	depends on PCI && (IPV6 || IPV6=n)
+	select BNX2
+	select UIO
+	---help---
+	  This driver supports offload features of QLogic bnx2 gigabit
+	  Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
+config SB1250_MAC
+	tristate "SB1250 Gigabit Ethernet support"
+	depends on SIBYTE_SB1xxx_SOC
+	select PHYLIB
+	---help---
+	  This driver supports Gigabit Ethernet interfaces based on the
+	  Broadcom SiByte family of System-On-a-Chip parts.  They include
+	  the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
+	  and BCM1480 chips.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sb1250-mac.
+
+config TIGON3
+	tristate "Broadcom Tigon3 support"
+	depends on PCI
+	select PHYLIB
+	select HWMON
+	select PTP_1588_CLOCK
+	---help---
+	  This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called tg3.  This is recommended.
+
+config BNX2X
+	tristate "Broadcom NetXtremeII 10Gb support"
+	depends on PCI
+	select PTP_1588_CLOCK
+	select FW_LOADER
+	select ZLIB_INFLATE
+	select LIBCRC32C
+	select MDIO
+	---help---
+	  This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
+	  To compile this driver as a module, choose M here: the module
+	  will be called bnx2x.  This is recommended.
+
+config BNX2X_SRIOV
+	bool "Broadcom 578xx and 57712 SR-IOV support"
+	depends on BNX2X && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support in the 578xx and 57712 products. This
+	  allows for virtual function acceleration in virtual environments.
+
+config BNX2X_VXLAN
+	bool "Virtual eXtensible Local Area Network support"
+	default n
+	depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+	---help---
+	  This enables hardware offload support for the VXLAN protocol over the
+	  NetXtremeII series adapters.
+	  Say Y here if you want to enable hardware offload support for
+	  Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
+config BGMAC
+	tristate "BCMA bus GBit core support"
+	depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
+	select PHYLIB
+	---help---
+	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
+	  They can be found on BCM47xx SoCs and provide gigabit ethernet.
+	  When using this driver on a BCM4706, it is also required to enable
+	  BCMA_DRIVER_GMAC_CMN to make it work.
+
+config SYSTEMPORT
+	tristate "Broadcom SYSTEMPORT internal MAC support"
+	depends on OF
+	select MII
+	select PHYLIB
+	select FIXED_PHY
+	help
+	  This driver supports the built-in Ethernet MACs found in the
+	  Broadcom BCM7xxx Set Top Box family chipset using an internal
+	  Ethernet switch.
+
+config BNXT
+	tristate "Broadcom NetXtreme-C/E support"
+	depends on PCI
+	depends on VXLAN || VXLAN=n
+	select FW_LOADER
+	select LIBCRC32C
+	---help---
+	  This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+	  Ethernet cards.  To compile this driver as a module, choose M here:
+	  the module will be called bnxt_en.  This is recommended.
+
+config BNXT_SRIOV
+	bool "Broadcom NetXtreme-C/E SR-IOV support"
+	depends on BNXT && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support in the NetXtreme-C/E products. This
+	  allows for virtual function acceleration in virtual environments.
+
+endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
new file mode 100644
index 0000000..00584d7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the Broadcom network device drivers.
+#
+
+obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
+obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
+obj-$(CONFIG_BNX2X) += bnx2x/
+obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
new file mode 100644
index 0000000..a3b1c07
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -0,0 +1,2615 @@
+/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
+ * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
+ * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
+ * Copyright (C) 2006 Broadcom Corporation.
+ * Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Distribute under GPL.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+
+#include "b44.h"
+
+#define DRV_MODULE_NAME		"b44"
+#define DRV_MODULE_VERSION	"2.0"
+#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
+
+#define B44_DEF_MSG_ENABLE	  \
+	(NETIF_MSG_DRV		| \
+	 NETIF_MSG_PROBE	| \
+	 NETIF_MSG_LINK		| \
+	 NETIF_MSG_TIMER	| \
+	 NETIF_MSG_IFDOWN	| \
+	 NETIF_MSG_IFUP		| \
+	 NETIF_MSG_RX_ERR	| \
+	 NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define B44_TX_TIMEOUT			(5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define B44_MIN_MTU			60
+#define B44_MAX_MTU			1500
+
+#define B44_RX_RING_SIZE		512
+#define B44_DEF_RX_RING_PENDING		200
+#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
+				 B44_RX_RING_SIZE)
+#define B44_TX_RING_SIZE		512
+#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
+#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
+				 B44_TX_RING_SIZE)
+
+#define TX_RING_GAP(BP)	\
+	(B44_TX_RING_SIZE - (BP)->tx_pending)
+#define TX_BUFFS_AVAIL(BP)						\
+	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
+	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
+	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
+#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
+
+#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
+
+/* b44 internal pattern match filter info */
+#define B44_PATTERN_BASE	0x400
+#define B44_PATTERN_SIZE	0x80
+#define B44_PMASK_BASE		0x600
+#define B44_PMASK_SIZE		0x10
+#define B44_MAX_PATTERNS	16
+#define B44_ETHIPV6UDP_HLEN	62
+#define B44_ETHIPV4UDP_HLEN	42
+
+MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
+module_param(b44_debug, int, 0);
+MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
+
+
+#ifdef CONFIG_B44_PCI
+static const struct pci_device_id b44_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
+	{ 0 } /* terminate list with empty entry */
+};
+MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
+
+static struct pci_driver b44_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= b44_pci_tbl,
+};
+#endif /* CONFIG_B44_PCI */
+
+static const struct ssb_device_id b44_ssb_tbl[] = {
+	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
+	{},
+};
+MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
+
+static void b44_halt(struct b44 *);
+static void b44_init_rings(struct b44 *);
+
+#define B44_FULL_RESET		1
+#define B44_FULL_RESET_SKIP_PHY	2
+#define B44_PARTIAL_RESET	3
+#define B44_CHIP_RESET_FULL	4
+#define B44_CHIP_RESET_PARTIAL	5
+
+static void b44_init_hw(struct b44 *, int);
+
+static int dma_desc_sync_size;
+static int instance;
+
+static const char b44_gstrings[][ETH_GSTRING_LEN] = {
+#define _B44(x...)	# x,
+B44_STAT_REG_DECLARE
+#undef _B44
+};
+
+static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
+						dma_addr_t dma_base,
+						unsigned long offset,
+						enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
+					     dma_addr_t dma_base,
+					     unsigned long offset,
+					     enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
+}
+
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+	return ssb_read32(bp->sdev, reg);
+}
+
+static inline void bw32(const struct b44 *bp,
+			unsigned long reg, unsigned long val)
+{
+	ssb_write32(bp->sdev, reg, val);
+}
+
+static int b44_wait_bit(struct b44 *bp, unsigned long reg,
+			u32 bit, unsigned long timeout, const int clear)
+{
+	unsigned long i;
+
+	for (i = 0; i < timeout; i++) {
+		u32 val = br32(bp, reg);
+
+		if (clear && !(val & bit))
+			break;
+		if (!clear && (val & bit))
+			break;
+		udelay(10);
+	}
+	if (i == timeout) {
+		if (net_ratelimit())
+			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
+				   bit, reg, clear ? "clear" : "set");
+
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
+{
+	u32 val;
+
+	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
+			    (index << CAM_CTRL_INDEX_SHIFT)));
+
+	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+
+	val = br32(bp, B44_CAM_DATA_LO);
+
+	data[2] = (val >> 24) & 0xFF;
+	data[3] = (val >> 16) & 0xFF;
+	data[4] = (val >> 8) & 0xFF;
+	data[5] = (val >> 0) & 0xFF;
+
+	val = br32(bp, B44_CAM_DATA_HI);
+
+	data[0] = (val >> 8) & 0xFF;
+	data[1] = (val >> 0) & 0xFF;
+}
+
+static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+{
+	u32 val;
+
+	val  = ((u32) data[2]) << 24;
+	val |= ((u32) data[3]) << 16;
+	val |= ((u32) data[4]) <<  8;
+	val |= ((u32) data[5]) <<  0;
+	bw32(bp, B44_CAM_DATA_LO, val);
+	val = (CAM_DATA_HI_VALID |
+	       (((u32) data[0]) << 8) |
+	       (((u32) data[1]) << 0));
+	bw32(bp, B44_CAM_DATA_HI, val);
+	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
+			    (index << CAM_CTRL_INDEX_SHIFT)));
+	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+}
+
+static inline void __b44_disable_ints(struct b44 *bp)
+{
+	bw32(bp, B44_IMASK, 0);
+}
+
+static void b44_disable_ints(struct b44 *bp)
+{
+	__b44_disable_ints(bp);
+
+	/* Flush posted writes. */
+	br32(bp, B44_IMASK);
+}
+
+static void b44_enable_ints(struct b44 *bp)
+{
+	bw32(bp, B44_IMASK, bp->imask);
+}
+
+static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
+{
+	int err;
+
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
+			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
+			     (reg << MDIO_DATA_RA_SHIFT) |
+			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
+	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
+
+	return err;
+}
+
+static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
+{
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
+			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
+			     (reg << MDIO_DATA_RA_SHIFT) |
+			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
+			     (val & MDIO_DATA_DATA)));
+	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+}
+
+static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
+{
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		return 0;
+
+	return __b44_readphy(bp, bp->phy_addr, reg, val);
+}
+
+static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
+{
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		return 0;
+
+	return __b44_writephy(bp, bp->phy_addr, reg, val);
+}
+
+/* miilib interface */
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
+{
+	u32 val;
+	struct b44 *bp = netdev_priv(dev);
+	int rc = __b44_readphy(bp, phy_id, location, &val);
+	if (rc)
+		return 0xffffffff;
+	return val;
+}
+
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+			       int val)
+{
+	struct b44 *bp = netdev_priv(dev);
+	__b44_writephy(bp, phy_id, location, val);
+}
+
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+	u32 val;
+	struct b44 *bp = bus->priv;
+	int rc = __b44_readphy(bp, phy_id, location, &val);
+	if (rc)
+		return 0xffffffff;
+	return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+				 u16 val)
+{
+	struct b44 *bp = bus->priv;
+	return __b44_writephy(bp, phy_id, location, val);
+}
+
+static int b44_phy_reset(struct b44 *bp)
+{
+	u32 val;
+	int err;
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		return 0;
+	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
+	if (err)
+		return err;
+	udelay(100);
+	err = b44_readphy(bp, MII_BMCR, &val);
+	if (!err) {
+		if (val & BMCR_RESET) {
+			netdev_err(bp->dev, "PHY Reset would not complete\n");
+			err = -ENODEV;
+		}
+	}
+
+	return err;
+}
+
+static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
+{
+	u32 val;
+
+	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
+	bp->flags |= pause_flags;
+
+	val = br32(bp, B44_RXCONFIG);
+	if (pause_flags & B44_FLAG_RX_PAUSE)
+		val |= RXCONFIG_FLOW;
+	else
+		val &= ~RXCONFIG_FLOW;
+	bw32(bp, B44_RXCONFIG, val);
+
+	val = br32(bp, B44_MAC_FLOW);
+	if (pause_flags & B44_FLAG_TX_PAUSE)
+		val |= (MAC_FLOW_PAUSE_ENAB |
+			(0xc0 & MAC_FLOW_RX_HI_WATER));
+	else
+		val &= ~MAC_FLOW_PAUSE_ENAB;
+	bw32(bp, B44_MAC_FLOW, val);
+}
+
+static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
+{
+	u32 pause_enab = 0;
+
+	/* The driver supports only rx pause by default because
+	   the b44 mac tx pause mechanism generates excessive
+	   pause frames.
+	   Use ethtool to turn on b44 tx pause if necessary.
+	 */
+	if ((local & ADVERTISE_PAUSE_CAP) &&
+	    (local & ADVERTISE_PAUSE_ASYM)){
+		if ((remote & LPA_PAUSE_ASYM) &&
+		    !(remote & LPA_PAUSE_CAP))
+			pause_enab |= B44_FLAG_RX_PAUSE;
+	}
+
+	__b44_set_flow_ctrl(bp, pause_enab);
+}
+
+#ifdef CONFIG_BCM47XX
+#include <linux/bcm47xx_nvram.h>
+static void b44_wap54g10_workaround(struct b44 *bp)
+{
+	char buf[20];
+	u32 val;
+	int err;
+
+	/*
+	 * workaround for bad hardware design in Linksys WAP54G v1.0
+	 * see https://dev.openwrt.org/ticket/146
+	 * check and reset bit "isolate"
+	 */
+	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+		return;
+	if (simple_strtoul(buf, NULL, 0) == 2) {
+		err = __b44_readphy(bp, 0, MII_BMCR, &val);
+		if (err)
+			goto error;
+		if (!(val & BMCR_ISOLATE))
+			return;
+		val &= ~BMCR_ISOLATE;
+		err = __b44_writephy(bp, 0, MII_BMCR, val);
+		if (err)
+			goto error;
+	}
+	return;
+error:
+	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
+}
+#else
+static inline void b44_wap54g10_workaround(struct b44 *bp)
+{
+}
+#endif
+
+static int b44_setup_phy(struct b44 *bp)
+{
+	u32 val;
+	int err;
+
+	b44_wap54g10_workaround(bp);
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		return 0;
+	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
+		goto out;
+	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
+				val & MII_ALEDCTRL_ALLMSK)) != 0)
+		goto out;
+	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
+		goto out;
+	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
+				val | MII_TLEDCTRL_ENABLE)) != 0)
+		goto out;
+
+	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
+		u32 adv = ADVERTISE_CSMA;
+
+		if (bp->flags & B44_FLAG_ADV_10HALF)
+			adv |= ADVERTISE_10HALF;
+		if (bp->flags & B44_FLAG_ADV_10FULL)
+			adv |= ADVERTISE_10FULL;
+		if (bp->flags & B44_FLAG_ADV_100HALF)
+			adv |= ADVERTISE_100HALF;
+		if (bp->flags & B44_FLAG_ADV_100FULL)
+			adv |= ADVERTISE_100FULL;
+
+		if (bp->flags & B44_FLAG_PAUSE_AUTO)
+			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
+			goto out;
+		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
+						       BMCR_ANRESTART))) != 0)
+			goto out;
+	} else {
+		u32 bmcr;
+
+		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
+			goto out;
+		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
+		if (bp->flags & B44_FLAG_100_BASE_T)
+			bmcr |= BMCR_SPEED100;
+		if (bp->flags & B44_FLAG_FULL_DUPLEX)
+			bmcr |= BMCR_FULLDPLX;
+		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
+			goto out;
+
+		/* Since we will not be negotiating there is no safe way
+		 * to determine if the link partner supports flow control
+		 * or not.  So just disable it completely in this case.
+		 */
+		b44_set_flow_ctrl(bp, 0, 0);
+	}
+
+out:
+	return err;
+}
+
+static void b44_stats_update(struct b44 *bp)
+{
+	unsigned long reg;
+	u64 *val;
+
+	val = &bp->hw_stats.tx_good_octets;
+	u64_stats_update_begin(&bp->hw_stats.syncp);
+
+	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
+		*val++ += br32(bp, reg);
+	}
+
+	/* Pad */
+	reg += 8*4UL;
+
+	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
+		*val++ += br32(bp, reg);
+	}
+
+	u64_stats_update_end(&bp->hw_stats.syncp);
+}
+
+static void b44_link_report(struct b44 *bp)
+{
+	if (!netif_carrier_ok(bp->dev)) {
+		netdev_info(bp->dev, "Link is down\n");
+	} else {
+		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
+			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
+			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+	}
+}
+
+static void b44_check_phy(struct b44 *bp)
+{
+	u32 bmsr, aux;
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		bp->flags |= B44_FLAG_100_BASE_T;
+		if (!netif_carrier_ok(bp->dev)) {
+			u32 val = br32(bp, B44_TX_CTRL);
+			if (bp->flags & B44_FLAG_FULL_DUPLEX)
+				val |= TX_CTRL_DUPLEX;
+			else
+				val &= ~TX_CTRL_DUPLEX;
+			bw32(bp, B44_TX_CTRL, val);
+			netif_carrier_on(bp->dev);
+			b44_link_report(bp);
+		}
+		return;
+	}
+
+	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
+	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
+	    (bmsr != 0xffff)) {
+		if (aux & MII_AUXCTRL_SPEED)
+			bp->flags |= B44_FLAG_100_BASE_T;
+		else
+			bp->flags &= ~B44_FLAG_100_BASE_T;
+		if (aux & MII_AUXCTRL_DUPLEX)
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+		else
+			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+
+		if (!netif_carrier_ok(bp->dev) &&
+		    (bmsr & BMSR_LSTATUS)) {
+			u32 val = br32(bp, B44_TX_CTRL);
+			u32 local_adv, remote_adv;
+
+			if (bp->flags & B44_FLAG_FULL_DUPLEX)
+				val |= TX_CTRL_DUPLEX;
+			else
+				val &= ~TX_CTRL_DUPLEX;
+			bw32(bp, B44_TX_CTRL, val);
+
+			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
+			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
+			    !b44_readphy(bp, MII_LPA, &remote_adv))
+				b44_set_flow_ctrl(bp, local_adv, remote_adv);
+
+			/* Link now up */
+			netif_carrier_on(bp->dev);
+			b44_link_report(bp);
+		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
+			/* Link now down */
+			netif_carrier_off(bp->dev);
+			b44_link_report(bp);
+		}
+
+		if (bmsr & BMSR_RFAULT)
+			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
+		if (bmsr & BMSR_JCD)
+			netdev_warn(bp->dev, "Jabber detected in PHY\n");
+	}
+}
+
+static void b44_timer(unsigned long __opaque)
+{
+	struct b44 *bp = (struct b44 *) __opaque;
+
+	spin_lock_irq(&bp->lock);
+
+	b44_check_phy(bp);
+
+	b44_stats_update(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
+}
+
+static void b44_tx(struct b44 *bp)
+{
+	u32 cur, cons;
+	unsigned bytes_compl = 0, pkts_compl = 0;
+
+	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+	cur /= sizeof(struct dma_desc);
+
+	/* XXX needs updating when NETIF_F_SG is supported */
+	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
+		struct ring_info *rp = &bp->tx_buffers[cons];
+		struct sk_buff *skb = rp->skb;
+
+		BUG_ON(skb == NULL);
+
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
+		rp->skb = NULL;
+
+		bytes_compl += skb->len;
+		pkts_compl++;
+
+		dev_kfree_skb_irq(skb);
+	}
+
+	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
+	bp->tx_cons = cons;
+	if (netif_queue_stopped(bp->dev) &&
+	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
+		netif_wake_queue(bp->dev);
+
+	bw32(bp, B44_GPTIMER, 0);
+}
+
+/* Works like this.  This chip writes a "struct rx_header" 30 bytes
+ * before the DMA address you give it.  So we allocate 30 more bytes
+ * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
+ * point the chip at 30 bytes past where the rx_header will go.
+ */
+static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+	struct dma_desc *dp;
+	struct ring_info *src_map, *map;
+	struct rx_header *rh;
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int dest_idx;
+	u32 ctrl;
+
+	src_map = NULL;
+	if (src_idx >= 0)
+		src_map = &bp->rx_buffers[src_idx];
+	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+	map = &bp->rx_buffers[dest_idx];
+	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
+
+	/* Hardware bug work-around, the chip is unable to do PCI DMA
+	   to/from anything above 1GB :-( */
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+		/* Sigh... */
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
+					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
+		if (skb == NULL)
+			return -ENOMEM;
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		bp->force_copybreak = 1;
+	}
+
+	rh = (struct rx_header *) skb->data;
+
+	rh->len = 0;
+	rh->flags = 0;
+
+	map->skb = skb;
+	map->mapping = mapping;
+
+	if (src_map != NULL)
+		src_map->skb = NULL;
+
+	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
+	if (dest_idx == (B44_RX_RING_SIZE - 1))
+		ctrl |= DESC_CTRL_EOT;
+
+	dp = &bp->rx_ring[dest_idx];
+	dp->ctrl = cpu_to_le32(ctrl);
+	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+			                    dest_idx * sizeof(*dp),
+			                    DMA_BIDIRECTIONAL);
+
+	return RX_PKT_BUF_SZ;
+}
+
+static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+	struct dma_desc *src_desc, *dest_desc;
+	struct ring_info *src_map, *dest_map;
+	struct rx_header *rh;
+	int dest_idx;
+	__le32 ctrl;
+
+	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+	dest_desc = &bp->rx_ring[dest_idx];
+	dest_map = &bp->rx_buffers[dest_idx];
+	src_desc = &bp->rx_ring[src_idx];
+	src_map = &bp->rx_buffers[src_idx];
+
+	dest_map->skb = src_map->skb;
+	rh = (struct rx_header *) src_map->skb->data;
+	rh->len = 0;
+	rh->flags = 0;
+	dest_map->mapping = src_map->mapping;
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
+			                 src_idx * sizeof(*src_desc),
+			                 DMA_BIDIRECTIONAL);
+
+	ctrl = src_desc->ctrl;
+	if (dest_idx == (B44_RX_RING_SIZE - 1))
+		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
+	else
+		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
+
+	dest_desc->ctrl = ctrl;
+	dest_desc->addr = src_desc->addr;
+
+	src_map->skb = NULL;
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+					     dest_idx * sizeof(*dest_desc),
+					     DMA_BIDIRECTIONAL);
+
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
+}
+
+static int b44_rx(struct b44 *bp, int budget)
+{
+	int received;
+	u32 cons, prod;
+
+	received = 0;
+	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+	prod /= sizeof(struct dma_desc);
+	cons = bp->rx_cons;
+
+	while (cons != prod && budget > 0) {
+		struct ring_info *rp = &bp->rx_buffers[cons];
+		struct sk_buff *skb = rp->skb;
+		dma_addr_t map = rp->mapping;
+		struct rx_header *rh;
+		u16 len;
+
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
+		rh = (struct rx_header *) skb->data;
+		len = le16_to_cpu(rh->len);
+		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
+		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
+		drop_it:
+			b44_recycle_rx(bp, cons, bp->rx_prod);
+		drop_it_no_recycle:
+			bp->dev->stats.rx_dropped++;
+			goto next_pkt;
+		}
+
+		if (len == 0) {
+			int i = 0;
+
+			do {
+				udelay(2);
+				barrier();
+				len = le16_to_cpu(rh->len);
+			} while (len == 0 && i++ < 5);
+			if (len == 0)
+				goto drop_it;
+		}
+
+		/* Omit CRC. */
+		len -= 4;
+
+		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
+			int skb_size;
+			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+			if (skb_size < 0)
+				goto drop_it;
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
+			/* Leave out rx_header */
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
+		} else {
+			struct sk_buff *copy_skb;
+
+			b44_recycle_rx(bp, cons, bp->rx_prod);
+			copy_skb = napi_alloc_skb(&bp->napi, len);
+			if (copy_skb == NULL)
+				goto drop_it_no_recycle;
+
+			skb_put(copy_skb, len);
+			/* DMA sync done above, copy just the actual packet */
+			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
+							 copy_skb->data, len);
+			skb = copy_skb;
+		}
+		skb_checksum_none_assert(skb);
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		netif_receive_skb(skb);
+		received++;
+		budget--;
+	next_pkt:
+		bp->rx_prod = (bp->rx_prod + 1) &
+			(B44_RX_RING_SIZE - 1);
+		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
+	}
+
+	bp->rx_cons = cons;
+	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+
+	return received;
+}
+
+static int b44_poll(struct napi_struct *napi, int budget)
+{
+	struct b44 *bp = container_of(napi, struct b44, napi);
+	int work_done;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
+		/* spin_lock(&bp->tx_lock); */
+		b44_tx(bp);
+		/* spin_unlock(&bp->tx_lock); */
+	}
+	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
+		bp->istat &= ~ISTAT_RFO;
+		b44_disable_ints(bp);
+		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+		netif_wake_queue(bp->dev);
+	}
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	work_done = 0;
+	if (bp->istat & ISTAT_RX)
+		work_done += b44_rx(bp, budget);
+
+	if (bp->istat & ISTAT_ERRORS) {
+		spin_lock_irqsave(&bp->lock, flags);
+		b44_halt(bp);
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+		netif_wake_queue(bp->dev);
+		spin_unlock_irqrestore(&bp->lock, flags);
+		work_done = 0;
+	}
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		b44_enable_ints(bp);
+	}
+
+	return work_done;
+}
+
+static irqreturn_t b44_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct b44 *bp = netdev_priv(dev);
+	u32 istat, imask;
+	int handled = 0;
+
+	spin_lock(&bp->lock);
+
+	istat = br32(bp, B44_ISTAT);
+	imask = br32(bp, B44_IMASK);
+
+	/* The interrupt mask register controls which interrupt bits
+	 * will actually raise an interrupt to the CPU when set by hw/firmware,
+	 * but doesn't mask off the bits.
+	 */
+	istat &= imask;
+	if (istat) {
+		handled = 1;
+
+		if (unlikely(!netif_running(dev))) {
+			netdev_info(dev, "late interrupt\n");
+			goto irq_ack;
+		}
+
+		if (napi_schedule_prep(&bp->napi)) {
+			/* NOTE: These writes are posted by the readback of
+			 *       the ISTAT register below.
+			 */
+			bp->istat = istat;
+			__b44_disable_ints(bp);
+			__napi_schedule(&bp->napi);
+		}
+
+irq_ack:
+		bw32(bp, B44_ISTAT, istat);
+		br32(bp, B44_ISTAT);
+	}
+	spin_unlock(&bp->lock);
+	return IRQ_RETVAL(handled);
+}
+
+static void b44_tx_timeout(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	netdev_err(dev, "transmit timed out, resetting\n");
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	netif_wake_queue(dev);
+}
+
+static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	int rc = NETDEV_TX_OK;
+	dma_addr_t mapping;
+	u32 len, entry, ctrl;
+	unsigned long flags;
+
+	len = skb->len;
+	spin_lock_irqsave(&bp->lock, flags);
+
+	/* This is a hard error, log it. */
+	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
+		netif_stop_queue(dev);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+		goto err_out;
+	}
+
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+		struct sk_buff *bounce_skb;
+
+		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+					     DMA_TO_DEVICE);
+
+		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
+		if (!bounce_skb)
+			goto err_out;
+
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
+						     len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(bounce_skb);
+			goto err_out;
+		}
+
+		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
+		dev_kfree_skb_any(skb);
+		skb = bounce_skb;
+	}
+
+	entry = bp->tx_prod;
+	bp->tx_buffers[entry].skb = skb;
+	bp->tx_buffers[entry].mapping = mapping;
+
+	ctrl  = (len & DESC_CTRL_LEN);
+	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
+	if (entry == (B44_TX_RING_SIZE - 1))
+		ctrl |= DESC_CTRL_EOT;
+
+	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
+	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+
+	if (bp->flags & B44_FLAG_TX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
+			                    entry * sizeof(bp->tx_ring[0]),
+			                    DMA_TO_DEVICE);
+
+	entry = NEXT_TX(entry);
+
+	bp->tx_prod = entry;
+
+	wmb();
+
+	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
+		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+	if (bp->flags & B44_FLAG_REORDER_BUG)
+		br32(bp, B44_DMATX_PTR);
+
+	netdev_sent_queue(dev, skb->len);
+
+	if (TX_BUFFS_AVAIL(bp) < 1)
+		netif_stop_queue(dev);
+
+out_unlock:
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return rc;
+
+err_out:
+	rc = NETDEV_TX_BUSY;
+	goto out_unlock;
+}
+
+static int b44_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is up'd.
+		 */
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	spin_lock_irq(&bp->lock);
+	b44_halt(bp);
+	dev->mtu = new_mtu;
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  bp->lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void b44_free_rings(struct b44 *bp)
+{
+	struct ring_info *rp;
+	int i;
+
+	for (i = 0; i < B44_RX_RING_SIZE; i++) {
+		rp = &bp->rx_buffers[i];
+
+		if (rp->skb == NULL)
+			continue;
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rp->skb);
+		rp->skb = NULL;
+	}
+
+	/* XXX needs changes once NETIF_F_SG is set... */
+	for (i = 0; i < B44_TX_RING_SIZE; i++) {
+		rp = &bp->tx_buffers[i];
+
+		if (rp->skb == NULL)
+			continue;
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb_any(rp->skb);
+		rp->skb = NULL;
+	}
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.
+ */
+static void b44_init_rings(struct b44 *bp)
+{
+	int i;
+
+	b44_free_rings(bp);
+
+	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
+	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+
+	if (bp->flags & B44_FLAG_TX_RING_HACK)
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
+
+	for (i = 0; i < bp->rx_pending; i++) {
+		if (b44_alloc_rx_skb(bp, -1, i) < 0)
+			break;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void b44_free_consistent(struct b44 *bp)
+{
+	kfree(bp->rx_buffers);
+	bp->rx_buffers = NULL;
+	kfree(bp->tx_buffers);
+	bp->tx_buffers = NULL;
+	if (bp->rx_ring) {
+		if (bp->flags & B44_FLAG_RX_RING_HACK) {
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+			kfree(bp->rx_ring);
+		} else
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
+		bp->rx_ring = NULL;
+		bp->flags &= ~B44_FLAG_RX_RING_HACK;
+	}
+	if (bp->tx_ring) {
+		if (bp->flags & B44_FLAG_TX_RING_HACK) {
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
+			kfree(bp->tx_ring);
+		} else
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
+		bp->tx_ring = NULL;
+		bp->flags &= ~B44_FLAG_TX_RING_HACK;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
+{
+	int size;
+
+	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
+	bp->rx_buffers = kzalloc(size, gfp);
+	if (!bp->rx_buffers)
+		goto out_err;
+
+	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
+	bp->tx_buffers = kzalloc(size, gfp);
+	if (!bp->tx_buffers)
+		goto out_err;
+
+	size = DMA_TABLE_BYTES;
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
+	if (!bp->rx_ring) {
+		/* Allocation may have failed due to pci_alloc_consistent
+		   insisting on use of GFP_DMA, which is more restrictive
+		   than necessary...  */
+		struct dma_desc *rx_ring;
+		dma_addr_t rx_ring_dma;
+
+		rx_ring = kzalloc(size, gfp);
+		if (!rx_ring)
+			goto out_err;
+
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
+
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
+			rx_ring_dma + size > DMA_BIT_MASK(30)) {
+			kfree(rx_ring);
+			goto out_err;
+		}
+
+		bp->rx_ring = rx_ring;
+		bp->rx_ring_dma = rx_ring_dma;
+		bp->flags |= B44_FLAG_RX_RING_HACK;
+	}
+
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
+	if (!bp->tx_ring) {
+		/* Allocation may have failed due to ssb_dma_alloc_consistent
+		   insisting on use of GFP_DMA, which is more restrictive
+		   than necessary...  */
+		struct dma_desc *tx_ring;
+		dma_addr_t tx_ring_dma;
+
+		tx_ring = kzalloc(size, gfp);
+		if (!tx_ring)
+			goto out_err;
+
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
+
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
+			tx_ring_dma + size > DMA_BIT_MASK(30)) {
+			kfree(tx_ring);
+			goto out_err;
+		}
+
+		bp->tx_ring = tx_ring;
+		bp->tx_ring_dma = tx_ring_dma;
+		bp->flags |= B44_FLAG_TX_RING_HACK;
+	}
+
+	return 0;
+
+out_err:
+	b44_free_consistent(bp);
+	return -ENOMEM;
+}
+
+/* bp->lock is held. */
+static void b44_clear_stats(struct b44 *bp)
+{
+	unsigned long reg;
+
+	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
+		br32(bp, reg);
+	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
+		br32(bp, reg);
+}
+
+/* bp->lock is held. */
+static void b44_chip_reset(struct b44 *bp, int reset_kind)
+{
+	struct ssb_device *sdev = bp->sdev;
+	bool was_enabled;
+
+	was_enabled = ssb_device_is_enabled(bp->sdev);
+
+	ssb_device_enable(bp->sdev, 0);
+	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+
+	if (was_enabled) {
+		bw32(bp, B44_RCV_LAZY, 0);
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
+		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
+		bw32(bp, B44_DMATX_CTRL, 0);
+		bp->tx_prod = bp->tx_cons = 0;
+		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
+				     100, 0);
+		}
+		bw32(bp, B44_DMARX_CTRL, 0);
+		bp->rx_prod = bp->rx_cons = 0;
+	}
+
+	b44_clear_stats(bp);
+
+	/*
+	 * Don't enable PHY if we are doing a partial reset
+	 * we are probably going to power down
+	 */
+	if (reset_kind == B44_CHIP_RESET_PARTIAL)
+		return;
+
+	switch (sdev->bus->bustype) {
+	case SSB_BUSTYPE_SSB:
+		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
+					B44_MDC_RATIO)
+		     & MDIO_CTRL_MAXF_MASK)));
+		break;
+	case SSB_BUSTYPE_PCI:
+		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+		     (0x0d & MDIO_CTRL_MAXF_MASK)));
+		break;
+	case SSB_BUSTYPE_PCMCIA:
+	case SSB_BUSTYPE_SDIO:
+		WARN_ON(1); /* A device with this bus does not exist. */
+		break;
+	}
+
+	br32(bp, B44_MDIO_CTRL);
+
+	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+		br32(bp, B44_ENET_CTRL);
+		bp->flags |= B44_FLAG_EXTERNAL_PHY;
+	} else {
+		u32 val = br32(bp, B44_DEVCTRL);
+
+		if (val & DEVCTRL_EPR) {
+			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+			br32(bp, B44_DEVCTRL);
+			udelay(100);
+		}
+		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
+	}
+}
+
+/* bp->lock is held. */
+static void b44_halt(struct b44 *bp)
+{
+	b44_disable_ints(bp);
+	/* reset PHY */
+	b44_phy_reset(bp);
+	/* power down PHY */
+	netdev_info(bp->dev, "powering down PHY\n");
+	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
+	/* now reset the chip, but without enabling the MAC&PHY
+	 * part of it. This has to be done _after_ we shut down the PHY */
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+	else
+		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+}
+
+/* bp->lock is held. */
+static void __b44_set_mac_addr(struct b44 *bp)
+{
+	bw32(bp, B44_CAM_CTRL, 0);
+	if (!(bp->dev->flags & IFF_PROMISC)) {
+		u32 val;
+
+		__b44_cam_write(bp, bp->dev->dev_addr, 0);
+		val = br32(bp, B44_CAM_CTRL);
+		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+	}
+}
+
+static int b44_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	u32 val;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	spin_lock_irq(&bp->lock);
+
+	val = br32(bp, B44_RXCONFIG);
+	if (!(val & RXCONFIG_CAM_ABSENT))
+		__b44_set_mac_addr(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing.  Invoked with bp->lock held.
+ */
+static void __b44_set_rx_mode(struct net_device *);
+static void b44_init_hw(struct b44 *bp, int reset_kind)
+{
+	u32 val;
+
+	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+	if (reset_kind == B44_FULL_RESET) {
+		b44_phy_reset(bp);
+		b44_setup_phy(bp);
+	}
+
+	/* Enable CRC32, set proper LED modes and power on PHY */
+	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+
+	/* This sets the MAC address too.  */
+	__b44_set_rx_mode(bp->dev);
+
+	/* MTU + eth header + possible VLAN tag + struct rx_header */
+	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+
+	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
+	if (reset_kind == B44_PARTIAL_RESET) {
+		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+	} else {
+		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+
+		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
+		bp->rx_prod = bp->rx_pending;
+
+		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	}
+
+	val = br32(bp, B44_ENET_CTRL);
+	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+	netdev_reset_queue(bp->dev);
+}
+
+static int b44_open(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	int err;
+
+	err = b44_alloc_consistent(bp, GFP_KERNEL);
+	if (err)
+		goto out;
+
+	napi_enable(&bp->napi);
+
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+
+	b44_check_phy(bp);
+
+	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+	if (unlikely(err < 0)) {
+		napi_disable(&bp->napi);
+		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+		b44_free_rings(bp);
+		b44_free_consistent(bp);
+		goto out;
+	}
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + HZ;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = b44_timer;
+	add_timer(&bp->timer);
+
+	b44_enable_ints(bp);
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		phy_start(bp->phydev);
+
+	netif_start_queue(dev);
+out:
+	return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void b44_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	b44_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
+{
+	u32 i;
+	u32 *pattern = (u32 *) pp;
+
+	for (i = 0; i < bytes; i += sizeof(u32)) {
+		bw32(bp, B44_FILT_ADDR, table_offset + i);
+		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
+	}
+}
+
+static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+{
+	int magicsync = 6;
+	int k, j, len = offset;
+	int ethaddr_bytes = ETH_ALEN;
+
+	memset(ppattern + offset, 0xff, magicsync);
+	for (j = 0; j < magicsync; j++)
+		set_bit(len++, (unsigned long *) pmask);
+
+	for (j = 0; j < B44_MAX_PATTERNS; j++) {
+		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
+			ethaddr_bytes = ETH_ALEN;
+		else
+			ethaddr_bytes = B44_PATTERN_SIZE - len;
+		if (ethaddr_bytes <= 0)
+			break;
+		for (k = 0; k < ethaddr_bytes; k++) {
+			ppattern[offset + magicsync +
+				(j * ETH_ALEN) + k] = macaddr[k];
+			set_bit(len++, (unsigned long *) pmask);
+		}
+	}
+	return len - 1;
+}
+
+/* Setup magic packet patterns in the b44 WOL
+ * pattern matching filter.
+ */
+static void b44_setup_pseudo_magicp(struct b44 *bp)
+{
+
+	u32 val;
+	int plen0, plen1, plen2;
+	u8 *pwol_pattern;
+	u8 pwol_mask[B44_PMASK_SIZE];
+
+	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+	if (!pwol_pattern)
+		return;
+
+	/* Ipv4 magic packet pattern - pattern 0.*/
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  B44_ETHIPV4UDP_HLEN);
+
+   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
+   	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
+
+	/* Raw ethernet II magic packet pattern - pattern 1 */
+	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  ETH_HLEN);
+
+   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
+  	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+		       B44_PMASK_BASE + B44_PMASK_SIZE);
+
+	/* Ipv6 magic packet pattern - pattern 2 */
+	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  B44_ETHIPV6UDP_HLEN);
+
+   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
+  	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
+
+	kfree(pwol_pattern);
+
+	/* set these patterns' lengths: one less than each real length */
+	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
+	bw32(bp, B44_WKUP_LEN, val);
+
+	/* enable wakeup pattern matching */
+	val = br32(bp, B44_DEVCTRL);
+	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
+
+}
+
+#ifdef CONFIG_B44_PCI
+static void b44_setup_wol_pci(struct b44 *bp)
+{
+	u16 val;
+
+	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
+		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
+		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
+		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
+	}
+}
+#else
+static inline void b44_setup_wol_pci(struct b44 *bp) { }
+#endif /* CONFIG_B44_PCI */
+
+static void b44_setup_wol(struct b44 *bp)
+{
+	u32 val;
+
+	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
+
+	if (bp->flags & B44_FLAG_B0_ANDLATER) {
+
+		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
+
+		val = bp->dev->dev_addr[2] << 24 |
+			bp->dev->dev_addr[3] << 16 |
+			bp->dev->dev_addr[4] << 8 |
+			bp->dev->dev_addr[5];
+		bw32(bp, B44_ADDR_LO, val);
+
+		val = bp->dev->dev_addr[0] << 8 |
+			bp->dev->dev_addr[1];
+		bw32(bp, B44_ADDR_HI, val);
+
+		val = br32(bp, B44_DEVCTRL);
+		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
+
+ 	} else {
+ 		b44_setup_pseudo_magicp(bp);
+ 	}
+	b44_setup_wol_pci(bp);
+}
+
+static int b44_close(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		phy_stop(bp->phydev);
+
+	napi_disable(&bp->napi);
+
+	del_timer_sync(&bp->timer);
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	b44_free_rings(bp);
+	netif_carrier_off(dev);
+
+	spin_unlock_irq(&bp->lock);
+
+	free_irq(dev->irq, dev);
+
+	if (bp->flags & B44_FLAG_WOL_ENABLE) {
+		b44_init_hw(bp, B44_PARTIAL_RESET);
+		b44_setup_wol(bp);
+	}
+
+	b44_free_consistent(bp);
+
+	return 0;
+}
+
+static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+					struct rtnl_link_stats64 *nstat)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct b44_hw_stats *hwstat = &bp->hw_stats;
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+
+		/* Convert HW stats into rtnl_link_stats64 stats. */
+		nstat->rx_packets = hwstat->rx_pkts;
+		nstat->tx_packets = hwstat->tx_pkts;
+		nstat->rx_bytes   = hwstat->rx_octets;
+		nstat->tx_bytes   = hwstat->tx_octets;
+		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
+				     hwstat->tx_oversize_pkts +
+				     hwstat->tx_underruns +
+				     hwstat->tx_excessive_cols +
+				     hwstat->tx_late_cols);
+		nstat->multicast  = hwstat->rx_multicast_pkts;
+		nstat->collisions = hwstat->tx_total_cols;
+
+		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+					   hwstat->rx_undersize);
+		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
+		nstat->rx_frame_errors  = hwstat->rx_align_errs;
+		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
+		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
+					   hwstat->rx_oversize_pkts +
+					   hwstat->rx_missed_pkts +
+					   hwstat->rx_crc_align_errs +
+					   hwstat->rx_undersize +
+					   hwstat->rx_crc_errs +
+					   hwstat->rx_align_errs +
+					   hwstat->rx_symbol_errs);
+
+		nstat->tx_aborted_errors = hwstat->tx_underruns;
+#if 0
+		/* Carrier lost counter seems to be broken for some devices */
+		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+#endif
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+
+	return nstat;
+}
+
+static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	int i, num_ents;
+
+	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
+	i = 0;
+	netdev_for_each_mc_addr(ha, dev) {
+		if (i == num_ents)
+			break;
+		__b44_cam_write(bp, ha->addr, i++ + 1);
+	}
+	return i+1;
+}
+
+static void __b44_set_rx_mode(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 val;
+
+	val = br32(bp, B44_RXCONFIG);
+	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
+	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
+		val |= RXCONFIG_PROMISC;
+		bw32(bp, B44_RXCONFIG, val);
+	} else {
+		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
+		int i = 1;
+
+		__b44_set_mac_addr(bp);
+
+		if ((dev->flags & IFF_ALLMULTI) ||
+		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
+			val |= RXCONFIG_ALLMULTI;
+		else
+			i = __b44_load_mcast(bp, dev);
+
+		for (; i < 64; i++)
+			__b44_cam_write(bp, zero, i);
+
+		bw32(bp, B44_RXCONFIG, val);
+        	val = br32(bp, B44_CAM_CTRL);
+	        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+	}
+}
+
+static void b44_set_rx_mode(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	__b44_set_rx_mode(dev);
+	spin_unlock_irq(&bp->lock);
+}
+
+static u32 b44_get_msglevel(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	return bp->msg_enable;
+}
+
+static void b44_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct b44 *bp = netdev_priv(dev);
+	bp->msg_enable = value;
+}
+
+static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct ssb_bus *bus = bp->sdev->bus;
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	switch (bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+		break;
+	case SSB_BUSTYPE_SSB:
+		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+		break;
+	case SSB_BUSTYPE_PCMCIA:
+	case SSB_BUSTYPE_SDIO:
+		WARN_ON(1); /* A device with this bus does not exist. */
+		break;
+	}
+}
+
+static int b44_nway_reset(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 bmcr;
+	int r;
+
+	spin_lock_irq(&bp->lock);
+	b44_readphy(bp, MII_BMCR, &bmcr);
+	b44_readphy(bp, MII_BMCR, &bmcr);
+	r = -EINVAL;
+	if (bmcr & BMCR_ANENABLE) {
+		b44_writephy(bp, MII_BMCR,
+			     bmcr | BMCR_ANRESTART);
+		r = 0;
+	}
+	spin_unlock_irq(&bp->lock);
+
+	return r;
+}
+
+static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		return phy_ethtool_gset(bp->phydev, cmd);
+	}
+
+	cmd->supported = (SUPPORTED_Autoneg);
+	cmd->supported |= (SUPPORTED_100baseT_Half |
+			  SUPPORTED_100baseT_Full |
+			  SUPPORTED_10baseT_Half |
+			  SUPPORTED_10baseT_Full |
+			  SUPPORTED_MII);
+
+	cmd->advertising = 0;
+	if (bp->flags & B44_FLAG_ADV_10HALF)
+		cmd->advertising |= ADVERTISED_10baseT_Half;
+	if (bp->flags & B44_FLAG_ADV_10FULL)
+		cmd->advertising |= ADVERTISED_10baseT_Full;
+	if (bp->flags & B44_FLAG_ADV_100HALF)
+		cmd->advertising |= ADVERTISED_100baseT_Half;
+	if (bp->flags & B44_FLAG_ADV_100FULL)
+		cmd->advertising |= ADVERTISED_100baseT_Full;
+	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
+				    SPEED_100 : SPEED_10));
+	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+		DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = 0;
+	cmd->phy_address = bp->phy_addr;
+	cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+		XCVR_EXTERNAL : XCVR_INTERNAL;
+	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+		AUTONEG_DISABLE : AUTONEG_ENABLE;
+	if (cmd->autoneg == AUTONEG_ENABLE)
+		cmd->advertising |= ADVERTISED_Autoneg;
+	if (!netif_running(dev)){
+		ethtool_cmd_speed_set(cmd, 0);
+		cmd->duplex = 0xff;
+	}
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 speed;
+	int ret;
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		spin_lock_irq(&bp->lock);
+		if (netif_running(dev))
+			b44_setup_phy(bp);
+
+		ret = phy_ethtool_sset(bp->phydev, cmd);
+
+		spin_unlock_irq(&bp->lock);
+
+		return ret;
+	}
+
+	speed = ethtool_cmd_speed(cmd);
+
+	/* We do not support gigabit. */
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (cmd->advertising &
+		    (ADVERTISED_1000baseT_Half |
+		     ADVERTISED_1000baseT_Full))
+			return -EINVAL;
+	} else if ((speed != SPEED_100 &&
+		    speed != SPEED_10) ||
+		   (cmd->duplex != DUPLEX_HALF &&
+		    cmd->duplex != DUPLEX_FULL)) {
+		return -EINVAL;
+	}
+
+	spin_lock_irq(&bp->lock);
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		bp->flags &= ~(B44_FLAG_FORCE_LINK |
+			       B44_FLAG_100_BASE_T |
+			       B44_FLAG_FULL_DUPLEX |
+			       B44_FLAG_ADV_10HALF |
+			       B44_FLAG_ADV_10FULL |
+			       B44_FLAG_ADV_100HALF |
+			       B44_FLAG_ADV_100FULL);
+		if (cmd->advertising == 0) {
+			bp->flags |= (B44_FLAG_ADV_10HALF |
+				      B44_FLAG_ADV_10FULL |
+				      B44_FLAG_ADV_100HALF |
+				      B44_FLAG_ADV_100FULL);
+		} else {
+			if (cmd->advertising & ADVERTISED_10baseT_Half)
+				bp->flags |= B44_FLAG_ADV_10HALF;
+			if (cmd->advertising & ADVERTISED_10baseT_Full)
+				bp->flags |= B44_FLAG_ADV_10FULL;
+			if (cmd->advertising & ADVERTISED_100baseT_Half)
+				bp->flags |= B44_FLAG_ADV_100HALF;
+			if (cmd->advertising & ADVERTISED_100baseT_Full)
+				bp->flags |= B44_FLAG_ADV_100FULL;
+		}
+	} else {
+		bp->flags |= B44_FLAG_FORCE_LINK;
+		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
+		if (speed == SPEED_100)
+			bp->flags |= B44_FLAG_100_BASE_T;
+		if (cmd->duplex == DUPLEX_FULL)
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+	}
+
+	if (netif_running(dev))
+		b44_setup_phy(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	return 0;
+}
+
+static void b44_get_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
+	ering->rx_pending = bp->rx_pending;
+
+	/* XXX ethtool lacks a tx_max_pending, oops... */
+}
+
+static int b44_set_ringparam(struct net_device *dev,
+			     struct ethtool_ringparam *ering)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
+	    (ering->rx_mini_pending != 0) ||
+	    (ering->rx_jumbo_pending != 0) ||
+	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
+		return -EINVAL;
+
+	spin_lock_irq(&bp->lock);
+
+	bp->rx_pending = ering->rx_pending;
+	bp->tx_pending = ering->tx_pending;
+
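+	/* restart the chip so the new ring sizes take effect */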
+	b44_halt(bp);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	netif_wake_queue(bp->dev);
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+static void b44_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	epause->autoneg =
+		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
+	epause->rx_pause =
+		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
+	epause->tx_pause =
+		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
+}
+
+static int b44_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	if (epause->autoneg)
+		bp->flags |= B44_FLAG_PAUSE_AUTO;
+	else
+		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
+	if (epause->rx_pause)
+		bp->flags |= B44_FLAG_RX_PAUSE;
+	else
+		bp->flags &= ~B44_FLAG_RX_PAUSE;
+	if (epause->tx_pause)
+		bp->flags |= B44_FLAG_TX_PAUSE;
+	else
+		bp->flags &= ~B44_FLAG_TX_PAUSE;
+	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+		b44_halt(bp);
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET);
+	} else {
+		__b44_set_flow_ctrl(bp, bp->flags);
+	}
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
+		break;
+	}
+}
+
+static int b44_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(b44_gstrings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void b44_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct b44_hw_stats *hwstat = &bp->hw_stats;
+	u64 *data_src, *data_dst;
+	unsigned int start;
+	u32 i;
+
+	spin_lock_irq(&bp->lock);
+	b44_stats_update(bp);
+	spin_unlock_irq(&bp->lock);
+
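+	/* snapshot the counters under the u64_stats syncp so the 64-bit
+	 * values are read consistently on 32-bit systems */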
+	do {
+		data_src = &hwstat->tx_good_octets;
+		data_dst = data;
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+
+		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+			*data_dst++ = *data_src++;
+
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+}
+
+static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	wol->supported = WAKE_MAGIC;
+	if (bp->flags & B44_FLAG_WOL_ENABLE)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	if (wol->wolopts & WAKE_MAGIC)
+		bp->flags |= B44_FLAG_WOL_ENABLE;
+	else
+		bp->flags &= ~B44_FLAG_WOL_ENABLE;
+	spin_unlock_irq(&bp->lock);
+
+	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
+	return 0;
+}
+
+static const struct ethtool_ops b44_ethtool_ops = {
+	.get_drvinfo		= b44_get_drvinfo,
+	.get_settings		= b44_get_settings,
+	.set_settings		= b44_set_settings,
+	.nway_reset		= b44_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_wol		= b44_get_wol,
+	.set_wol		= b44_set_wol,
+	.get_ringparam		= b44_get_ringparam,
+	.set_ringparam		= b44_set_ringparam,
+	.get_pauseparam		= b44_get_pauseparam,
+	.set_pauseparam		= b44_set_pauseparam,
+	.get_msglevel		= b44_get_msglevel,
+	.set_msglevel		= b44_set_msglevel,
+	.get_strings		= b44_get_strings,
+	.get_sset_count		= b44_get_sset_count,
+	.get_ethtool_stats	= b44_get_ethtool_stats,
+};
+
+static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct b44 *bp = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!netif_running(dev))
+		goto out;
+
+	spin_lock_irq(&bp->lock);
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+	} else {
+		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+	}
+	spin_unlock_irq(&bp->lock);
+out:
+	return err;
+}
+
+static int b44_get_invariants(struct b44 *bp)
+{
+	struct ssb_device *sdev = bp->sdev;
+	int err = 0;
+	u8 *addr;
+
+	bp->dma_offset = ssb_dma_translation(sdev);
+
+	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
+	    instance > 1) {
+		addr = sdev->bus->sprom.et1mac;
+		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
+	} else {
+		addr = sdev->bus->sprom.et0mac;
+		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
+	}
+	/* Some ROMs have buggy PHY addresses with the high
+	 * bits set (sign extension?). Truncate them to a
+	 * valid PHY address. */
+	bp->phy_addr &= 0x1F;
+
+	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
+
+	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
+		pr_err("Invalid MAC address found in EEPROM\n");
+		return -EINVAL;
+	}
+
+	bp->imask = IMASK_DEF;
+
+	/* XXX - really required?
+	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
+	*/
+
+	if (bp->sdev->id.revision >= 7)
+		bp->flags |= B44_FLAG_B0_ANDLATER;
+
+	return err;
+}
+
+static const struct net_device_ops b44_netdev_ops = {
+	.ndo_open		= b44_open,
+	.ndo_stop		= b44_close,
+	.ndo_start_xmit		= b44_start_xmit,
+	.ndo_get_stats64	= b44_get_stats64,
+	.ndo_set_rx_mode	= b44_set_rx_mode,
+	.ndo_set_mac_address	= b44_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= b44_ioctl,
+	.ndo_tx_timeout		= b44_tx_timeout,
+	.ndo_change_mtu		= b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= b44_poll_controller,
+#endif
+};
+
+static void b44_adjust_link(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phydev;
+	bool status_changed = 0;
+
+	BUG_ON(!phydev);
+
+	if (bp->old_link != phydev->link) {
+		status_changed = 1;
+		bp->old_link = phydev->link;
+	}
+
+	/* reflect duplex change */
+	if (phydev->link) {
+		if ((phydev->duplex == DUPLEX_HALF) &&
+		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+			status_changed = 1;
+			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+		} else if ((phydev->duplex == DUPLEX_FULL) &&
+			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+			status_changed = 1;
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+		}
+	}
+
+	if (status_changed) {
+		u32 val = br32(bp, B44_TX_CTRL);
+		if (bp->flags & B44_FLAG_FULL_DUPLEX)
+			val |= TX_CTRL_DUPLEX;
+		else
+			val &= ~TX_CTRL_DUPLEX;
+		bw32(bp, B44_TX_CTRL, val);
+		phy_print_status(phydev);
+	}
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+	struct mii_bus *mii_bus;
+	struct ssb_device *sdev = bp->sdev;
+	struct phy_device *phydev;
+	char bus_id[MII_BUS_ID_SIZE + 3];
+	struct ssb_sprom *sprom = &sdev->bus->sprom;
+	int err;
+
+	mii_bus = mdiobus_alloc();
+	if (!mii_bus) {
+		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mii_bus->priv = bp;
+	mii_bus->read = b44_mdio_read_phylib;
+	mii_bus->write = b44_mdio_write_phylib;
+	mii_bus->name = "b44_eth_mii";
+	mii_bus->parent = sdev->dev;
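+	/* restrict MDIO probing to the PHY address we expect */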
+	mii_bus->phy_mask = ~(1 << bp->phy_addr);
+	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+	mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!mii_bus->irq) {
+		dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+		err = -ENOMEM;
+		goto err_out_mdiobus;
+	}
+
+	memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+	bp->mii_bus = mii_bus;
+
+	err = mdiobus_register(mii_bus);
+	if (err) {
+		dev_err(sdev->dev, "failed to register MII bus\n");
+		goto err_out_mdiobus_irq;
+	}
+
+	if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+		dev_info(sdev->dev,
+			 "could not find PHY at %i, use fixed one\n",
+			 bp->phy_addr);
+
+		bp->phy_addr = 0;
+		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+			 bp->phy_addr);
+	} else {
+		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+			 bp->phy_addr);
+	}
+
+	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+			     PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phydev)) {
+		dev_err(sdev->dev, "could not attach PHY at %i\n",
+			bp->phy_addr);
+		err = PTR_ERR(phydev);
+		goto err_out_mdiobus_unregister;
+	}
+
+	/* mask with MAC supported features */
+	phydev->supported &= (SUPPORTED_100baseT_Half |
+			      SUPPORTED_100baseT_Full |
+			      SUPPORTED_Autoneg |
+			      SUPPORTED_MII);
+	phydev->advertising = phydev->supported;
+
+	bp->phydev = phydev;
+	bp->old_link = 0;
+	bp->phy_addr = phydev->addr;
+
+	dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+		 phydev->drv->name, dev_name(&phydev->dev));
+
+	return 0;
+
+err_out_mdiobus_unregister:
+	mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+	kfree(mii_bus->irq);
+
+err_out_mdiobus:
+	mdiobus_free(mii_bus);
+
+err_out:
+	return err;
+}
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+	struct mii_bus *mii_bus = bp->mii_bus;
+
+	phy_disconnect(bp->phydev);
+	mdiobus_unregister(mii_bus);
+	kfree(mii_bus->irq);
+	mdiobus_free(mii_bus);
+}
+
+static int b44_init_one(struct ssb_device *sdev,
+			const struct ssb_device_id *ent)
+{
+	struct net_device *dev;
+	struct b44 *bp;
+	int err;
+
+	instance++;
+
+	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
+
+	dev = alloc_etherdev(sizeof(*bp));
+	if (!dev) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	SET_NETDEV_DEV(dev, sdev->dev);
+
+	/* No interesting netdevice features in this card... */
+	dev->features |= 0;
+
+	bp = netdev_priv(dev);
+	bp->sdev = sdev;
+	bp->dev = dev;
+	bp->force_copybreak = 0;
+
+	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
+
+	spin_lock_init(&bp->lock);
+
+	bp->rx_pending = B44_DEF_RX_RING_PENDING;
+	bp->tx_pending = B44_DEF_TX_RING_PENDING;
+
+	dev->netdev_ops = &b44_netdev_ops;
+	netif_napi_add(dev, &bp->napi, b44_poll, 64);
+	dev->watchdog_timeo = B44_TX_TIMEOUT;
+	dev->irq = sdev->irq;
+	dev->ethtool_ops = &b44_ethtool_ops;
+
+	err = ssb_bus_powerup(sdev->bus, 0);
+	if (err) {
+		dev_err(sdev->dev,
+			"Failed to powerup the bus\n");
+		goto err_out_free_dev;
+	}
+
+	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+		dev_err(sdev->dev,
+			"Required 30BIT DMA mask unsupported by the system\n");
+		goto err_out_powerdown;
+	}
+
+	err = b44_get_invariants(bp);
+	if (err) {
+		dev_err(sdev->dev,
+			"Problem fetching invariants of chip, aborting\n");
+		goto err_out_powerdown;
+	}
+
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+		err = -ENODEV;
+		goto err_out_powerdown;
+	}
+
+	bp->mii_if.dev = dev;
+	bp->mii_if.mdio_read = b44_mdio_read_mii;
+	bp->mii_if.mdio_write = b44_mdio_write_mii;
+	bp->mii_if.phy_id = bp->phy_addr;
+	bp->mii_if.phy_id_mask = 0x1f;
+	bp->mii_if.reg_num_mask = 0x1f;
+
+	/* By default, advertise all speed/duplex settings. */
+	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
+		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
+
+	/* By default, auto-negotiate PAUSE. */
+	bp->flags |= B44_FLAG_PAUSE_AUTO;
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(sdev->dev, "Cannot register net device, aborting\n");
+		goto err_out_powerdown;
+	}
+
+	netif_carrier_off(dev);
+
+	ssb_set_drvdata(sdev, dev);
+
+	/* Chip reset provides power to the b44 MAC & PCI cores, which
+	 * is necessary for MAC register access.
+	 */
+	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+
+	/* do a phy reset to test if there is an active phy */
+	err = b44_phy_reset(bp);
+	if (err < 0) {
+		dev_err(sdev->dev, "phy reset failed\n");
+		goto err_out_unregister_netdev;
+	}
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		err = b44_register_phy_one(bp);
+		if (err) {
+			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+			goto err_out_unregister_netdev;
+		}
+	}
+
+	device_set_wakeup_capable(sdev->dev, true);
+	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
+
+	return 0;
+
+err_out_unregister_netdev:
+	unregister_netdev(dev);
+err_out_powerdown:
+	ssb_bus_may_powerdown(sdev->bus);
+
+err_out_free_dev:
+	netif_napi_del(&bp->napi);
+	free_netdev(dev);
+
+out:
+	return err;
+}
+
+static void b44_remove_one(struct ssb_device *sdev)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		b44_unregister_phy_one(bp);
+	ssb_device_disable(sdev, 0);
+	ssb_bus_may_powerdown(sdev->bus);
+	netif_napi_del(&bp->napi);
+	free_netdev(dev);
+	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+	ssb_set_drvdata(sdev, NULL);
+}
+
+static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	del_timer_sync(&bp->timer);
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	netif_carrier_off(bp->dev);
+	netif_device_detach(bp->dev);
+	b44_free_rings(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	free_irq(dev->irq, dev);
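+	/* re-arm the MAC for Wake-on-LAN before entering D3hot */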
+	if (bp->flags & B44_FLAG_WOL_ENABLE) {
+		b44_init_hw(bp, B44_PARTIAL_RESET);
+		b44_setup_wol(bp);
+	}
+
+	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+	return 0;
+}
+
+static int b44_resume(struct ssb_device *sdev)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
+	int rc = 0;
+
+	rc = ssb_bus_powerup(sdev->bus, 0);
+	if (rc) {
+		dev_err(sdev->dev,
+			"Failed to powerup the bus\n");
+		return rc;
+	}
+
+	if (!netif_running(dev))
+		return 0;
+
+	spin_lock_irq(&bp->lock);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	spin_unlock_irq(&bp->lock);
+
+	/*
+	 * As a shared interrupt, the handler can be called immediately. To be
+	 * able to check the interrupt status the hardware must already be
+	 * powered back on (b44_init_hw).
+	 */
+	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc) {
+		netdev_err(dev, "request_irq failed\n");
+		spin_lock_irq(&bp->lock);
+		b44_halt(bp);
+		b44_free_rings(bp);
+		spin_unlock_irq(&bp->lock);
+		return rc;
+	}
+
+	netif_device_attach(bp->dev);
+
+	b44_enable_ints(bp);
+	netif_wake_queue(dev);
+
+	mod_timer(&bp->timer, jiffies + 1);
+
+	return 0;
+}
+
+static struct ssb_driver b44_ssb_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= b44_ssb_tbl,
+	.probe		= b44_init_one,
+	.remove		= b44_remove_one,
+	.suspend	= b44_suspend,
+	.resume		= b44_resume,
+};
+
+static inline int __init b44_pci_init(void)
+{
+	int err = 0;
+#ifdef CONFIG_B44_PCI
+	err = ssb_pcihost_register(&b44_pci_driver);
+#endif
+	return err;
+}
+
+static inline void b44_pci_exit(void)
+{
+#ifdef CONFIG_B44_PCI
+	ssb_pcihost_unregister(&b44_pci_driver);
+#endif
+}
+
+static int __init b44_init(void)
+{
+	unsigned int dma_desc_align_size = dma_get_cache_alignment();
+	int err;
+
+	/* Set up parameters for syncing RX/TX DMA descriptors */
+	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
+	err = b44_pci_init();
+	if (err)
+		return err;
+	err = ssb_driver_register(&b44_ssb_driver);
+	if (err)
+		b44_pci_exit();
+	return err;
+}
+
+static void __exit b44_cleanup(void)
+{
+	ssb_driver_unregister(&b44_ssb_driver);
+	b44_pci_exit();
+}
+
+module_init(b44_init);
+module_exit(b44_cleanup);
+
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
new file mode 100644
index 0000000..65d88d7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -0,0 +1,413 @@
+#ifndef _B44_H
+#define _B44_H
+
+#include <linux/brcmphy.h>
+
+/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
+#define	B44_DEVCTRL	0x0000UL /* Device Control */
+#define  DEVCTRL_MPM		0x00000040 /* Magic Packet PME Enable (B0 only) */
+#define  DEVCTRL_PFE		0x00000080 /* Pattern Filtering Enable */
+#define  DEVCTRL_IPP		0x00000400 /* Internal EPHY Present */
+#define  DEVCTRL_EPR		0x00008000 /* EPHY Reset */
+#define  DEVCTRL_PME		0x00001000 /* PHY Mode Enable */
+#define  DEVCTRL_PMCE		0x00002000 /* PHY Mode Clocks Enable */
+#define  DEVCTRL_PADDR		0x0007c000 /* PHY Address */
+#define  DEVCTRL_PADDR_SHIFT	18
+#define B44_BIST_STAT	0x000CUL /* Built-In Self-Test Status */
+#define B44_WKUP_LEN	0x0010UL /* Wakeup Length */
+#define  WKUP_LEN_P0_MASK	0x0000007f /* Pattern 0 */
+#define  WKUP_LEN_D0		0x00000080
+#define  WKUP_LEN_P1_MASK	0x00007f00 /* Pattern 1 */
+#define  WKUP_LEN_P1_SHIFT	8
+#define  WKUP_LEN_D1		0x00008000
+#define  WKUP_LEN_P2_MASK	0x007f0000 /* Pattern 2 */
+#define  WKUP_LEN_P2_SHIFT	16
+#define  WKUP_LEN_D2		0x00800000
+#define  WKUP_LEN_P3_MASK	0x7f000000 /* Pattern 3 */
+#define  WKUP_LEN_P3_SHIFT	24
+#define  WKUP_LEN_D3		0x80000000
+#define  WKUP_LEN_DISABLE	0x80808080
+#define  WKUP_LEN_ENABLE_TWO	0x80800000
+#define  WKUP_LEN_ENABLE_THREE	0x80000000
+#define B44_ISTAT	0x0020UL /* Interrupt Status */
+#define  ISTAT_LS		0x00000020 /* Link Change (B0 only) */
+#define  ISTAT_PME		0x00000040 /* Power Management Event */
+#define  ISTAT_TO		0x00000080 /* General Purpose Timeout */
+#define  ISTAT_DSCE		0x00000400 /* Descriptor Error */
+#define  ISTAT_DATAE		0x00000800 /* Data Error */
+#define  ISTAT_DPE		0x00001000 /* Descr. Protocol Error */
+#define  ISTAT_RDU		0x00002000 /* Receive Descr. Underflow */
+#define  ISTAT_RFO		0x00004000 /* Receive FIFO Overflow */
+#define  ISTAT_TFU		0x00008000 /* Transmit FIFO Underflow */
+#define  ISTAT_RX		0x00010000 /* RX Interrupt */
+#define  ISTAT_TX		0x01000000 /* TX Interrupt */
+#define  ISTAT_EMAC		0x04000000 /* EMAC Interrupt */
+#define  ISTAT_MII_WRITE	0x08000000 /* MII Write Interrupt */
+#define  ISTAT_MII_READ		0x10000000 /* MII Read Interrupt */
+#define  ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
+#define B44_IMASK	0x0024UL /* Interrupt Mask */
+#define  IMASK_DEF		(ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
+#define B44_GPTIMER	0x0028UL /* General Purpose Timer */
+#define B44_ADDR_LO	0x0088UL /* ENET Address Lo (B0 only) */
+#define B44_ADDR_HI	0x008CUL /* ENET Address Hi (B0 only) */
+#define B44_FILT_ADDR	0x0090UL /* ENET Filter Address */
+#define B44_FILT_DATA	0x0094UL /* ENET Filter Data */
+#define B44_TXBURST	0x00A0UL /* TX Max Burst Length */
+#define B44_RXBURST	0x00A4UL /* RX Max Burst Length */
+#define B44_MAC_CTRL	0x00A8UL /* MAC Control */
+#define  MAC_CTRL_CRC32_ENAB	0x00000001 /* CRC32 Generation Enable */
+#define  MAC_CTRL_PHY_PDOWN	0x00000004 /* Onchip EPHY Powerdown */
+#define  MAC_CTRL_PHY_EDET	0x00000008 /* Onchip EPHY Energy Detected */
+#define  MAC_CTRL_PHY_LEDCTRL	0x000000e0 /* Onchip EPHY LED Control */
+#define  MAC_CTRL_PHY_LEDCTRL_SHIFT 5
+#define B44_MAC_FLOW	0x00ACUL /* MAC Flow Control */
+#define  MAC_FLOW_RX_HI_WATER	0x000000ff /* Receive FIFO HI Water Mark */
+#define  MAC_FLOW_PAUSE_ENAB	0x00008000 /* Enable Pause Frame Generation */
+#define B44_RCV_LAZY	0x0100UL /* Lazy Interrupt Control */
+#define  RCV_LAZY_TO_MASK	0x00ffffff /* Timeout */
+#define  RCV_LAZY_FC_MASK	0xff000000 /* Frame Count */
+#define  RCV_LAZY_FC_SHIFT	24
+#define B44_DMATX_CTRL	0x0200UL /* DMA TX Control */
+#define  DMATX_CTRL_ENABLE	0x00000001 /* Enable */
+#define  DMATX_CTRL_SUSPEND	0x00000002 /* Suspend Request */
+#define  DMATX_CTRL_LPBACK	0x00000004 /* Loopback Enable */
+#define  DMATX_CTRL_FAIRPRIOR	0x00000008 /* Fair Priority */
+#define  DMATX_CTRL_FLUSH	0x00000010 /* Flush Request */
+#define B44_DMATX_ADDR	0x0204UL /* DMA TX Descriptor Ring Address */
+#define B44_DMATX_PTR	0x0208UL /* DMA TX Last Posted Descriptor */
+#define B44_DMATX_STAT	0x020CUL /* DMA TX Current Active Desc. + Status */
+#define  DMATX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
+#define  DMATX_STAT_SMASK	0x0000f000 /* State Mask */
+#define  DMATX_STAT_SDISABLED	0x00000000 /* State Disabled */
+#define  DMATX_STAT_SACTIVE	0x00001000 /* State Active */
+#define  DMATX_STAT_SIDLE	0x00002000 /* State Idle Wait */
+#define  DMATX_STAT_SSTOPPED	0x00003000 /* State Stopped */
+#define  DMATX_STAT_SSUSP	0x00004000 /* State Suspend Pending */
+#define  DMATX_STAT_EMASK	0x000f0000 /* Error Mask */
+#define  DMATX_STAT_ENONE	0x00000000 /* Error None */
+#define  DMATX_STAT_EDPE	0x00010000 /* Error Desc. Protocol Error */
+#define  DMATX_STAT_EDFU	0x00020000 /* Error Data FIFO Underrun */
+#define  DMATX_STAT_EBEBR	0x00030000 /* Error Bus Error on Buffer Read */
+#define  DMATX_STAT_EBEDA	0x00040000 /* Error Bus Error on Desc. Access */
+#define  DMATX_STAT_FLUSHED	0x00100000 /* Flushed */
+#define B44_DMARX_CTRL	0x0210UL /* DMA RX Control */
+#define  DMARX_CTRL_ENABLE	0x00000001 /* Enable */
+#define  DMARX_CTRL_ROMASK	0x000000fe /* Receive Offset Mask */
+#define  DMARX_CTRL_ROSHIFT	1 	   /* Receive Offset Shift */
+#define B44_DMARX_ADDR	0x0214UL /* DMA RX Descriptor Ring Address */
+#define B44_DMARX_PTR	0x0218UL /* DMA RX Last Posted Descriptor */
+#define B44_DMARX_STAT	0x021CUL /* DMA RX Current Active Desc. + Status */
+#define  DMARX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
+#define  DMARX_STAT_SMASK	0x0000f000 /* State Mask */
+#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disabled */
+#define  DMARX_STAT_SACTIVE	0x00001000 /* State Active */
+#define  DMARX_STAT_SIDLE	0x00002000 /* State Idle Wait */
+#define  DMARX_STAT_SSTOPPED	0x00003000 /* State Stopped */
+#define  DMARX_STAT_EMASK	0x000f0000 /* Error Mask */
+#define  DMARX_STAT_ENONE	0x00000000 /* Error None */
+#define  DMARX_STAT_EDPE	0x00010000 /* Error Desc. Protocol Error */
+#define  DMARX_STAT_EDFO	0x00020000 /* Error Data FIFO Overflow */
+#define  DMARX_STAT_EBEBW	0x00030000 /* Error Bus Error on Buffer Write */
+#define  DMARX_STAT_EBEDA	0x00040000 /* Error Bus Error on Desc. Access */
+#define B44_DMAFIFO_AD	0x0220UL /* DMA FIFO Diag Address */
+#define  DMAFIFO_AD_OMASK	0x0000ffff /* Offset Mask */
+#define  DMAFIFO_AD_SMASK	0x000f0000 /* Select Mask */
+#define  DMAFIFO_AD_SXDD	0x00000000 /* Select Transmit DMA Data */
+#define  DMAFIFO_AD_SXDP	0x00010000 /* Select Transmit DMA Pointers */
+#define  DMAFIFO_AD_SRDD	0x00040000 /* Select Receive DMA Data */
+#define  DMAFIFO_AD_SRDP	0x00050000 /* Select Receive DMA Pointers */
+#define  DMAFIFO_AD_SXFD	0x00080000 /* Select Transmit FIFO Data */
+#define  DMAFIFO_AD_SXFP	0x00090000 /* Select Transmit FIFO Pointers */
+#define  DMAFIFO_AD_SRFD	0x000c0000 /* Select Receive FIFO Data */
+#define  DMAFIFO_AD_SRFP	0x000d0000 /* Select Receive FIFO Pointers */
+#define B44_DMAFIFO_LO	0x0224UL /* DMA FIFO Diag Low Data */
+#define B44_DMAFIFO_HI	0x0228UL /* DMA FIFO Diag High Data */
+#define B44_RXCONFIG	0x0400UL /* EMAC RX Config */
+#define  RXCONFIG_DBCAST	0x00000001 /* Disable Broadcast */
+#define  RXCONFIG_ALLMULTI	0x00000002 /* Accept All Multicast */
+#define  RXCONFIG_NORX_WHILE_TX	0x00000004 /* Receive Disable While Transmitting */
+#define  RXCONFIG_PROMISC	0x00000008 /* Promiscuous Enable */
+#define  RXCONFIG_LPBACK	0x00000010 /* Loopback Enable */
+#define  RXCONFIG_FLOW		0x00000020 /* Flow Control Enable */
+#define  RXCONFIG_FLOW_ACCEPT	0x00000040 /* Accept Unicast Flow Control Frame */
+#define  RXCONFIG_RFILT		0x00000080 /* Reject Filter */
+#define  RXCONFIG_CAM_ABSENT	0x00000100 /* CAM Absent */
+#define B44_RXMAXLEN	0x0404UL /* EMAC RX Max Packet Length */
+#define B44_TXMAXLEN	0x0408UL /* EMAC TX Max Packet Length */
+#define B44_MDIO_CTRL	0x0410UL /* EMAC MDIO Control */
+#define  MDIO_CTRL_MAXF_MASK	0x0000007f /* MDC Frequency */
+#define  MDIO_CTRL_PREAMBLE	0x00000080 /* MII Preamble Enable */
+#define B44_MDIO_DATA	0x0414UL /* EMAC MDIO Data */
+#define  MDIO_DATA_DATA		0x0000ffff /* R/W Data */
+#define  MDIO_DATA_TA_MASK	0x00030000 /* Turnaround Value */
+#define  MDIO_DATA_TA_SHIFT	16
+#define  MDIO_TA_VALID		2
+#define  MDIO_DATA_RA_MASK	0x007c0000 /* Register Address */
+#define  MDIO_DATA_RA_SHIFT	18
+#define  MDIO_DATA_PMD_MASK	0x0f800000 /* Physical Media Device */
+#define  MDIO_DATA_PMD_SHIFT	23
+#define  MDIO_DATA_OP_MASK	0x30000000 /* Opcode */
+#define  MDIO_DATA_OP_SHIFT	28
+#define  MDIO_OP_WRITE		1
+#define  MDIO_OP_READ		2
+#define  MDIO_DATA_SB_MASK	0xc0000000 /* Start Bits */
+#define  MDIO_DATA_SB_SHIFT	30
+#define  MDIO_DATA_SB_START	0x40000000 /* Start Of Frame */
+#define B44_EMAC_IMASK	0x0418UL /* EMAC Interrupt Mask */
+#define B44_EMAC_ISTAT	0x041CUL /* EMAC Interrupt Status */
+#define  EMAC_INT_MII		0x00000001 /* MII MDIO Interrupt */
+#define  EMAC_INT_MIB		0x00000002 /* MIB Interrupt */
+#define  EMAC_INT_FLOW		0x00000003 /* Flow Control Interrupt */
+#define B44_CAM_DATA_LO	0x0420UL /* EMAC CAM Data Low */
+#define B44_CAM_DATA_HI	0x0424UL /* EMAC CAM Data High */
+#define  CAM_DATA_HI_VALID	0x00010000 /* Valid Bit */
+#define B44_CAM_CTRL	0x0428UL /* EMAC CAM Control */
+#define  CAM_CTRL_ENABLE	0x00000001 /* CAM Enable */
+#define  CAM_CTRL_MSEL		0x00000002 /* Mask Select */
+#define  CAM_CTRL_READ		0x00000004 /* Read */
+#define  CAM_CTRL_WRITE		0x00000008 /* Write */
+#define  CAM_CTRL_INDEX_MASK	0x003f0000 /* Index Mask */
+#define  CAM_CTRL_INDEX_SHIFT	16
+#define  CAM_CTRL_BUSY		0x80000000 /* CAM Busy */
+#define B44_ENET_CTRL	0x042CUL /* EMAC ENET Control */
+#define  ENET_CTRL_ENABLE	0x00000001 /* EMAC Enable */
+#define  ENET_CTRL_DISABLE	0x00000002 /* EMAC Disable */
+#define  ENET_CTRL_SRST		0x00000004 /* EMAC Soft Reset */
+#define  ENET_CTRL_EPSEL	0x00000008 /* External PHY Select */
+#define B44_TX_CTRL	0x0430UL /* EMAC TX Control */
+#define  TX_CTRL_DUPLEX		0x00000001 /* Full Duplex */
+#define  TX_CTRL_FMODE		0x00000002 /* Flow Mode */
+#define  TX_CTRL_SBENAB		0x00000004 /* Single Backoff Enable */
+#define  TX_CTRL_SMALL_SLOT	0x00000008 /* Small Slottime */
+#define B44_TX_WMARK	0x0434UL /* EMAC TX Watermark */
+#define B44_MIB_CTRL	0x0438UL /* EMAC MIB Control */
+#define  MIB_CTRL_CLR_ON_READ	0x00000001 /* Autoclear on Read */
+#define B44_TX_GOOD_O	0x0500UL /* MIB TX Good Octets */
+#define B44_TX_GOOD_P	0x0504UL /* MIB TX Good Packets */
+#define B44_TX_O	0x0508UL /* MIB TX Octets */
+#define B44_TX_P	0x050CUL /* MIB TX Packets */
+#define B44_TX_BCAST	0x0510UL /* MIB TX Broadcast Packets */
+#define B44_TX_MCAST	0x0514UL /* MIB TX Multicast Packets */
+#define B44_TX_64	0x0518UL /* MIB TX <= 64 byte Packets */
+#define B44_TX_65_127	0x051CUL /* MIB TX 65 to 127 byte Packets */
+#define B44_TX_128_255	0x0520UL /* MIB TX 128 to 255 byte Packets */
+#define B44_TX_256_511	0x0524UL /* MIB TX 256 to 511 byte Packets */
+#define B44_TX_512_1023	0x0528UL /* MIB TX 512 to 1023 byte Packets */
+#define B44_TX_1024_MAX	0x052CUL /* MIB TX 1024 to max byte Packets */
+#define B44_TX_JABBER	0x0530UL /* MIB TX Jabber Packets */
+#define B44_TX_OSIZE	0x0534UL /* MIB TX Oversize Packets */
+#define B44_TX_FRAG	0x0538UL /* MIB TX Fragment Packets */
+#define B44_TX_URUNS	0x053CUL /* MIB TX Underruns */
+#define B44_TX_TCOLS	0x0540UL /* MIB TX Total Collisions */
+#define B44_TX_SCOLS	0x0544UL /* MIB TX Single Collisions */
+#define B44_TX_MCOLS	0x0548UL /* MIB TX Multiple Collisions */
+#define B44_TX_ECOLS	0x054CUL /* MIB TX Excessive Collisions */
+#define B44_TX_LCOLS	0x0550UL /* MIB TX Late Collisions */
+#define B44_TX_DEFERED	0x0554UL /* MIB TX Deferred Packets */
+#define B44_TX_CLOST	0x0558UL /* MIB TX Carrier Lost */
+#define B44_TX_PAUSE	0x055CUL /* MIB TX Pause Packets */
+#define B44_RX_GOOD_O	0x0580UL /* MIB RX Good Octets */
+#define B44_RX_GOOD_P	0x0584UL /* MIB RX Good Packets */
+#define B44_RX_O	0x0588UL /* MIB RX Octets */
+#define B44_RX_P	0x058CUL /* MIB RX Packets */
+#define B44_RX_BCAST	0x0590UL /* MIB RX Broadcast Packets */
+#define B44_RX_MCAST	0x0594UL /* MIB RX Multicast Packets */
+#define B44_RX_64	0x0598UL /* MIB RX <= 64 byte Packets */
+#define B44_RX_65_127	0x059CUL /* MIB RX 65 to 127 byte Packets */
+#define B44_RX_128_255	0x05A0UL /* MIB RX 128 to 255 byte Packets */
+#define B44_RX_256_511	0x05A4UL /* MIB RX 256 to 511 byte Packets */
+#define B44_RX_512_1023	0x05A8UL /* MIB RX 512 to 1023 byte Packets */
+#define B44_RX_1024_MAX	0x05ACUL /* MIB RX 1024 to max byte Packets */
+#define B44_RX_JABBER	0x05B0UL /* MIB RX Jabber Packets */
+#define B44_RX_OSIZE	0x05B4UL /* MIB RX Oversize Packets */
+#define B44_RX_FRAG	0x05B8UL /* MIB RX Fragment Packets */
+#define B44_RX_MISS	0x05BCUL /* MIB RX Missed Packets */
+#define B44_RX_CRCA	0x05C0UL /* MIB RX CRC Align Errors */
+#define B44_RX_USIZE	0x05C4UL /* MIB RX Undersize Packets */
+#define B44_RX_CRC	0x05C8UL /* MIB RX CRC Errors */
+#define B44_RX_ALIGN	0x05CCUL /* MIB RX Align Errors */
+#define B44_RX_SYM	0x05D0UL /* MIB RX Symbol Errors */
+#define B44_RX_PAUSE	0x05D4UL /* MIB RX Pause Packets */
+#define B44_RX_NPAUSE	0x05D8UL /* MIB RX Non-Pause Packets */
+
+/* 4400 PHY registers */
+#define B44_MII_AUXCTRL		24	/* Auxiliary Control */
+#define  MII_AUXCTRL_DUPLEX	0x0001  /* Full Duplex */
+#define  MII_AUXCTRL_SPEED	0x0002  /* 1=100Mbps, 0=10Mbps */
+#define  MII_AUXCTRL_FORCED	0x0004	/* Forced 10/100 */
+#define B44_MII_ALEDCTRL	26	/* Activity LED */
+#define  MII_ALEDCTRL_ALLMSK	0x7fff
+#define B44_MII_TLEDCTRL	27	/* Traffic Meter LED */
+#define  MII_TLEDCTRL_ENABLE	0x0040
+
+struct dma_desc {
+	__le32	ctrl;
+	__le32	addr;
+};
+
+/* There are only 12 bits in the DMA engine for descriptor offsetting
+ * so the table must be aligned on a 4096-byte boundary.
+ */
+#define DMA_TABLE_BYTES		4096
+
+#define DESC_CTRL_LEN	0x00001fff
+#define DESC_CTRL_CMASK	0x0ff00000 /* Core specific bits */
+#define DESC_CTRL_EOT	0x10000000 /* End of Table */
+#define DESC_CTRL_IOC	0x20000000 /* Interrupt On Completion */
+#define DESC_CTRL_EOF	0x40000000 /* End of Frame */
+#define DESC_CTRL_SOF	0x80000000 /* Start of Frame */
+
+#define RX_COPY_THRESHOLD  	256
+
+struct rx_header {
+	__le16	len;
+	__le16	flags;
+	__le16	pad[12];
+};
+#define RX_HEADER_LEN	28
+
+#define RX_FLAG_OFIFO	0x00000001 /* FIFO Overflow */
+#define RX_FLAG_CRCERR	0x00000002 /* CRC Error */
+#define RX_FLAG_SERR	0x00000004 /* Receive Symbol Error */
+#define RX_FLAG_ODD	0x00000008 /* Frame has odd number of nibbles */
+#define RX_FLAG_LARGE	0x00000010 /* Frame is > RX MAX Length */
+#define RX_FLAG_MCAST	0x00000020 /* Dest is Multicast Address */
+#define RX_FLAG_BCAST	0x00000040 /* Dest is Broadcast Address */
+#define RX_FLAG_MISS	0x00000080 /* Received due to promisc mode */
+#define RX_FLAG_LAST	0x00000800 /* Last buffer in frame */
+#define RX_FLAG_ERRORS	(RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
+
+struct ring_info {
+	struct sk_buff		*skb;
+	dma_addr_t	mapping;
+};
+
+#define B44_MCAST_TABLE_SIZE		32
+/* no local PHY regs, e.g. Broadcom switches' pseudo-PHY */
+#define B44_PHY_ADDR_NO_LOCAL_PHY	BRCM_PSEUDO_PHY_ADDR
+/* no phy present at all */
+#define B44_PHY_ADDR_NO_PHY		31
+#define B44_MDC_RATIO			5000000
+
+#define	B44_STAT_REG_DECLARE		\
+	_B44(tx_good_octets)		\
+	_B44(tx_good_pkts)		\
+	_B44(tx_octets)			\
+	_B44(tx_pkts)			\
+	_B44(tx_broadcast_pkts)		\
+	_B44(tx_multicast_pkts)		\
+	_B44(tx_len_64)			\
+	_B44(tx_len_65_to_127)		\
+	_B44(tx_len_128_to_255)		\
+	_B44(tx_len_256_to_511)		\
+	_B44(tx_len_512_to_1023)	\
+	_B44(tx_len_1024_to_max)	\
+	_B44(tx_jabber_pkts)		\
+	_B44(tx_oversize_pkts)		\
+	_B44(tx_fragment_pkts)		\
+	_B44(tx_underruns)		\
+	_B44(tx_total_cols)		\
+	_B44(tx_single_cols)		\
+	_B44(tx_multiple_cols)		\
+	_B44(tx_excessive_cols)		\
+	_B44(tx_late_cols)		\
+	_B44(tx_defered)		\
+	_B44(tx_carrier_lost)		\
+	_B44(tx_pause_pkts)		\
+	_B44(rx_good_octets)		\
+	_B44(rx_good_pkts)		\
+	_B44(rx_octets)			\
+	_B44(rx_pkts)			\
+	_B44(rx_broadcast_pkts)		\
+	_B44(rx_multicast_pkts)		\
+	_B44(rx_len_64)			\
+	_B44(rx_len_65_to_127)		\
+	_B44(rx_len_128_to_255)		\
+	_B44(rx_len_256_to_511)		\
+	_B44(rx_len_512_to_1023)	\
+	_B44(rx_len_1024_to_max)	\
+	_B44(rx_jabber_pkts)		\
+	_B44(rx_oversize_pkts)		\
+	_B44(rx_fragment_pkts)		\
+	_B44(rx_missed_pkts)		\
+	_B44(rx_crc_align_errs)		\
+	_B44(rx_undersize)		\
+	_B44(rx_crc_errs)		\
+	_B44(rx_align_errs)		\
+	_B44(rx_symbol_errs)		\
+	_B44(rx_pause_pkts)		\
+	_B44(rx_nonpause_pkts)
+
+/* SW copy of device statistics, kept up to date by periodic timer
+ * which probes HW values. Check b44_stats_update if you mess with
+ * the layout
+ */
+struct b44_hw_stats {
+#define _B44(x)	u64 x;
+B44_STAT_REG_DECLARE
+#undef _B44
+	struct u64_stats_sync	syncp;
+};
+
+#define	B44_BOARDFLAG_ROBO		0x0010  /* Board has robo switch */
+#define	B44_BOARDFLAG_ADM		0x0080  /* Board has ADMtek switch */
+
+struct ssb_device;
+
+struct b44 {
+	spinlock_t		lock;
+
+	u32			imask, istat;
+
+	struct dma_desc		*rx_ring, *tx_ring;
+
+	u32			tx_prod, tx_cons;
+	u32			rx_prod, rx_cons;
+
+	struct ring_info	*rx_buffers;
+	struct ring_info	*tx_buffers;
+
+	struct napi_struct	napi;
+
+	u32			dma_offset;
+	u32			flags;
+#define B44_FLAG_B0_ANDLATER	0x00000001
+#define B44_FLAG_BUGGY_TXPTR	0x00000002
+#define B44_FLAG_REORDER_BUG	0x00000004
+#define B44_FLAG_PAUSE_AUTO	0x00008000
+#define B44_FLAG_FULL_DUPLEX	0x00010000
+#define B44_FLAG_100_BASE_T	0x00020000
+#define B44_FLAG_TX_PAUSE	0x00040000
+#define B44_FLAG_RX_PAUSE	0x00080000
+#define B44_FLAG_FORCE_LINK	0x00100000
+#define B44_FLAG_ADV_10HALF	0x01000000
+#define B44_FLAG_ADV_10FULL	0x02000000
+#define B44_FLAG_ADV_100HALF	0x04000000
+#define B44_FLAG_ADV_100FULL	0x08000000
+#define B44_FLAG_EXTERNAL_PHY	0x10000000
+#define B44_FLAG_RX_RING_HACK	0x20000000
+#define B44_FLAG_TX_RING_HACK	0x40000000
+#define B44_FLAG_WOL_ENABLE	0x80000000
+
+	u32			msg_enable;
+
+	struct timer_list	timer;
+
+	struct b44_hw_stats	hw_stats;
+
+	struct ssb_device	*sdev;
+	struct net_device	*dev;
+
+	dma_addr_t		rx_ring_dma, tx_ring_dma;
+
+	u32			rx_pending;
+	u32			tx_pending;
+	u8			phy_addr;
+	u8			force_copybreak;
+	struct phy_device	*phydev;
+	struct mii_bus		*mii_bus;
+	int			old_link;
+	struct mii_if_info	mii_if;
+};
+
+#endif /* _B44_H */
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
new file mode 100644
index 0000000..8b1929e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -0,0 +1,2922 @@
+/*
+ * Driver for BCM963xx builtin Ethernet mac
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+
+#include <bcm63xx_dev_enet.h>
+#include "bcm63xx_enet.h"
+
+static char bcm_enet_driver_name[] = "bcm63xx_enet";
+static char bcm_enet_driver_version[] = "1.0";
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/* io registers memory shared between all devices */
+static void __iomem *bcm_enet_shared_base[3];
+
+/*
+ * io helpers to access mac registers
+ */
+static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(priv->base + off);
+}
+
+static inline void enet_writel(struct bcm_enet_priv *priv,
+			       u32 val, u32 off)
+{
+	bcm_writel(val, priv->base + off);
+}
+
+/*
+ * io helpers to access switch registers
+ */
+static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(priv->base + off);
+}
+
+static inline void enetsw_writel(struct bcm_enet_priv *priv,
+				 u32 val, u32 off)
+{
+	bcm_writel(val, priv->base + off);
+}
+
+static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readw(priv->base + off);
+}
+
+static inline void enetsw_writew(struct bcm_enet_priv *priv,
+				 u16 val, u32 off)
+{
+	bcm_writew(val, priv->base + off);
+}
+
+static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readb(priv->base + off);
+}
+
+static inline void enetsw_writeb(struct bcm_enet_priv *priv,
+				 u8 val, u32 off)
+{
+	bcm_writeb(val, priv->base + off);
+}
+
+
+/* io helpers to access shared registers */
+static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base[0] + off);
+}
+
+static inline void enet_dma_writel(struct bcm_enet_priv *priv,
+				       u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base[0] + off);
+}
+
+static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
+{
+	return bcm_readl(bcm_enet_shared_base[1] +
+		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
+}
+
+static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
+				       u32 val, u32 off, int chan)
+{
+	bcm_writel(val, bcm_enet_shared_base[1] +
+		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
+}
+
+static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
+{
+	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
+}
+
+static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
+				       u32 val, u32 off, int chan)
+{
+	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
+}
+
+/*
+ * write given data into mii register and wait for transfer to end
+ * with timeout (average measured transfer time is 25us)
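+ * (returns 0 on success, 1 on timeout)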
+ */
+static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
+{
+	int limit;
+
+	/* make sure mii interrupt status is cleared */
+	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
+
+	enet_writel(priv, data, ENET_MIIDATA_REG);
+	wmb();
+
+	/* busy wait on mii interrupt bit, with timeout */
+	limit = 1000;
+	do {
+		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
+			break;
+		udelay(1);
+	} while (limit-- > 0);
+
+	return (limit < 0) ? 1 : 0;
+}
+
+/*
+ * MII internal read callback
+ */
+static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
+			      int regnum)
+{
+	u32 tmp, val;
+
+	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_READ_MASK;
+
+	if (do_mdio_op(priv, tmp))
+		return -1;
+
+	val = enet_readl(priv, ENET_MIIDATA_REG);
+	val &= 0xffff;
+	return val;
+}
+
+/*
+ * MII internal write callback
+ */
+static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
+			       int regnum, u16 value)
+{
+	u32 tmp;
+
+	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_WRITE_MASK;
+
+	(void)do_mdio_op(priv, tmp);
+	return 0;
+}
+
+/*
+ * MII read callback from phylib
+ */
+static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
+				     int regnum)
+{
+	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
+}
+
+/*
+ * MII write callback from phylib
+ */
+static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
+				      int regnum, u16 value)
+{
+	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
+}
+
+/*
+ * MII read callback from mii core
+ */
+static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
+				  int regnum)
+{
+	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
+}
+
+/*
+ * MII write callback from mii core
+ */
+static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
+				    int regnum, int value)
+{
+	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
+}
+
+/*
+ * refill rx queue
+ */
+static int bcm_enet_refill_rx(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	while (priv->rx_desc_count < priv->rx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		dma_addr_t p;
+		int desc_idx;
+		u32 len_stat;
+
+		desc_idx = priv->rx_dirty_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		if (!priv->rx_skb[desc_idx]) {
+			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
+			if (!skb)
+				break;
+			priv->rx_skb[desc_idx] = skb;
+			p = dma_map_single(&priv->pdev->dev, skb->data,
+					   priv->rx_skb_size,
+					   DMA_FROM_DEVICE);
+			desc->address = p;
+		}
+
+		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+		len_stat |= DMADESC_OWNER_MASK;
+		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
+			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
+			priv->rx_dirty_desc = 0;
+		} else {
+			priv->rx_dirty_desc++;
+		}
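+		/* publish the buffer address before handing ownership
+		 * back to the DMA engine */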
+		wmb();
+		desc->len_stat = len_stat;
+
+		priv->rx_desc_count++;
+
+		/* tell dma engine we allocated one buffer */
+		if (priv->dma_has_sram)
+			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		else
+			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
+	}
+
+	/* If rx ring is still empty, set a timer to try allocating
+	 * again at a later time. */
+	if (priv->rx_desc_count == 0 && netif_running(dev)) {
+		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
+		priv->rx_timeout.expires = jiffies + HZ;
+		add_timer(&priv->rx_timeout);
+	}
+
+	return 0;
+}
+
+/*
+ * timer callback to defer refill rx queue in case we're OOM
+ */
+static void bcm_enet_refill_rx_timer(unsigned long data)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = (struct net_device *)data;
+	priv = netdev_priv(dev);
+
+	spin_lock(&priv->rx_lock);
+	bcm_enet_refill_rx((struct net_device *)data);
+	spin_unlock(&priv->rx_lock);
+}
+
+/*
+ * extract packet from rx queue
+ */
+static int bcm_enet_receive_queue(struct net_device *dev, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int processed;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+	processed = 0;
+
+	/* don't scan the ring further than the number of refilled
+	 * descriptors */
+	if (budget > priv->rx_desc_count)
+		budget = priv->rx_desc_count;
+
+	do {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		int desc_idx;
+		u32 len_stat;
+		unsigned int len;
+
+		desc_idx = priv->rx_curr_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		/* make sure we actually read the descriptor status on
+		 * each loop iteration */
+		rmb();
+
+		len_stat = desc->len_stat;
+
+		/* break if dma ownership belongs to hw */
+		if (len_stat & DMADESC_OWNER_MASK)
+			break;
+
+		processed++;
+		priv->rx_curr_desc++;
+		if (priv->rx_curr_desc == priv->rx_ring_size)
+			priv->rx_curr_desc = 0;
+		priv->rx_desc_count--;
+
+		/* if the packet does not have the start of packet _and_
+		 * end of packet flags set, then just recycle it */
+		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
+			(DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
+			dev->stats.rx_dropped++;
+			continue;
+		}
+
+		/* recycle packet if it's marked as bad */
+		if (!priv->enet_is_sw &&
+		    unlikely(len_stat & DMADESC_ERR_MASK)) {
+			dev->stats.rx_errors++;
+
+			if (len_stat & DMADESC_OVSIZE_MASK)
+				dev->stats.rx_length_errors++;
+			if (len_stat & DMADESC_CRC_MASK)
+				dev->stats.rx_crc_errors++;
+			if (len_stat & DMADESC_UNDER_MASK)
+				dev->stats.rx_frame_errors++;
+			if (len_stat & DMADESC_OV_MASK)
+				dev->stats.rx_fifo_errors++;
+			continue;
+		}
+
+		/* valid packet */
+		skb = priv->rx_skb[desc_idx];
+		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
+		/* don't include FCS */
+		len -= 4;
+
+		if (len < copybreak) {
+			struct sk_buff *nskb;
+
+			nskb = napi_alloc_skb(&priv->napi, len);
+			if (!nskb) {
+				/* forget packet, just rearm desc */
+				dev->stats.rx_dropped++;
+				continue;
+			}
+
+			dma_sync_single_for_cpu(kdev, desc->address,
+						len, DMA_FROM_DEVICE);
+			memcpy(nskb->data, skb->data, len);
+			dma_sync_single_for_device(kdev, desc->address,
+						   len, DMA_FROM_DEVICE);
+			skb = nskb;
+		} else {
+			dma_unmap_single(&priv->pdev->dev, desc->address,
+					 priv->rx_skb_size, DMA_FROM_DEVICE);
+			priv->rx_skb[desc_idx] = NULL;
+		}
+
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		netif_receive_skb(skb);
+
+	} while (--budget > 0);
+
+	if (processed || !priv->rx_desc_count) {
+		bcm_enet_refill_rx(dev);
+
+		/* kick rx dma */
+		enet_dmac_writel(priv, priv->dma_chan_en_mask,
+					 ENETDMAC_CHANCFG, priv->rx_chan);
+	}
+
+	return processed;
+}
+
+
+/*
+ * try to or force reclaim of transmitted buffers
+ */
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+{
+	struct bcm_enet_priv *priv;
+	int released;
+
+	priv = netdev_priv(dev);
+	released = 0;
+
+	while (priv->tx_desc_count < priv->tx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+
+		/* We run in a bh and fight against start_xmit, which
+		 * is called with bh disabled  */
+		spin_lock(&priv->tx_lock);
+
+		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
+
+		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
+			spin_unlock(&priv->tx_lock);
+			break;
+		}
+
+		/* ensure the other fields of the descriptor were not read
+		 * before we checked ownership */
+		rmb();
+
+		skb = priv->tx_skb[priv->tx_dirty_desc];
+		priv->tx_skb[priv->tx_dirty_desc] = NULL;
+		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
+				 DMA_TO_DEVICE);
+
+		priv->tx_dirty_desc++;
+		if (priv->tx_dirty_desc == priv->tx_ring_size)
+			priv->tx_dirty_desc = 0;
+		priv->tx_desc_count++;
+
+		spin_unlock(&priv->tx_lock);
+
+		if (desc->len_stat & DMADESC_UNDER_MASK)
+			dev->stats.tx_errors++;
+
+		dev_kfree_skb(skb);
+		released++;
+	}
+
+	if (netif_queue_stopped(dev) && released)
+		netif_wake_queue(dev);
+
+	return released;
+}
+
+/*
+ * poll func, called by network core
+ */
+static int bcm_enet_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	int rx_work_done;
+
+	priv = container_of(napi, struct bcm_enet_priv, napi);
+	dev = priv->net_dev;
+
+	/* ack interrupts */
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->tx_chan);
+
+	/* reclaim sent skb */
+	bcm_enet_tx_reclaim(dev, 0);
+
+	spin_lock(&priv->rx_lock);
+	rx_work_done = bcm_enet_receive_queue(dev, budget);
+	spin_unlock(&priv->rx_lock);
+
+	if (rx_work_done >= budget) {
+		/* rx queue is not yet empty/clean */
+		return rx_work_done;
+	}
+
+	/* no more packets in the rx/tx queues, remove device from the poll
+	 * queue */
+	napi_complete(napi);
+
+	/* restore rx/tx interrupt */
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->tx_chan);
+
+	return rx_work_done;
+}
+
+/*
+ * mac interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+	u32 stat;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	stat = enet_readl(priv, ENET_IR_REG);
+	if (!(stat & ENET_IR_MIB))
+		return IRQ_NONE;
+
+	/* clear & mask interrupt */
+	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+
+	/* read mib registers in workqueue */
+	schedule_work(&priv->mib_update_task);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * rx/tx dma interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	/* mask rx/tx interrupts */
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * tx request callback
+ */
+static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct bcm_enet_desc *desc;
+	u32 len_stat;
+	int ret;
+
+	priv = netdev_priv(dev);
+
+	/* lock against tx reclaim */
+	spin_lock(&priv->tx_lock);
+
+	/* make sure the tx hw queue is not full; this should not happen
+	 * since we stop the queue before that is the case */
+	if (unlikely(!priv->tx_desc_count)) {
+		netif_stop_queue(dev);
+		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
+			"available?\n");
+		ret = NETDEV_TX_BUSY;
+		goto out_unlock;
+	}
+
+	/* pad small packets sent on a switch device */
+	if (priv->enet_is_sw && skb->len < 64) {
+		int needed = 64 - skb->len;
+		char *data;
+
+		if (unlikely(skb_tailroom(skb) < needed)) {
+			struct sk_buff *nskb;
+
+			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
+			if (!nskb) {
+				ret = NETDEV_TX_BUSY;
+				goto out_unlock;
+			}
+			dev_kfree_skb(skb);
+			skb = nskb;
+		}
+		data = skb_put(skb, needed);
+		memset(data, 0, needed);
+	}
+
+	/* point to the next available desc */
+	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
+	priv->tx_skb[priv->tx_curr_desc] = skb;
+
+	/* fill descriptor */
+	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+
+	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
+	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
+		DMADESC_APPEND_CRC |
+		DMADESC_OWNER_MASK;
+
+	priv->tx_curr_desc++;
+	if (priv->tx_curr_desc == priv->tx_ring_size) {
+		priv->tx_curr_desc = 0;
+		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
+	}
+	priv->tx_desc_count--;
+
+	/* the dma engine might already be polling, so make sure we update
+	 * the desc fields in the correct order */
+	wmb();
+	desc->len_stat = len_stat;
+	wmb();
+
+	/* kick tx dma */
+	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+				 ENETDMAC_CHANCFG, priv->tx_chan);
+
+	/* stop queue if no more desc available */
+	if (!priv->tx_desc_count)
+		netif_stop_queue(dev);
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	ret = NETDEV_TX_OK;
+
+out_unlock:
+	spin_unlock(&priv->tx_lock);
+	return ret;
+}
+
+/*
+ * Change the interface's mac address.
+ */
+static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct bcm_enet_priv *priv;
+	struct sockaddr *addr = p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+	/* use perfect match register 0 to store my mac address */
+	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
+		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
+	enet_writel(priv, val, ENET_PML_REG(0));
+
+	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+	val |= ENET_PMH_DATAVALID_MASK;
+	enet_writel(priv, val, ENET_PMH_REG(0));
+
+	return 0;
+}
+
+/*
+ * Change rx mode (promiscuous/allmulti) and update multicast list
+ */
+static void bcm_enet_set_multicast_list(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct netdev_hw_addr *ha;
+	u32 val;
+	int i;
+
+	priv = netdev_priv(dev);
+
+	val = enet_readl(priv, ENET_RXCFG_REG);
+
+	if (dev->flags & IFF_PROMISC)
+		val |= ENET_RXCFG_PROMISC_MASK;
+	else
+		val &= ~ENET_RXCFG_PROMISC_MASK;
+
+	/* only 3 perfect match registers are left; the first one is used
+	 * for our own mac address */
+	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
+		val |= ENET_RXCFG_ALLMCAST_MASK;
+	else
+		val &= ~ENET_RXCFG_ALLMCAST_MASK;
+
+	/* no need to set perfect match registers if we catch all
+	 * multicast */
+	if (val & ENET_RXCFG_ALLMCAST_MASK) {
+		enet_writel(priv, val, ENET_RXCFG_REG);
+		return;
+	}
+
+	i = 0;
+	netdev_for_each_mc_addr(ha, dev) {
+		u8 *dmi_addr;
+		u32 tmp;
+
+		if (i == 3)
+			break;
+		/* update perfect match registers */
+		dmi_addr = ha->addr;
+		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
+			(dmi_addr[4] << 8) | dmi_addr[5];
+		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
+
+		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
+		tmp |= ENET_PMH_DATAVALID_MASK;
+		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
+	}
+
+	for (; i < 3; i++) {
+		enet_writel(priv, 0, ENET_PML_REG(i + 1));
+		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
+	}
+
+	enet_writel(priv, val, ENET_RXCFG_REG);
+}
+
+/*
+ * set mac duplex parameters
+ */
+static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
+{
+	u32 val;
+
+	val = enet_readl(priv, ENET_TXCTL_REG);
+	if (fullduplex)
+		val |= ENET_TXCTL_FD_MASK;
+	else
+		val &= ~ENET_TXCTL_FD_MASK;
+	enet_writel(priv, val, ENET_TXCTL_REG);
+}
+
+/*
+ * set mac flow control parameters
+ */
+static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
+{
+	u32 val;
+
+	/* rx flow control (pause frame handling) */
+	val = enet_readl(priv, ENET_RXCFG_REG);
+	if (rx_en)
+		val |= ENET_RXCFG_ENFLOW_MASK;
+	else
+		val &= ~ENET_RXCFG_ENFLOW_MASK;
+	enet_writel(priv, val, ENET_RXCFG_REG);
+
+	if (!priv->dma_has_sram)
+		return;
+
+	/* tx flow control (pause frame generation) */
+	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
+	if (tx_en)
+		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	else
+		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
+}
+
+/*
+ * link changed callback (from phylib)
+ */
+static void bcm_enet_adjust_phy_link(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct phy_device *phydev;
+	int status_changed;
+
+	priv = netdev_priv(dev);
+	phydev = priv->phydev;
+	status_changed = 0;
+
+	if (priv->old_link != phydev->link) {
+		status_changed = 1;
+		priv->old_link = phydev->link;
+	}
+
+	/* reflect duplex change in mac configuration */
+	if (phydev->link && phydev->duplex != priv->old_duplex) {
+		bcm_enet_set_duplex(priv,
+				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
+		status_changed = 1;
+		priv->old_duplex = phydev->duplex;
+	}
+
+	/* enable flow control if the remote advertises it (trust phylib to
+	 * check that duplex is full) */
+	if (phydev->link && phydev->pause != priv->old_pause) {
+		int rx_pause_en, tx_pause_en;
+
+		if (phydev->pause) {
+			/* pause was advertised by lpa and us */
+			rx_pause_en = 1;
+			tx_pause_en = 1;
+		} else if (!priv->pause_auto) {
+			/* pause setting overridden by the user */
+			rx_pause_en = priv->pause_rx;
+			tx_pause_en = priv->pause_tx;
+		} else {
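+			/* pause not negotiated and autoneg enabled:
+			 * disable flow control */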
+			rx_pause_en = 0;
+			tx_pause_en = 0;
+		}
+
+		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
+		status_changed = 1;
+		priv->old_pause = phydev->pause;
+	}
+
+	if (status_changed) {
+		pr_info("%s: link %s", dev->name, phydev->link ?
+			"UP" : "DOWN");
+		if (phydev->link)
+			pr_cont(" - %d/%s - flow control %s", phydev->speed,
+			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
+			       phydev->pause == 1 ? "rx&tx" : "off");
+
+		pr_cont("\n");
+	}
+}
+
+/*
+ * link changed callback (if phylib is not used)
+ */
+static void bcm_enet_adjust_link(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	bcm_enet_set_duplex(priv, priv->force_duplex_full);
+	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
+	netif_carrier_on(dev);
+
+	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
+		dev->name,
+		priv->force_speed_100 ? 100 : 10,
+		priv->force_duplex_full ? "full" : "half",
+		priv->pause_rx ? "rx" : "off",
+		priv->pause_tx ? "tx" : "off");
+}
+
+/*
+ * open callback, allocate dma rings & buffers and start rx operation
+ */
+static int bcm_enet_open(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct sockaddr addr;
+	struct device *kdev;
+	struct phy_device *phydev;
+	int i, ret;
+	unsigned int size;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	void *p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	if (priv->has_phy) {
+		/* connect to PHY */
+		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+			 priv->mii_bus->id, priv->phy_id);
+
+		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
+				     PHY_INTERFACE_MODE_MII);
+
+		if (IS_ERR(phydev)) {
+			dev_err(kdev, "could not attach to PHY\n");
+			return PTR_ERR(phydev);
+		}
+
+		/* mask with MAC supported features */
+		phydev->supported &= (SUPPORTED_10baseT_Half |
+				      SUPPORTED_10baseT_Full |
+				      SUPPORTED_100baseT_Half |
+				      SUPPORTED_100baseT_Full |
+				      SUPPORTED_Autoneg |
+				      SUPPORTED_Pause |
+				      SUPPORTED_MII);
+		phydev->advertising = phydev->supported;
+
+		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
+			phydev->advertising |= SUPPORTED_Pause;
+		else
+			phydev->advertising &= ~SUPPORTED_Pause;
+
+		dev_info(kdev, "attached PHY at address %d [%s]\n",
+			 phydev->addr, phydev->drv->name);
+
+		priv->old_link = 0;
+		priv->old_duplex = -1;
+		priv->old_pause = -1;
+		priv->phydev = phydev;
+	}
+
+	/* mask all interrupts and request them */
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
+	if (ret)
+		goto out_phy_disconnect;
+
+	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
+			  dev->name, dev);
+	if (ret)
+		goto out_freeirq;
+
+	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+			  0, dev->name, dev);
+	if (ret)
+		goto out_freeirq_rx;
+
+	/* initialize perfect match registers */
+	for (i = 0; i < 4; i++) {
+		enet_writel(priv, 0, ENET_PML_REG(i));
+		enet_writel(priv, 0, ENET_PMH_REG(i));
+	}
+
+	/* write device mac address */
+	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
+	bcm_enet_set_mac_address(dev, &addr);
+
+	/* allocate rx dma ring */
+	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		ret = -ENOMEM;
+		goto out_freeirq_tx;
+	}
+
+	priv->rx_desc_alloc_size = size;
+	priv->rx_desc_cpu = p;
+
+	/* allocate tx dma ring */
+	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		ret = -ENOMEM;
+		goto out_free_rx_ring;
+	}
+
+	priv->tx_desc_alloc_size = size;
+	priv->tx_desc_cpu = p;
+
+	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
+			       GFP_KERNEL);
+	if (!priv->tx_skb) {
+		ret = -ENOMEM;
+		goto out_free_tx_ring;
+	}
+
+	priv->tx_desc_count = priv->tx_ring_size;
+	priv->tx_dirty_desc = 0;
+	priv->tx_curr_desc = 0;
+	spin_lock_init(&priv->tx_lock);
+
+	/* init & fill rx ring with skbs */
+	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+			       GFP_KERNEL);
+	if (!priv->rx_skb) {
+		ret = -ENOMEM;
+		goto out_free_tx_skb;
+	}
+
+	priv->rx_desc_count = 0;
+	priv->rx_dirty_desc = 0;
+	priv->rx_curr_desc = 0;
+
+	/* initialize flow control buffer allocation */
+	if (priv->dma_has_sram)
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	else
+		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMAC_BUFALLOC, priv->rx_chan);
+
+	if (bcm_enet_refill_rx(dev)) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* write rx & tx ring addresses */
+	if (priv->dma_has_sram) {
+		enet_dmas_writel(priv, priv->rx_desc_dma,
+				 ENETDMAS_RSTART_REG, priv->rx_chan);
+		enet_dmas_writel(priv, priv->tx_desc_dma,
+				 ENETDMAS_RSTART_REG, priv->tx_chan);
+	} else {
+		enet_dmac_writel(priv, priv->rx_desc_dma,
+				ENETDMAC_RSTART, priv->rx_chan);
+		enet_dmac_writel(priv, priv->tx_desc_dma,
+				ENETDMAC_RSTART, priv->tx_chan);
+	}
+
+	/* clear remaining state ram for rx & tx channel */
+	if (priv->dma_has_sram) {
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
+	} else {
+		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
+		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
+	}
+
+	/* set max rx/tx length */
+	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
+	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
+
+	/* set dma maximum burst len */
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST, priv->tx_chan);
+
+	/* set correct transmit fifo watermark */
+	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
+
+	/* set flow control low/high threshold to 1/3 / 2/3 */
+	if (priv->dma_has_sram) {
+		val = priv->rx_ring_size / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+		val = (priv->rx_ring_size * 2) / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	} else {
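+		/* no state ram: program the flow control threshold and
+		 * ring lengths per channel directly */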
+		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
+		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
+		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
+	}
+
+	/* all set, enable mac and interrupts, start dma engine and
+	 * kick rx dma channel */
+	wmb();
+	val = enet_readl(priv, ENET_CTL_REG);
+	val |= ENET_CTL_ENABLE_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+			 ENETDMAC_CHANCFG, priv->rx_chan);
+
+	/* watch "mib counters about to overflow" interrupt */
+	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+
+	/* watch "packet transferred" interrupt in rx and tx */
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IR, priv->tx_chan);
+
+	/* make sure we enable napi before rx interrupt  */
+	napi_enable(&priv->napi);
+
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_chan_int_mask,
+			 ENETDMAC_IRMASK, priv->tx_chan);
+
+	if (priv->has_phy)
+		phy_start(priv->phydev);
+	else
+		bcm_enet_adjust_link(dev);
+
+	netif_start_queue(dev);
+	return 0;
+
+out:
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+	kfree(priv->rx_skb);
+
+out_free_tx_skb:
+	kfree(priv->tx_skb);
+
+out_free_tx_ring:
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+	free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+	free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+	free_irq(dev->irq, dev);
+
+out_phy_disconnect:
+	phy_disconnect(priv->phydev);
+
+	return ret;
+}
+
+/*
+ * disable mac
+ */
+static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
+{
+	int limit;
+	u32 val;
+
+	val = enet_readl(priv, ENET_CTL_REG);
+	val |= ENET_CTL_DISABLE_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+
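+	/* wait up to ~1ms for the hardware to complete the disable
+	 * request */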
+	limit = 1000;
+	do {
+		u32 val;
+
+		val = enet_readl(priv, ENET_CTL_REG);
+		if (!(val & ENET_CTL_DISABLE_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+}
+
+/*
+ * disable dma in given channel
+ */
+static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
+{
+	int limit;
+
+	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
+
+	limit = 1000;
+	do {
+		u32 val;
+
+		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
+		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+}
+
+/*
+ * stop callback
+ */
+static int bcm_enet_stop(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	if (priv->has_phy)
+		phy_stop(priv->phydev);
+	del_timer_sync(&priv->rx_timeout);
+
+	/* mask all interrupts */
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+	/* make sure no mib update is scheduled */
+	cancel_work_sync(&priv->mib_update_task);
+
+	/* disable dma & mac */
+	bcm_enet_disable_dma(priv, priv->tx_chan);
+	bcm_enet_disable_dma(priv, priv->rx_chan);
+	bcm_enet_disable_mac(priv);
+
+	/* force reclaim of all tx buffers */
+	bcm_enet_tx_reclaim(dev, 1);
+
+	/* free the rx skb ring */
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+
+	/* free remaining allocated memory */
+	kfree(priv->rx_skb);
+	kfree(priv->tx_skb);
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+	free_irq(priv->irq_tx, dev);
+	free_irq(priv->irq_rx, dev);
+	free_irq(dev->irq, dev);
+
+	/* release phy */
+	if (priv->has_phy) {
+		phy_disconnect(priv->phydev);
+		priv->phydev = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * ethtool callbacks
+ */
+struct bcm_enet_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int mib_reg;
+};
+
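+/* expand to the size and offset of a statistic, either in the private
+ * mib copy or in the generic netdevice stats */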
+#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
+		     offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
+		     offsetof(struct net_device_stats, m)
+
+static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
+	{ "rx_packets", DEV_STAT(rx_packets), -1 },
+	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
+	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
+	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
+	{ "rx_errors", DEV_STAT(rx_errors), -1 },
+	{ "tx_errors", DEV_STAT(tx_errors), -1 },
+	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
+	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
+
+	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
+	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
+	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
+	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
+	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
+	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
+	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
+	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
+	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
+	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
+	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
+	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
+	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
+	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
+	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
+	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
+	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
+	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
+	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
+	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
+	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
+
+	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
+	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
+	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
+	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
+	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
+	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
+	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
+	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
+	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
+	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
+	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
+	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
+	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
+	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
+	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
+	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
+	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
+	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
+	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
+	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
+	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
+	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
+
+};
+
+#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)
+
+static const u32 unused_mib_regs[] = {
+	ETH_MIB_TX_ALL_OCTETS,
+	ETH_MIB_TX_ALL_PKTS,
+	ETH_MIB_RX_ALL_OCTETS,
+	ETH_MIB_RX_ALL_PKTS,
+};
+
+
+static void bcm_enet_get_drvinfo(struct net_device *netdev,
+				 struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, bcm_enet_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+}
+
+static int bcm_enet_get_sset_count(struct net_device *netdev,
+					int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCM_ENET_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bcm_enet_get_strings(struct net_device *netdev,
+				 u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcm_enet_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static void update_mib_counters(struct bcm_enet_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		u32 val;
+		char *p;
+
+		s = &bcm_enet_gstrings_stats[i];
+		if (s->mib_reg == -1)
+			continue;
+
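+		/* hardware counters clear on read (see bcm_enet_hw_preinit),
+		 * so accumulate them in our local copy */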
+		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
+		p = (char *)priv + s->stat_offset;
+
+		if (s->sizeof_stat == sizeof(u64))
+			*(u64 *)p += val;
+		else
+			*(u32 *)p += val;
+	}
+
+	/* also empty unused mib counters to make sure mib counter
+	 * overflow interrupt is cleared */
+	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
+		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
+}
+
+static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
+	mutex_lock(&priv->mib_update_lock);
+	update_mib_counters(priv);
+	mutex_unlock(&priv->mib_update_lock);
+
+	/* reenable mib interrupt */
+	if (netif_running(priv->net_dev))
+		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+}
+
+static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
+				       struct ethtool_stats *stats,
+				       u64 *data)
+{
+	struct bcm_enet_priv *priv;
+	int i;
+
+	priv = netdev_priv(netdev);
+
+	mutex_lock(&priv->mib_update_lock);
+	update_mib_counters(priv);
+
+	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		char *p;
+
+		s = &bcm_enet_gstrings_stats[i];
+		if (s->mib_reg == -1)
+			p = (char *)&netdev->stats;
+		else
+			p = (char *)priv;
+		p += s->stat_offset;
+		data[i] = (s->sizeof_stat == sizeof(u64)) ?
+			*(u64 *)p : *(u32 *)p;
+	}
+	mutex_unlock(&priv->mib_update_lock);
+}
+
+static int bcm_enet_nway_reset(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return genphy_restart_aneg(priv->phydev);
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int bcm_enet_get_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	cmd->maxrxpkt = 0;
+	cmd->maxtxpkt = 0;
+
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_ethtool_gset(priv->phydev, cmd);
+	} else {
+		cmd->autoneg = 0;
+		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
+					    ? SPEED_100 : SPEED_10));
+		cmd->duplex = (priv->force_duplex_full) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+		cmd->supported = ADVERTISED_10baseT_Half  |
+			ADVERTISED_10baseT_Full |
+			ADVERTISED_100baseT_Half |
+			ADVERTISED_100baseT_Full;
+		cmd->advertising = 0;
+		cmd->port = PORT_MII;
+		cmd->transceiver = XCVR_EXTERNAL;
+	}
+	return 0;
+}
+
+static int bcm_enet_set_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_ethtool_sset(priv->phydev, cmd);
+	} else {
+
+		if (cmd->autoneg ||
+		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
+		    cmd->port != PORT_MII)
+			return -EINVAL;
+
+		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
+		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+
+		if (netif_running(dev))
+			bcm_enet_adjust_link(dev);
+		return 0;
+	}
+}
+
+static void bcm_enet_get_ringparam(struct net_device *dev,
+				   struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enet_set_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+	int was_running;
+
+	priv = netdev_priv(dev);
+
+	was_running = 0;
+	if (netif_running(dev)) {
+		bcm_enet_stop(dev);
+		was_running = 1;
+	}
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (was_running) {
+		int err;
+
+		err = bcm_enet_open(dev);
+		if (err)
+			dev_close(dev);
+		else
+			bcm_enet_set_multicast_list(dev);
+	}
+	return 0;
+}
+
+static void bcm_enet_get_pauseparam(struct net_device *dev,
+				    struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	ecmd->autoneg = priv->pause_auto;
+	ecmd->rx_pause = priv->pause_rx;
+	ecmd->tx_pause = priv->pause_tx;
+}
+
+static int bcm_enet_set_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	if (priv->has_phy) {
+		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
+			/* asymmetric pause mode not supported,
+			 * actually possible but the integrated PHY has a
+			 * read-only asym_pause bit */
+			return -EINVAL;
+		}
+	} else {
+		/* no pause autoneg on direct mii connection */
+		if (ecmd->autoneg)
+			return -EINVAL;
+	}
+
+	priv->pause_auto = ecmd->autoneg;
+	priv->pause_rx = ecmd->rx_pause;
+	priv->pause_tx = ecmd->tx_pause;
+
+	return 0;
+}
+
+static const struct ethtool_ops bcm_enet_ethtool_ops = {
+	.get_strings		= bcm_enet_get_strings,
+	.get_sset_count		= bcm_enet_get_sset_count,
+	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
+	.nway_reset		= bcm_enet_nway_reset,
+	.get_settings		= bcm_enet_get_settings,
+	.set_settings		= bcm_enet_set_settings,
+	.get_drvinfo		= bcm_enet_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ringparam		= bcm_enet_get_ringparam,
+	.set_ringparam		= bcm_enet_set_ringparam,
+	.get_pauseparam		= bcm_enet_get_pauseparam,
+	.set_pauseparam		= bcm_enet_set_pauseparam,
+};
+
+static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_mii_ioctl(priv->phydev, rq, cmd);
+	} else {
+		struct mii_if_info mii;
+
+		mii.dev = dev;
+		mii.mdio_read = bcm_enet_mdio_read_mii;
+		mii.mdio_write = bcm_enet_mdio_write_mii;
+		mii.phy_id = 0;
+		mii.phy_id_mask = 0x3f;
+		mii.reg_num_mask = 0x1f;
+		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+	}
+}
+
+/*
+ * calculate actual hardware mtu
+ */
+static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
+{
+	int actual_mtu;
+
+	actual_mtu = mtu;
+
+	/* add ethernet header + vlan tag size */
+	actual_mtu += VLAN_ETH_HLEN;
+
+	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
+		return -EINVAL;
+
+	/*
+	 * setup maximum size before we get overflow mark in
+	 * descriptor, note that this will not prevent reception of
+	 * big frames, they will be split into multiple buffers
+	 * anyway
+	 */
+	priv->hw_mtu = actual_mtu;
+
+	/*
+	 * align rx buffer size to dma burst len, account for the FCS
+	 * since it's appended
+	 */
+	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+				  priv->dma_maxburst * 4);
+	return 0;
+}
+
+/*
+ * adjust mtu, can't be called while device is running
+ */
+static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
+	if (ret)
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * preinit hardware to allow mii operation while device is down
+ */
+static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
+{
+	u32 val;
+	int limit;
+
+	/* make sure mac is disabled */
+	bcm_enet_disable_mac(priv);
+
+	/* soft reset mac */
+	val = ENET_CTL_SRESET_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+	wmb();
+
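+	/* wait up to ~1ms for the soft reset bit to self clear */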
+	limit = 1000;
+	do {
+		val = enet_readl(priv, ENET_CTL_REG);
+		if (!(val & ENET_CTL_SRESET_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+
+	/* select correct mii interface */
+	val = enet_readl(priv, ENET_CTL_REG);
+	if (priv->use_external_mii)
+		val |= ENET_CTL_EPHYSEL_MASK;
+	else
+		val &= ~ENET_CTL_EPHYSEL_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+
+	/* turn on mdc clock */
+	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
+		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
+
+	/* set mib counters to self-clear when read */
+	val = enet_readl(priv, ENET_MIBCTL_REG);
+	val |= ENET_MIBCTL_RDCLEAR_MASK;
+	enet_writel(priv, val, ENET_MIBCTL_REG);
+}
+
+static const struct net_device_ops bcm_enet_ops = {
+	.ndo_open		= bcm_enet_open,
+	.ndo_stop		= bcm_enet_stop,
+	.ndo_start_xmit		= bcm_enet_start_xmit,
+	.ndo_set_mac_address	= bcm_enet_set_mac_address,
+	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
+	.ndo_do_ioctl		= bcm_enet_ioctl,
+	.ndo_change_mtu		= bcm_enet_change_mtu,
+};
+
+/*
+ * allocate netdevice, request register memory and register device.
+ */
+static int bcm_enet_probe(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct bcm63xx_enet_platform_data *pd;
+	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+	struct mii_bus *bus;
+	const char *clk_name;
+	int i, ret;
+
+	/* stop if shared driver failed, assume driver->probe will be
+	 * called in the same order we register devices (correct ?) */
+	if (!bcm_enet_shared_base[0])
+		return -ENODEV;
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+	if (!res_irq || !res_irq_rx || !res_irq_tx)
+		return -ENODEV;
+
+	ret = 0;
+	dev = alloc_etherdev(sizeof(*priv));
+	if (!dev)
+		return -ENOMEM;
+	priv = netdev_priv(dev);
+
+	priv->enet_is_sw = false;
+	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+
+	ret = compute_hw_mtu(priv, dev->mtu);
+	if (ret)
+		goto out;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
+		goto out;
+	}
+
+	dev->irq = priv->irq = res_irq->start;
+	priv->irq_rx = res_irq_rx->start;
+	priv->irq_tx = res_irq_tx->start;
+	priv->mac_id = pdev->id;
+
+	/* get rx & tx dma channel id for this mac */
+	if (priv->mac_id == 0) {
+		priv->rx_chan = 0;
+		priv->tx_chan = 1;
+		clk_name = "enet0";
+	} else {
+		priv->rx_chan = 2;
+		priv->tx_chan = 3;
+		clk_name = "enet1";
+	}
+
+	priv->mac_clk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(priv->mac_clk)) {
+		ret = PTR_ERR(priv->mac_clk);
+		goto out;
+	}
+	clk_prepare_enable(priv->mac_clk);
+
+	/* initialize default and fetch platform data */
+	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+
+	pd = dev_get_platdata(&pdev->dev);
+	if (pd) {
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+		priv->has_phy = pd->has_phy;
+		priv->phy_id = pd->phy_id;
+		priv->has_phy_interrupt = pd->has_phy_interrupt;
+		priv->phy_interrupt = pd->phy_interrupt;
+		priv->use_external_mii = !pd->use_internal_phy;
+		priv->pause_auto = pd->pause_auto;
+		priv->pause_rx = pd->pause_rx;
+		priv->pause_tx = pd->pause_tx;
+		priv->force_duplex_full = pd->force_duplex_full;
+		priv->force_speed_100 = pd->force_speed_100;
+		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
+		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
+		priv->dma_chan_width = pd->dma_chan_width;
+		priv->dma_has_sram = pd->dma_has_sram;
+		priv->dma_desc_shift = pd->dma_desc_shift;
+	}
+
+	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+		/* using internal PHY, enable clock */
+		priv->phy_clk = clk_get(&pdev->dev, "ephy");
+		if (IS_ERR(priv->phy_clk)) {
+			ret = PTR_ERR(priv->phy_clk);
+			priv->phy_clk = NULL;
+			goto out_put_clk_mac;
+		}
+		clk_prepare_enable(priv->phy_clk);
+	}
+
+	/* do minimal hardware init to be able to probe mii bus */
+	bcm_enet_hw_preinit(priv);
+
+	/* MII bus registration */
+	if (priv->has_phy) {
+
+		priv->mii_bus = mdiobus_alloc();
+		if (!priv->mii_bus) {
+			ret = -ENOMEM;
+			goto out_uninit_hw;
+		}
+
+		bus = priv->mii_bus;
+		bus->name = "bcm63xx_enet MII bus";
+		bus->parent = &pdev->dev;
+		bus->priv = priv;
+		bus->read = bcm_enet_mdio_read_phylib;
+		bus->write = bcm_enet_mdio_write_phylib;
+		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+
+		/* only probe the bus where we think the PHY is, because
+		 * the mdio read operation returns 0 instead of 0xffff
+		 * if a slave is not present on hw */
+		bus->phy_mask = ~(1 << priv->phy_id);
+
+		bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+					GFP_KERNEL);
+		if (!bus->irq) {
+			ret = -ENOMEM;
+			goto out_free_mdio;
+		}
+
+		if (priv->has_phy_interrupt)
+			bus->irq[priv->phy_id] = priv->phy_interrupt;
+		else
+			bus->irq[priv->phy_id] = PHY_POLL;
+
+		ret = mdiobus_register(bus);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to register mdio bus\n");
+			goto out_free_mdio;
+		}
+	} else {
+
+		/* run platform code to initialize PHY device */
+		if (pd->mii_config &&
+		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
+				   bcm_enet_mdio_write_mii)) {
+			dev_err(&pdev->dev, "unable to configure mdio bus\n");
+			goto out_uninit_hw;
+		}
+	}
+
+	spin_lock_init(&priv->rx_lock);
+
+	/* init rx timeout (used for oom) */
+	init_timer(&priv->rx_timeout);
+	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+	priv->rx_timeout.data = (unsigned long)dev;
+
+	/* init the mib update lock&work */
+	mutex_init(&priv->mib_update_lock);
+	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
+
+	/* zero mib counters */
+	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
+		enet_writel(priv, 0, ENET_MIB_REG(i));
+
+	/* register netdevice */
+	dev->netdev_ops = &bcm_enet_ops;
+	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+
+	dev->ethtool_ops = &bcm_enet_ethtool_ops;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	ret = register_netdev(dev);
+	if (ret)
+		goto out_unregister_mdio;
+
+	netif_carrier_off(dev);
+	platform_set_drvdata(pdev, dev);
+	priv->pdev = pdev;
+	priv->net_dev = dev;
+
+	return 0;
+
+out_unregister_mdio:
+	if (priv->mii_bus)
+		mdiobus_unregister(priv->mii_bus);
+
+out_free_mdio:
+	if (priv->mii_bus)
+		mdiobus_free(priv->mii_bus);
+
+out_uninit_hw:
+	/* turn off mdc clock */
+	enet_writel(priv, 0, ENET_MIISC_REG);
+	if (priv->phy_clk) {
+		clk_disable_unprepare(priv->phy_clk);
+		clk_put(priv->phy_clk);
+	}
+
+out_put_clk_mac:
+	clk_disable_unprepare(priv->mac_clk);
+	clk_put(priv->mac_clk);
+out:
+	free_netdev(dev);
+	return ret;
+}
+
+
+/*
+ * exit func, stops hardware and unregisters netdevice
+ */
+static int bcm_enet_remove(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+
+	/* stop netdevice */
+	dev = platform_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+	unregister_netdev(dev);
+
+	/* turn off mdc clock */
+	enet_writel(priv, 0, ENET_MIISC_REG);
+
+	if (priv->has_phy) {
+		mdiobus_unregister(priv->mii_bus);
+		mdiobus_free(priv->mii_bus);
+	} else {
+		struct bcm63xx_enet_platform_data *pd;
+
+		pd = dev_get_platdata(&pdev->dev);
+		if (pd && pd->mii_config)
+			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
+				       bcm_enet_mdio_write_mii);
+	}
+
+	/* disable hw block clocks */
+	if (priv->phy_clk) {
+		clk_disable_unprepare(priv->phy_clk);
+		clk_put(priv->phy_clk);
+	}
+	clk_disable_unprepare(priv->mac_clk);
+	clk_put(priv->mac_clk);
+
+	free_netdev(dev);
+	return 0;
+}
+
+struct platform_driver bcm63xx_enet_driver = {
+	.probe	= bcm_enet_probe,
+	.remove	= bcm_enet_remove,
+	.driver	= {
+		.name	= "bcm63xx_enet",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*
+ * switch mii access callbacks
+ */
+static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
+				int ext, int phy_id, int location)
+{
+	u32 reg;
+	int ret;
+
+	spin_lock_bh(&priv->enetsw_mdio_lock);
+	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
+
+	reg = ENETSW_MDIOC_RD_MASK |
+		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+		(location << ENETSW_MDIOC_REG_SHIFT);
+
+	if (ext)
+		reg |= ENETSW_MDIOC_EXT_MASK;
+
+	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
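+	/* give the switch some time to complete the transaction before
+	 * reading back the result */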
+	udelay(50);
+	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
+	spin_unlock_bh(&priv->enetsw_mdio_lock);
+	return ret;
+}
+
+static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
+				 int ext, int phy_id, int location,
+				 uint16_t data)
+{
+	u32 reg;
+
+	spin_lock_bh(&priv->enetsw_mdio_lock);
+	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
+
+	reg = ENETSW_MDIOC_WR_MASK |
+		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+		(location << ENETSW_MDIOC_REG_SHIFT);
+
+	if (ext)
+		reg |= ENETSW_MDIOC_EXT_MASK;
+
+	reg |= data;
+
+	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
+	udelay(50);
+	spin_unlock_bh(&priv->enetsw_mdio_lock);
+}
+
+static inline int bcm_enet_port_is_rgmii(int portid)
+{
+	return portid >= ENETSW_RGMII_PORT0;
+}
+
+/*
+ * enet sw PHY polling
+ */
+static void swphy_poll_timer(unsigned long data)
+{
+	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
+	unsigned int i;
+
+	for (i = 0; i < priv->num_ports; i++) {
+		struct bcm63xx_enetsw_port *port;
+		int val, j, up, advertise, lpa, speed, duplex, media;
+		int external_phy = bcm_enet_port_is_rgmii(i);
+		u8 override;
+
+		port = &priv->used_ports[i];
+		if (!port->used)
+			continue;
+
+		if (port->bypass_link)
+			continue;
+
+		/* dummy read to clear the latched link status */
+		for (j = 0; j < 2; j++)
+			val = bcmenet_sw_mdio_read(priv, external_phy,
+						   port->phy_id, MII_BMSR);
+
+		if (val == 0xffff)
+			continue;
+
+		up = (val & BMSR_LSTATUS) ? 1 : 0;
+		if (!(up ^ priv->sw_port_link[i]))
+			continue;
+
+		priv->sw_port_link[i] = up;
+
+		/* link changed */
+		if (!up) {
+			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
+				 port->name);
+			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+				      ENETSW_PORTOV_REG(i));
+			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+				      ENETSW_PTCTRL_TXDIS_MASK,
+				      ENETSW_PTCTRL_REG(i));
+			continue;
+		}
+
+		advertise = bcmenet_sw_mdio_read(priv, external_phy,
+						 port->phy_id, MII_ADVERTISE);
+
+		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
+					   MII_LPA);
+
+		/* figure out media and duplex from advertise and LPA values */
+		media = mii_nway_result(lpa & advertise);
+		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+
+		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+			speed = 100;
+		else
+			speed = 10;
+
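+		/* check gigabit ability through the extended status
+		 * registers */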
+		if (val & BMSR_ESTATEN) {
+			advertise = bcmenet_sw_mdio_read(priv, external_phy,
+						port->phy_id, MII_CTRL1000);
+
+			lpa = bcmenet_sw_mdio_read(priv, external_phy,
+						port->phy_id, MII_STAT1000);
+
+			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+					&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
+				speed = 1000;
+				duplex = (lpa & LPA_1000FULL);
+			}
+		}
+
+		dev_info(&priv->pdev->dev,
+			 "link UP on %s, %dMbps, %s-duplex\n",
+			 port->name, speed, duplex ? "full" : "half");
+
+		override = ENETSW_PORTOV_ENABLE_MASK |
+			ENETSW_PORTOV_LINKUP_MASK;
+
+		if (speed == 1000)
+			override |= ENETSW_IMPOV_1000_MASK;
+		else if (speed == 100)
+			override |= ENETSW_IMPOV_100_MASK;
+		if (duplex)
+			override |= ENETSW_IMPOV_FDX_MASK;
+
+		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+	}
+
+	priv->swphy_poll.expires = jiffies + HZ;
+	add_timer(&priv->swphy_poll);
+}
+
+/*
+ * open callback, allocate dma rings & buffers and start rx operation
+ */
+static int bcm_enetsw_open(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i, ret;
+	unsigned int size;
+	void *p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	/* mask all interrupts and request them */
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
+			  0, dev->name, dev);
+	if (ret)
+		goto out_freeirq;
+
+	if (priv->irq_tx != -1) {
+		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+				  0, dev->name, dev);
+		if (ret)
+			goto out_freeirq_rx;
+	}
+
+	/* allocate rx dma ring */
+	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate rx ring %u\n", size);
+		ret = -ENOMEM;
+		goto out_freeirq_tx;
+	}
+
+	memset(p, 0, size);
+	priv->rx_desc_alloc_size = size;
+	priv->rx_desc_cpu = p;
+
+	/* allocate tx dma ring */
+	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate tx ring\n");
+		ret = -ENOMEM;
+		goto out_free_rx_ring;
+	}
+
+	memset(p, 0, size);
+	priv->tx_desc_alloc_size = size;
+	priv->tx_desc_cpu = p;
+
+	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->tx_skb) {
+		dev_err(kdev, "cannot allocate tx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_ring;
+	}
+
+	priv->tx_desc_count = priv->tx_ring_size;
+	priv->tx_dirty_desc = 0;
+	priv->tx_curr_desc = 0;
+	spin_lock_init(&priv->tx_lock);
+
+	/* init & fill rx ring with skbs */
+	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->rx_skb) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_skb;
+	}
+
+	priv->rx_desc_count = 0;
+	priv->rx_dirty_desc = 0;
+	priv->rx_curr_desc = 0;
+
+	/* disable all ports */
+	for (i = 0; i < priv->num_ports; i++) {
+		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+			      ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+			      ENETSW_PTCTRL_TXDIS_MASK,
+			      ENETSW_PTCTRL_REG(i));
+
+		priv->sw_port_link[i] = 0;
+	}
+
+	/* reset mib */
+	val = enetsw_readb(priv, ENETSW_GMCR_REG);
+	val |= ENETSW_GMCR_RST_MIB_MASK;
+	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+	mdelay(1);
+	val &= ~ENETSW_GMCR_RST_MIB_MASK;
+	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+	mdelay(1);
+
+	/* force CPU port state */
+	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
+	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
+	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
+
+	/* enable switch forward engine */
+	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
+	val |= ENETSW_SWMODE_FWD_EN_MASK;
+	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
+
+	/* enable jumbo on all ports */
+	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
+	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
+
+	/* initialize flow control buffer allocation */
+	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+
+	if (bcm_enet_refill_rx(dev)) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* write rx & tx ring addresses */
+	enet_dmas_writel(priv, priv->rx_desc_dma,
+			 ENETDMAS_RSTART_REG, priv->rx_chan);
+	enet_dmas_writel(priv, priv->tx_desc_dma,
+			 ENETDMAS_RSTART_REG, priv->tx_chan);
+
+	/* clear remaining state ram for rx & tx channel */
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
+
+	/* set dma maximum burst len */
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST, priv->rx_chan);
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST, priv->tx_chan);
+
+	/* set flow control low/high threshold to 1/3 / 2/3 */
+	val = priv->rx_ring_size / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+	val = (priv->rx_ring_size * 2) / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+
+	/* all set, enable mac and interrupts, start dma engine and
+	 * kick rx dma channel
+	 */
+	wmb();
+	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG, priv->rx_chan);
+
+	/* watch "packet transferred" interrupt in rx and tx */
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR, priv->rx_chan);
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR, priv->tx_chan);
+
+	/* make sure we enable napi before rx interrupt  */
+	napi_enable(&priv->napi);
+
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK, priv->tx_chan);
+
+	netif_carrier_on(dev);
+	netif_start_queue(dev);
+
+	/* apply override config for bypass_link ports here. */
+	for (i = 0; i < priv->num_ports; i++) {
+		struct bcm63xx_enetsw_port *port;
+		u8 override;
+		port = &priv->used_ports[i];
+		if (!port->used)
+			continue;
+
+		if (!port->bypass_link)
+			continue;
+
+		override = ENETSW_PORTOV_ENABLE_MASK |
+			ENETSW_PORTOV_LINKUP_MASK;
+
+		switch (port->force_speed) {
+		case 1000:
+			override |= ENETSW_IMPOV_1000_MASK;
+			break;
+		case 100:
+			override |= ENETSW_IMPOV_100_MASK;
+			break;
+		case 10:
+			break;
+		default:
+			pr_warn("invalid forced speed on port %s: assume 10\n",
+			       port->name);
+			break;
+		}
+
+		if (port->force_duplex_full)
+			override |= ENETSW_IMPOV_FDX_MASK;
+
+		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+	}
+
+	/* start phy polling timer */
+	init_timer(&priv->swphy_poll);
+	priv->swphy_poll.function = swphy_poll_timer;
+	priv->swphy_poll.data = (unsigned long)priv;
+	priv->swphy_poll.expires = jiffies;
+	add_timer(&priv->swphy_poll);
+	return 0;
+
+out:
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+	kfree(priv->rx_skb);
+
+out_free_tx_skb:
+	kfree(priv->tx_skb);
+
+out_free_tx_ring:
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+	if (priv->irq_tx != -1)
+		free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+	free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+	return ret;
+}
+
+/* stop callback */
+static int bcm_enetsw_stop(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	del_timer_sync(&priv->swphy_poll);
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	del_timer_sync(&priv->rx_timeout);
+
+	/* mask all interrupts */
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
+
+	/* disable dma & mac */
+	bcm_enet_disable_dma(priv, priv->tx_chan);
+	bcm_enet_disable_dma(priv, priv->rx_chan);
+
+	/* force reclaim of all tx buffers */
+	bcm_enet_tx_reclaim(dev, 1);
+
+	/* free the rx skb ring */
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+
+	/* free remaining allocated memory */
+	kfree(priv->rx_skb);
+	kfree(priv->tx_skb);
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+	if (priv->irq_tx != -1)
+		free_irq(priv->irq_tx, dev);
+	free_irq(priv->irq_rx, dev);
+
+	return 0;
+}
+
+/* try to sort out phy external status by walking the used_port field
+ * in the bcm_enet_priv structure. in case the phy address is not
+ * assigned to any physical port on the switch, assume it is external
+ * (and yell at the user).
+ */
+static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
+{
+	int i;
+
+	for (i = 0; i < priv->num_ports; ++i) {
+		if (!priv->used_ports[i].used)
+			continue;
+		if (priv->used_ports[i].phy_id == phy_id)
+			return bcm_enet_port_is_rgmii(i);
+	}
+
+	printk_once(KERN_WARNING  "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
+		    phy_id);
+	return 1;
+}
+
+/* can't use bcmenet_sw_mdio_read directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
+				    int location)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	return bcmenet_sw_mdio_read(priv,
+				    bcm_enetsw_phy_is_external(priv, phy_id),
+				    phy_id, location);
+}
+
+/* can't use bcmenet_sw_mdio_write directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
+				      int location,
+				      int val)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
+			      phy_id, location, val);
+}
+
+static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct mii_if_info mii;
+
+	mii.dev = dev;
+	mii.mdio_read = bcm_enetsw_mii_mdio_read;
+	mii.mdio_write = bcm_enetsw_mii_mdio_write;
+	mii.phy_id = 0;
+	mii.phy_id_mask = 0x3f;
+	mii.reg_num_mask = 0x1f;
+	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+
+}
+
+static const struct net_device_ops bcm_enetsw_ops = {
+	.ndo_open		= bcm_enetsw_open,
+	.ndo_stop		= bcm_enetsw_stop,
+	.ndo_start_xmit		= bcm_enet_start_xmit,
+	.ndo_change_mtu		= bcm_enet_change_mtu,
+	.ndo_do_ioctl		= bcm_enetsw_ioctl,
+};
+
+
+static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
+	{ "rx_packets", DEV_STAT(rx_packets), -1 },
+	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
+	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
+	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
+	{ "rx_errors", DEV_STAT(rx_errors), -1 },
+	{ "tx_errors", DEV_STAT(tx_errors), -1 },
+	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
+	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
+
+	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
+	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
+	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
+	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
+	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
+	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
+	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
+	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
+	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
+	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
+	  ETHSW_MIB_RX_1024_1522 },
+	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
+	  ETHSW_MIB_RX_1523_2047 },
+	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
+	  ETHSW_MIB_RX_2048_4095 },
+	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
+	  ETHSW_MIB_RX_4096_8191 },
+	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
+	  ETHSW_MIB_RX_8192_9728 },
+	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
+	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
+	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
+	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
+	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
+
+	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
+	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
+	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
+	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
+	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
+	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
+
+};
+
+#define BCM_ENETSW_STATS_LEN	\
+	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
+
+static void bcm_enetsw_get_strings(struct net_device *netdev,
+				   u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcm_enetsw_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static int bcm_enetsw_get_sset_count(struct net_device *netdev,
+				     int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCM_ENETSW_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
+				   struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
+	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, "bcm63xx", 32);
+}
+
+static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
+					 struct ethtool_stats *stats,
+					 u64 *data)
+{
+	struct bcm_enet_priv *priv;
+	int i;
+
+	priv = netdev_priv(netdev);
+
+	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		u32 lo, hi;
+		char *p;
+		int reg;
+
+		s = &bcm_enetsw_gstrings_stats[i];
+
+		reg = s->mib_reg;
+		if (reg == -1)
+			continue;
+
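+		/* 64 bit counters are spread over two consecutive 32 bit
+		 * registers */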
+		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
+		p = (char *)priv + s->stat_offset;
+
+		if (s->sizeof_stat == sizeof(u64)) {
+			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
+			*(u64 *)p = ((u64)hi << 32 | lo);
+		} else {
+			*(u32 *)p = lo;
+		}
+	}
+
+	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		char *p;
+
+		s = &bcm_enetsw_gstrings_stats[i];
+
+		if (s->mib_reg == -1)
+			p = (char *)&netdev->stats + s->stat_offset;
+		else
+			p = (char *)priv + s->stat_offset;
+
+		data[i] = (s->sizeof_stat == sizeof(u64)) ?
+			*(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void bcm_enetsw_get_ringparam(struct net_device *dev,
+				     struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enetsw_set_ringparam(struct net_device *dev,
+				    struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+	int was_running;
+
+	priv = netdev_priv(dev);
+
+	was_running = 0;
+	if (netif_running(dev)) {
+		bcm_enetsw_stop(dev);
+		was_running = 1;
+	}
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (was_running) {
+		int err;
+
+		err = bcm_enetsw_open(dev);
+		if (err)
+			dev_close(dev);
+	}
+	return 0;
+}
+
+static struct ethtool_ops bcm_enetsw_ethtool_ops = {
+	.get_strings		= bcm_enetsw_get_strings,
+	.get_sset_count		= bcm_enetsw_get_sset_count,
+	.get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
+	.get_drvinfo		= bcm_enetsw_get_drvinfo,
+	.get_ringparam		= bcm_enetsw_get_ringparam,
+	.set_ringparam		= bcm_enetsw_set_ringparam,
+};
+
+/* allocate netdevice, request register memory and register device. */
+static int bcm_enetsw_probe(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct bcm63xx_enetsw_platform_data *pd;
+	struct resource *res_mem;
+	int ret, irq_rx, irq_tx;
+
+	/* stop if shared driver failed, assume driver->probe will be
+	 * called in the same order we register devices (correct ?)
+	 */
+	if (!bcm_enet_shared_base[0])
+		return -ENODEV;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq_rx = platform_get_irq(pdev, 0);
+	irq_tx = platform_get_irq(pdev, 1);
+	if (!res_mem || irq_rx < 0)
+		return -ENODEV;
+
+	ret = 0;
+	dev = alloc_etherdev(sizeof(*priv));
+	if (!dev)
+		return -ENOMEM;
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+
+	/* initialize default and fetch platform data */
+	priv->enet_is_sw = true;
+	priv->irq_rx = irq_rx;
+	priv->irq_tx = irq_tx;
+	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+
+	pd = dev_get_platdata(&pdev->dev);
+	if (pd) {
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+		memcpy(priv->used_ports, pd->used_ports,
+		       sizeof(pd->used_ports));
+		priv->num_ports = pd->num_ports;
+		priv->dma_has_sram = pd->dma_has_sram;
+		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
+		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
+		priv->dma_chan_width = pd->dma_chan_width;
+	}
+
+	ret = compute_hw_mtu(priv, dev->mtu);
+	if (ret)
+		goto out;
+
+	if (!request_mem_region(res_mem->start, resource_size(res_mem),
+				"bcm63xx_enetsw")) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	priv->base = ioremap(res_mem->start, resource_size(res_mem));
+	if (priv->base == NULL) {
+		ret = -ENOMEM;
+		goto out_release_mem;
+	}
+
+	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
+	if (IS_ERR(priv->mac_clk)) {
+		ret = PTR_ERR(priv->mac_clk);
+		goto out_unmap;
+	}
+	clk_enable(priv->mac_clk);
+
+	priv->rx_chan = 0;
+	priv->tx_chan = 1;
+	spin_lock_init(&priv->rx_lock);
+
+	/* init rx timeout (used for oom) */
+	init_timer(&priv->rx_timeout);
+	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+	priv->rx_timeout.data = (unsigned long)dev;
+
+	/* register netdevice */
+	dev->netdev_ops = &bcm_enetsw_ops;
+	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	spin_lock_init(&priv->enetsw_mdio_lock);
+
+	ret = register_netdev(dev);
+	if (ret)
+		goto out_put_clk;
+
+	netif_carrier_off(dev);
+	platform_set_drvdata(pdev, dev);
+	priv->pdev = pdev;
+	priv->net_dev = dev;
+
+	return 0;
+
+out_put_clk:
+	clk_put(priv->mac_clk);
+
+out_unmap:
+	iounmap(priv->base);
+
+out_release_mem:
+	release_mem_region(res_mem->start, resource_size(res_mem));
+out:
+	free_netdev(dev);
+	return ret;
+}
+
+
+/* exit func, stops hardware and unregisters netdevice */
+static int bcm_enetsw_remove(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct resource *res;
+
+	/* stop netdevice */
+	dev = platform_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+	unregister_netdev(dev);
+
+	/* release device resources */
+	iounmap(priv->base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	free_netdev(dev);
+	return 0;
+}
+
+struct platform_driver bcm63xx_enetsw_driver = {
+	.probe	= bcm_enetsw_probe,
+	.remove	= bcm_enetsw_remove,
+	.driver	= {
+		.name	= "bcm63xx_enetsw",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/* reserve & remap memory space shared between all macs */
+static int bcm_enet_shared_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *p[3];
+	unsigned int i;
+
+	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
+
+	for (i = 0; i < 3; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		p[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(p[i]))
+			return PTR_ERR(p[i]);
+	}
+
+	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
+
+	return 0;
+}
+
+static int bcm_enet_shared_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* this "shared" driver is needed because both macs share a single
+ * address space
+ */
+struct platform_driver bcm63xx_enet_shared_driver = {
+	.probe	= bcm_enet_shared_probe,
+	.remove	= bcm_enet_shared_remove,
+	.driver	= {
+		.name	= "bcm63xx_enet_shared",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/* entry point */
+static int __init bcm_enet_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&bcm63xx_enet_driver);
+	if (ret)
+		platform_driver_unregister(&bcm63xx_enet_shared_driver);
+
+	ret = platform_driver_register(&bcm63xx_enetsw_driver);
+	if (ret) {
+		platform_driver_unregister(&bcm63xx_enet_driver);
+		platform_driver_unregister(&bcm63xx_enet_shared_driver);
+	}
+
+	return ret;
+}
+
+static void __exit bcm_enet_exit(void)
+{
+	platform_driver_unregister(&bcm63xx_enet_driver);
+	platform_driver_unregister(&bcm63xx_enetsw_driver);
+	platform_driver_unregister(&bcm63xx_enet_shared_driver);
+}
+
+
+module_init(bcm_enet_init);
+module_exit(bcm_enet_exit);
+
+MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
new file mode 100644
index 0000000..f55af43
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -0,0 +1,360 @@
+#ifndef BCM63XX_ENET_H_
+#define BCM63XX_ENET_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#include <bcm63xx_regs.h>
+#include <bcm63xx_irq.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_iudma.h>
+
+/* default number of descriptors */
+#define BCMENET_DEF_RX_DESC	64
+#define BCMENET_DEF_TX_DESC	32
+
+/* maximum burst len for dma (4 bytes unit) */
+#define BCMENET_DMA_MAXBURST	16
+#define BCMENETSW_DMA_MAXBURST	8
+
+/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
+ * must be low enough so that a DMA transfer of the above burst length
+ * cannot overflow the fifo */
+#define BCMENET_TX_FIFO_TRESH	32
+
+/*
+ * hardware maximum rx/tx packet size including FCS, max mtu is
+ * actually 2047, but if we set max rx size register to 2047 we won't
+ * get overflow information if packet size is 2048 or above
+ */
+#define BCMENET_MAX_MTU		2046
+
+/*
+ * MIB Counters register definitions
+ */
+#define ETH_MIB_TX_GD_OCTETS			0
+#define ETH_MIB_TX_GD_PKTS			1
+#define ETH_MIB_TX_ALL_OCTETS			2
+#define ETH_MIB_TX_ALL_PKTS			3
+#define ETH_MIB_TX_BRDCAST			4
+#define ETH_MIB_TX_MULT				5
+#define ETH_MIB_TX_64				6
+#define ETH_MIB_TX_65_127			7
+#define ETH_MIB_TX_128_255			8
+#define ETH_MIB_TX_256_511			9
+#define ETH_MIB_TX_512_1023			10
+#define ETH_MIB_TX_1024_MAX			11
+#define ETH_MIB_TX_JAB				12
+#define ETH_MIB_TX_OVR				13
+#define ETH_MIB_TX_FRAG				14
+#define ETH_MIB_TX_UNDERRUN			15
+#define ETH_MIB_TX_COL				16
+#define ETH_MIB_TX_1_COL			17
+#define ETH_MIB_TX_M_COL			18
+#define ETH_MIB_TX_EX_COL			19
+#define ETH_MIB_TX_LATE				20
+#define ETH_MIB_TX_DEF				21
+#define ETH_MIB_TX_CRS				22
+#define ETH_MIB_TX_PAUSE			23
+
+#define ETH_MIB_RX_GD_OCTETS			32
+#define ETH_MIB_RX_GD_PKTS			33
+#define ETH_MIB_RX_ALL_OCTETS			34
+#define ETH_MIB_RX_ALL_PKTS			35
+#define ETH_MIB_RX_BRDCAST			36
+#define ETH_MIB_RX_MULT				37
+#define ETH_MIB_RX_64				38
+#define ETH_MIB_RX_65_127			39
+#define ETH_MIB_RX_128_255			40
+#define ETH_MIB_RX_256_511			41
+#define ETH_MIB_RX_512_1023			42
+#define ETH_MIB_RX_1024_MAX			43
+#define ETH_MIB_RX_JAB				44
+#define ETH_MIB_RX_OVR				45
+#define ETH_MIB_RX_FRAG				46
+#define ETH_MIB_RX_DROP				47
+#define ETH_MIB_RX_CRC_ALIGN			48
+#define ETH_MIB_RX_UND				49
+#define ETH_MIB_RX_CRC				50
+#define ETH_MIB_RX_ALIGN			51
+#define ETH_MIB_RX_SYM				52
+#define ETH_MIB_RX_PAUSE			53
+#define ETH_MIB_RX_CNTRL			54
+
+
+/*
+ * SW MIB Counters register definitions
+ */
+#define ETHSW_MIB_TX_ALL_OCT			0
+#define ETHSW_MIB_TX_DROP_PKTS			2
+#define ETHSW_MIB_TX_QOS_PKTS			3
+#define ETHSW_MIB_TX_BRDCAST			4
+#define ETHSW_MIB_TX_MULT			5
+#define ETHSW_MIB_TX_UNI			6
+#define ETHSW_MIB_TX_COL			7
+#define ETHSW_MIB_TX_1_COL			8
+#define ETHSW_MIB_TX_M_COL			9
+#define ETHSW_MIB_TX_DEF			10
+#define ETHSW_MIB_TX_LATE			11
+#define ETHSW_MIB_TX_EX_COL			12
+#define ETHSW_MIB_TX_PAUSE			14
+#define ETHSW_MIB_TX_QOS_OCT			15
+
+#define ETHSW_MIB_RX_ALL_OCT			17
+#define ETHSW_MIB_RX_UND			19
+#define ETHSW_MIB_RX_PAUSE			20
+#define ETHSW_MIB_RX_64				21
+#define ETHSW_MIB_RX_65_127			22
+#define ETHSW_MIB_RX_128_255			23
+#define ETHSW_MIB_RX_256_511			24
+#define ETHSW_MIB_RX_512_1023			25
+#define ETHSW_MIB_RX_1024_1522			26
+#define ETHSW_MIB_RX_OVR			27
+#define ETHSW_MIB_RX_JAB			28
+#define ETHSW_MIB_RX_ALIGN			29
+#define ETHSW_MIB_RX_CRC			30
+#define ETHSW_MIB_RX_GD_OCT			31
+#define ETHSW_MIB_RX_DROP			33
+#define ETHSW_MIB_RX_UNI			34
+#define ETHSW_MIB_RX_MULT			35
+#define ETHSW_MIB_RX_BRDCAST			36
+#define ETHSW_MIB_RX_SA_CHANGE			37
+#define ETHSW_MIB_RX_FRAG			38
+#define ETHSW_MIB_RX_OVR_DISC			39
+#define ETHSW_MIB_RX_SYM			40
+#define ETHSW_MIB_RX_QOS_PKTS			41
+#define ETHSW_MIB_RX_QOS_OCT			42
+#define ETHSW_MIB_RX_1523_2047			44
+#define ETHSW_MIB_RX_2048_4095			45
+#define ETHSW_MIB_RX_4096_8191			46
+#define ETHSW_MIB_RX_8192_9728			47
+
+
+struct bcm_enet_mib_counters {
+	u64 tx_gd_octets;
+	u32 tx_gd_pkts;
+	u32 tx_all_octets;
+	u32 tx_all_pkts;
+	u32 tx_unicast;
+	u32 tx_brdcast;
+	u32 tx_mult;
+	u32 tx_64;
+	u32 tx_65_127;
+	u32 tx_128_255;
+	u32 tx_256_511;
+	u32 tx_512_1023;
+	u32 tx_1024_max;
+	u32 tx_1523_2047;
+	u32 tx_2048_4095;
+	u32 tx_4096_8191;
+	u32 tx_8192_9728;
+	u32 tx_jab;
+	u32 tx_drop;
+	u32 tx_ovr;
+	u32 tx_frag;
+	u32 tx_underrun;
+	u32 tx_col;
+	u32 tx_1_col;
+	u32 tx_m_col;
+	u32 tx_ex_col;
+	u32 tx_late;
+	u32 tx_def;
+	u32 tx_crs;
+	u32 tx_pause;
+	u64 rx_gd_octets;
+	u32 rx_gd_pkts;
+	u32 rx_all_octets;
+	u32 rx_all_pkts;
+	u32 rx_brdcast;
+	u32 rx_unicast;
+	u32 rx_mult;
+	u32 rx_64;
+	u32 rx_65_127;
+	u32 rx_128_255;
+	u32 rx_256_511;
+	u32 rx_512_1023;
+	u32 rx_1024_max;
+	u32 rx_jab;
+	u32 rx_ovr;
+	u32 rx_frag;
+	u32 rx_drop;
+	u32 rx_crc_align;
+	u32 rx_und;
+	u32 rx_crc;
+	u32 rx_align;
+	u32 rx_sym;
+	u32 rx_pause;
+	u32 rx_cntrl;
+};
+
+
+struct bcm_enet_priv {
+
+	/* mac id (from platform device id) */
+	int mac_id;
+
+	/* base remapped address of device */
+	void __iomem *base;
+
+	/* mac irq, rx_dma irq, tx_dma irq */
+	int irq;
+	int irq_rx;
+	int irq_tx;
+
+	/* hw view of rx & tx dma ring */
+	dma_addr_t rx_desc_dma;
+	dma_addr_t tx_desc_dma;
+
+	/* allocated size (in bytes) for rx & tx dma ring */
+	unsigned int rx_desc_alloc_size;
+	unsigned int tx_desc_alloc_size;
+
+
+	struct napi_struct napi;
+
+	/* dma channel id for rx */
+	int rx_chan;
+
+	/* number of dma desc in rx ring */
+	int rx_ring_size;
+
+	/* cpu view of rx dma ring */
+	struct bcm_enet_desc *rx_desc_cpu;
+
+	/* current number of armed descriptors given to hardware for rx */
+	int rx_desc_count;
+
+	/* next rx descriptor to fetch from hardware */
+	int rx_curr_desc;
+
+	/* next dirty rx descriptor to refill */
+	int rx_dirty_desc;
+
+	/* size of allocated rx skbs */
+	unsigned int rx_skb_size;
+
+	/* list of skb given to hw for rx */
+	struct sk_buff **rx_skb;
+
+	/* used when rx skb allocation failed, so we defer rx queue
+	 * refill */
+	struct timer_list rx_timeout;
+
+	/* lock rx_timeout against rx normal operation */
+	spinlock_t rx_lock;
+
+
+	/* dma channel id for tx */
+	int tx_chan;
+
+	/* number of dma desc in tx ring */
+	int tx_ring_size;
+
+	/* maximum dma burst size */
+	int dma_maxburst;
+
+	/* cpu view of tx dma ring */
+	struct bcm_enet_desc *tx_desc_cpu;
+
+	/* number of available descriptor for tx */
+	int tx_desc_count;
+
+	/* next tx descriptor available */
+	int tx_curr_desc;
+
+	/* next dirty tx descriptor to reclaim */
+	int tx_dirty_desc;
+
+	/* list of skb given to hw for tx */
+	struct sk_buff **tx_skb;
+
+	/* lock used by tx reclaim and xmit */
+	spinlock_t tx_lock;
+
+
+	/* set if internal phy is ignored and external mii interface
+	 * is selected */
+	int use_external_mii;
+
+	/* set if a phy is connected, phy address must be known,
+	 * probing is not possible */
+	int has_phy;
+	int phy_id;
+
+	/* set if connected phy has an associated irq */
+	int has_phy_interrupt;
+	int phy_interrupt;
+
+	/* used when a phy is connected (phylib used) */
+	struct mii_bus *mii_bus;
+	struct phy_device *phydev;
+	int old_link;
+	int old_duplex;
+	int old_pause;
+
+	/* used when no phy is connected */
+	int force_speed_100;
+	int force_duplex_full;
+
+	/* pause parameters */
+	int pause_auto;
+	int pause_rx;
+	int pause_tx;
+
+	/* stats */
+	struct bcm_enet_mib_counters mib;
+
+	/* after mib interrupt, mib registers update is done in this
+	 * work queue */
+	struct work_struct mib_update_task;
+
+	/* lock mib update between userspace request and workqueue */
+	struct mutex mib_update_lock;
+
+	/* mac clock */
+	struct clk *mac_clk;
+
+	/* phy clock if internal phy is used */
+	struct clk *phy_clk;
+
+	/* network device reference */
+	struct net_device *net_dev;
+
+	/* platform device reference */
+	struct platform_device *pdev;
+
+	/* maximum hardware transmit/receive size */
+	unsigned int hw_mtu;
+
+	bool enet_is_sw;
+
+	/* port mapping for switch devices */
+	int num_ports;
+	struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
+	int sw_port_link[ENETSW_MAX_PORT];
+
+	/* used to poll switch port state */
+	struct timer_list swphy_poll;
+	spinlock_t enetsw_mdio_lock;
+
+	/* dma channel enable mask */
+	u32 dma_chan_en_mask;
+
+	/* dma channel interrupt mask */
+	u32 dma_chan_int_mask;
+
+	/* DMA engine has internal SRAM */
+	bool dma_has_sram;
+
+	/* dma channel width */
+	unsigned int dma_chan_width;
+
+	/* dma descriptor shift value */
+	unsigned int dma_desc_shift;
+};
+
+
+#endif /* ! BCM63XX_ENET_H_ */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 0000000..8860e74
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,2108 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* Register I/O accessor helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
+{									\
+	u32 reg = __raw_readl(priv->base + offset + off);		\
+	return reg;							\
+}									\
+static inline void name##_writel(struct bcm_sysport_priv *priv,		\
+				  u32 val, u32 off)			\
+{									\
+	__raw_writel(val, priv->base + offset + off);			\
+}									\
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers, which automatically save the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which)	\
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+						u32 mask)		\
+{									\
+	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
+	priv->irq##which##_mask &= ~(mask);				\
+}									\
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+						u32 mask)		\
+{									\
+	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
+	priv->irq##which##_mask |= (mask);				\
+}									\
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (a few hundred
+ * nanoseconds), so keep the check for 64-bit physical addresses explicit
+ * here to save one register write per packet on 32-bit platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+				     void __iomem *d,
+				     dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+		     d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+					     struct dma_desc *desc,
+					     unsigned int port)
+{
+	/* Ports are latched, so write upper address first */
+	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+				    struct ethtool_cmd *cmd)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+				    struct ethtool_cmd *cmd)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+				   netdev_features_t wanted)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
+	reg = rxchk_readl(priv, RXCHK_CONTROL);
+	if (priv->rx_chk_en)
+		reg |= RXCHK_EN;
+	else
+		reg &= ~RXCHK_EN;
+
+	/* If UniMAC forwards CRC, we need to skip over it to get
+	 * a valid CHK bit to be set in the per-packet status word
+	 */
+	if (priv->rx_chk_en && priv->crc_fwd)
+		reg |= RXCHK_SKIP_FCS;
+	else
+		reg &= ~RXCHK_SKIP_FCS;
+
+	/* If Broadcom tags are enabled (e.g: using a switch), make
+	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
+	 * tag after the Ethernet MAC Source Address.
+	 */
+	if (netdev_uses_dsa(dev))
+		reg |= RXCHK_BRCM_TAG_EN;
+	else
+		reg &= ~RXCHK_BRCM_TAG_EN;
+
+	rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+	return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+				   netdev_features_t wanted)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	/* Hardware transmit checksum requires us to enable the Transmit status
+	 * block prepended to the packet contents
+	 */
+	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	if (priv->tsb_en)
+		reg |= TSB_EN;
+	else
+		reg &= ~TSB_EN;
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
+	return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	netdev_features_t changed = features ^ dev->features;
+	netdev_features_t wanted = dev->wanted_features;
+	int ret = 0;
+
+	if (changed & NETIF_F_RXCSUM)
+		ret = bcm_sysport_set_rx_csum(dev, wanted);
+	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+		ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+	return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+	/* general stats */
+	STAT_NETDEV(rx_packets),
+	STAT_NETDEV(tx_packets),
+	STAT_NETDEV(rx_bytes),
+	STAT_NETDEV(tx_bytes),
+	STAT_NETDEV(rx_errors),
+	STAT_NETDEV(tx_errors),
+	STAT_NETDEV(rx_dropped),
+	STAT_NETDEV(tx_dropped),
+	STAT_NETDEV(multicast),
+	/* UniMAC RSV counters */
+	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+	STAT_MIB_RX("rx_multicast", mib.rx.mca),
+	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+	STAT_MIB_RX("rx_control", mib.rx.cf),
+	STAT_MIB_RX("rx_pause", mib.rx.pf),
+	STAT_MIB_RX("rx_unknown", mib.rx.uo),
+	STAT_MIB_RX("rx_align", mib.rx.aln),
+	STAT_MIB_RX("rx_outrange", mib.rx.flr),
+	STAT_MIB_RX("rx_code", mib.rx.cde),
+	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+	STAT_MIB_RX("rx_unicast", mib.rx.uc),
+	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+	/* UniMAC TSV counters */
+	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+	STAT_MIB_TX("tx_multicast", mib.tx.mca),
+	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+	STAT_MIB_TX("tx_pause", mib.tx.pf),
+	STAT_MIB_TX("tx_control", mib.tx.cf),
+	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+	STAT_MIB_TX("tx_defer", mib.tx.drf),
+	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+	STAT_MIB_TX("tx_single_col", mib.tx.scl),
+	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+	STAT_MIB_TX("tx_frags", mib.tx.frg),
+	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+	STAT_MIB_TX("tx_unicast", mib.tx.uc),
+	/* UniMAC RUNT counters */
+	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+	/* RXCHK misc statistics */
+	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+		   RXCHK_OTHER_DISC_CNTR),
+	/* RBUF misc statistics */
+	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+};
+
+#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, "0.1", sizeof(info->version));
+	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCM_SYSPORT_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+				    u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcm_sysport_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+	int i, j = 0;
+
+	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+		const struct bcm_sysport_stats *s;
+		u8 offset = 0;
+		u32 val = 0;
+		char *p;
+
+		s = &bcm_sysport_gstrings_stats[i];
+		switch (s->type) {
+		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_SOFT:
+			continue;
+		case BCM_SYSPORT_STAT_MIB_RX:
+		case BCM_SYSPORT_STAT_MIB_TX:
+		case BCM_SYSPORT_STAT_RUNT:
+			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+				offset = UMAC_MIB_STAT_OFFSET;
+			val = umac_readl(priv, UMAC_MIB_START + j + offset);
+			break;
+		case BCM_SYSPORT_STAT_RXCHK:
+			val = rxchk_readl(priv, s->reg_offset);
+			if (val == ~0)
+				rxchk_writel(priv, 0, s->reg_offset);
+			break;
+		case BCM_SYSPORT_STAT_RBUF:
+			val = rbuf_readl(priv, s->reg_offset);
+			if (val == ~0)
+				rbuf_writel(priv, 0, s->reg_offset);
+			break;
+		}
+
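+		/* j accumulates the byte offset used to address MIB-type
+		 * counters relative to UMAC_MIB_START
+		 */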
+		j += s->stat_sizeof;
+		p = (char *)priv + s->stat_offset;
+		*(u32 *)p = val;
+	}
+
+	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (netif_running(dev))
+		bcm_sysport_update_mib_counters(priv);
+
+	for (i =  0; i < BCM_SYSPORT_STATS_LEN; i++) {
+		const struct bcm_sysport_stats *s;
+		char *p;
+
+		s = &bcm_sysport_gstrings_stats[i];
+		if (s->type == BCM_SYSPORT_STAT_NETDEV)
+			p = (char *)&dev->stats;
+		else
+			p = (char *)priv;
+		p += s->stat_offset;
+		data[i] = *(u32 *)p;
+	}
+}
+
+static void bcm_sysport_get_wol(struct net_device *dev,
+				struct ethtool_wolinfo *wol)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+	wol->wolopts = priv->wolopts;
+
+	if (!(priv->wolopts & WAKE_MAGICSECURE))
+		return;
+
+	/* Return the programmed SecureOn password */
+	reg = umac_readl(priv, UMAC_PSW_MS);
+	put_unaligned_be16(reg, &wol->sopass[0]);
+	reg = umac_readl(priv, UMAC_PSW_LS);
+	put_unaligned_be32(reg, &wol->sopass[2]);
+}
+
+static int bcm_sysport_set_wol(struct net_device *dev,
+			       struct ethtool_wolinfo *wol)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+
+	if (!device_can_wakeup(kdev))
+		return -ENOTSUPP;
+
+	if (wol->wolopts & ~supported)
+		return -EINVAL;
+
+	/* Program the SecureOn password */
+	if (wol->wolopts & WAKE_MAGICSECURE) {
+		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+			    UMAC_PSW_MS);
+		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+			    UMAC_PSW_LS);
+	}
+
+	/* Flag the device and relevant IRQ as wakeup capable */
+	if (wol->wolopts) {
+		device_set_wakeup_enable(kdev, 1);
+		if (priv->wol_irq_disabled)
+			enable_irq_wake(priv->wol_irq);
+		priv->wol_irq_disabled = 0;
+	} else {
+		device_set_wakeup_enable(kdev, 0);
+		/* Avoid unbalanced disable_irq_wake calls */
+		if (!priv->wol_irq_disabled)
+			disable_irq_wake(priv->wol_irq);
+		priv->wol_irq_disabled = 1;
+	}
+
+	priv->wolopts = wol->wolopts;
+
+	return 0;
+}
+
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+	return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+				    struct ethtool_coalesce *ec)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	u32 reg;
+
+	/* Base system clock is 125MHz, DMA timeout is this reference clock
+	 * divided by 1024, which yields roughly 8.192 us, and our maximum value
+	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
+	 */
+	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+		reg &= ~(RING_INTR_THRESH_MASK |
+			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+		reg |= ec->tx_max_coalesced_frames;
+		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+			 RING_TIMEOUT_SHIFT;
+		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+	}
+
+	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+	reg &= ~(RDMA_INTR_THRESH_MASK |
+		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+	reg |= ec->rx_max_coalesced_frames;
+	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+			    RDMA_TIMEOUT_SHIFT;
+	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+	return 0;
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+	dev_kfree_skb_any(cb->skb);
+	cb->skb = NULL;
+	dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+					     struct bcm_sysport_cb *cb)
+{
+	struct device *kdev = &priv->pdev->dev;
+	struct net_device *ndev = priv->netdev;
+	struct sk_buff *skb, *rx_skb;
+	dma_addr_t mapping;
+
+	/* Allocate a new SKB for a new packet */
+	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+	if (!skb) {
+		priv->mib.alloc_rx_buff_failed++;
+		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+		return NULL;
+	}
+
+	mapping = dma_map_single(kdev, skb->data,
+				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
+	if (dma_mapping_error(kdev, mapping)) {
+		priv->mib.rx_dma_failed++;
+		dev_kfree_skb_any(skb);
+		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+		return NULL;
+	}
+
+	/* Grab the current SKB on the ring */
+	rx_skb = cb->skb;
+	if (likely(rx_skb))
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+	/* Put the new SKB on the ring */
+	cb->skb = skb;
+	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_desc_set_addr(priv, cb->bd_addr, mapping);
+
+	netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+	/* Return the current SKB to the caller */
+	return rx_skb;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+	struct bcm_sysport_cb *cb;
+	struct sk_buff *skb;
+	unsigned int i;
+
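+	/* attach a fresh skb to every RX control block; any previous skb
+	 * returned by the refill is not needed here and is freed
+	 */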
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = &priv->rx_cbs[i];
+		skb = bcm_sysport_rx_refill(priv, cb);
+		if (skb)
+			dev_kfree_skb(skb);
+		if (!cb->skb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+					unsigned int budget)
+{
+	struct net_device *ndev = priv->netdev;
+	unsigned int processed = 0, to_process;
+	struct bcm_sysport_cb *cb;
+	struct sk_buff *skb;
+	unsigned int p_index;
+	u16 len, status;
+	struct bcm_rsb *rsb;
+
+	/* Determine how much we should process since last call */
+	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+	p_index &= RDMA_PROD_INDEX_MASK;
+
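+	/* the producer index wraps at RDMA_CONS_INDEX_MASK + 1, so account
+	 * for the wrap when it is behind our consumer index
+	 */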
+	if (p_index < priv->rx_c_index)
+		to_process = (RDMA_CONS_INDEX_MASK + 1) -
+			priv->rx_c_index + p_index;
+	else
+		to_process = p_index - priv->rx_c_index;
+
+	netif_dbg(priv, rx_status, ndev,
+		  "p_index=%d rx_c_index=%d to_process=%d\n",
+		  p_index, priv->rx_c_index, to_process);
+
+	while ((processed < to_process) && (processed < budget)) {
+		cb = &priv->rx_cbs[priv->rx_read_ptr];
+		skb = bcm_sysport_rx_refill(priv, cb);
+
+		/* We do not have a backing SKB, so we do not have a
+		 * corresponding DMA mapping for this incoming packet since
+		 * bcm_sysport_rx_refill always either has both an skb and a
+		 * mapping or none.
+		 */
+		if (unlikely(!skb)) {
+			netif_err(priv, rx_err, ndev, "out of memory!\n");
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			goto next;
+		}
+
+		/* Extract the Receive Status Block prepended */
+		rsb = (struct bcm_rsb *)skb->data;
+		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+			  DESC_STATUS_MASK;
+
+		netif_dbg(priv, rx_status, ndev,
+			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+			  p_index, priv->rx_c_index, priv->rx_read_ptr,
+			  len, status);
+
+		if (unlikely(len > RX_BUF_LENGTH)) {
+			netif_err(priv, rx_status, ndev, "oversized packet\n");
+			ndev->stats.rx_length_errors++;
+			ndev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
+		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
+		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+			netif_err(priv, rx_err, ndev, "error packet\n");
+			if (status & RX_STATUS_OVFLOW)
+				ndev->stats.rx_over_errors++;
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
+		skb_put(skb, len);
+
+		/* Hardware validated our checksum */
+		if (likely(status & DESC_L4_CSUM))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		/* Hardware pre-pends packets with 2 bytes before the Ethernet
+		 * header, plus we have the Receive Status Block; strip all of
+		 * this off the SKB.
+		 */
+		skb_pull(skb, sizeof(*rsb) + 2);
+		len -= (sizeof(*rsb) + 2);
+
+		/* UniMAC may forward CRC */
+		if (priv->crc_fwd) {
+			skb_trim(skb, len - ETH_FCS_LEN);
+			len -= ETH_FCS_LEN;
+		}
+
+		skb->protocol = eth_type_trans(skb, ndev);
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += len;
+
+		napi_gro_receive(&priv->napi, skb);
+next:
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
+	}
+
+	return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+				       struct bcm_sysport_cb *cb,
+				       unsigned int *bytes_compl,
+				       unsigned int *pkts_compl)
+{
+	struct device *kdev = &priv->pdev->dev;
+	struct net_device *ndev = priv->netdev;
+
+	if (cb->skb) {
+		ndev->stats.tx_bytes += cb->skb->len;
+		*bytes_compl += cb->skb->len;
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 dma_unmap_len(cb, dma_len),
+				 DMA_TO_DEVICE);
+		ndev->stats.tx_packets++;
+		(*pkts_compl)++;
+		bcm_sysport_free_cb(cb);
+	/* SKB fragment */
+	} else if (dma_unmap_addr(cb, dma_addr)) {
+		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+					     struct bcm_sysport_tx_ring *ring)
+{
+	struct net_device *ndev = priv->netdev;
+	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	struct bcm_sysport_cb *cb;
+	u32 hw_ind;
+
+	/* Compute how many descriptors have been processed since last call */
+	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+	last_c_index = ring->c_index;
+	num_tx_cbs = ring->size;
+
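+	/* the hardware consumer index is free-running; reduce it modulo the
+	 * ring size and handle wrap-around against the last reclaimed index
+	 */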
+	c_index &= (num_tx_cbs - 1);
+
+	if (c_index >= last_c_index)
+		last_tx_cn = c_index - last_c_index;
+	else
+		last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+	netif_dbg(priv, tx_done, ndev,
+		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+		  ring->index, c_index, last_tx_cn, last_c_index);
+
+	while (last_tx_cn-- > 0) {
+		cb = ring->cbs + last_c_index;
+		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+		ring->desc_count++;
+		last_c_index++;
+		last_c_index &= (num_tx_cbs - 1);
+	}
+
+	ring->c_index = c_index;
+
+	netif_dbg(priv, tx_done, ndev,
+		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+		  ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+	return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+					   struct bcm_sysport_tx_ring *ring)
+{
+	struct netdev_queue *txq;
+	unsigned int released;
+	unsigned long flags;
+
+	txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
+	spin_lock_irqsave(&ring->lock, flags);
+	released = __bcm_sysport_tx_reclaim(priv, ring);
+	if (released)
+		netif_tx_wake_queue(txq);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+				 struct bcm_sysport_tx_ring *ring)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	__bcm_sysport_tx_reclaim(priv, ring);
+	spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_sysport_tx_ring *ring =
+		container_of(napi, struct bcm_sysport_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		/* re-enable TX interrupt */
+		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+
+		return 0;
+	}
+
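+	/* work remains; returning budget keeps NAPI polling scheduled */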
+	return budget;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+	unsigned int q;
+
+	for (q = 0; q < priv->netdev->num_tx_queues; q++)
+		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_sysport_priv *priv =
+		container_of(napi, struct bcm_sysport_priv, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcm_sysport_desc_rx(priv, budget);
+
+	priv->rx_c_index += work_done;
+	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		/* re-enable RX interrupts */
+		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+	}
+
+	return work_done;
+}
+
+static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+{
+	u32 reg;
+
+	/* Stop monitoring MPD interrupt */
+	intrl2_0_mask_set(priv, INTRL2_0_MPD);
+
+	/* Clear the MagicPacket detection logic */
+	reg = umac_readl(priv, UMAC_MPD_CTRL);
+	reg &= ~MPD_EN;
+	umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+}
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+	if (unlikely(priv->irq0_stat == 0)) {
+		netdev_warn(priv->netdev, "spurious RX interrupt\n");
+		return IRQ_NONE;
+	}
+
+	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+		if (likely(napi_schedule_prep(&priv->napi))) {
+			/* disable RX interrupts */
+			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	/* TX ring is full, perform a full reclaim since we do not know
+	 * which one would trigger this interrupt
+	 */
+	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+		bcm_sysport_tx_reclaim_all(priv);
+
+	if (priv->irq0_stat & INTRL2_0_MPD) {
+		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+		bcm_sysport_resume_from_wol(priv);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_tx_ring *txr;
+	unsigned int ring;
+
+	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+	if (unlikely(priv->irq1_stat == 0)) {
+		netdev_warn(priv->netdev, "spurious TX interrupt\n");
+		return IRQ_NONE;
+	}
+
+	for (ring = 0; ring < dev->num_tx_queues; ring++) {
+		if (!(priv->irq1_stat & BIT(ring)))
+			continue;
+
+		txr = &priv->tx_rings[ring];
+
+		if (likely(napi_schedule_prep(&txr->napi))) {
+			intrl2_1_mask_set(priv, BIT(ring));
+			__napi_schedule(&txr->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
+{
+	struct bcm_sysport_priv *priv = dev_id;
+
+	pm_wakeup_event(&priv->pdev->dev, 0);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	disable_irq(priv->irq0);
+	bcm_sysport_rx_isr(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	disable_irq(priv->irq1);
+	bcm_sysport_tx_isr(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+					      struct net_device *dev)
+{
+	struct sk_buff *nskb;
+	struct bcm_tsb *tsb;
+	u32 csum_info;
+	u8 ip_proto;
+	u16 csum_start;
+	u16 ip_ver;
+
+	/* Re-allocate SKB if needed */
+	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+		dev_kfree_skb(skb);
+		if (!nskb) {
+			dev->stats.tx_errors++;
+			dev->stats.tx_dropped++;
+			return NULL;
+		}
+		skb = nskb;
+	}
+
+	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+	/* Zero-out TSB by default */
+	memset(tsb, 0, sizeof(*tsb));
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		ip_ver = htons(skb->protocol);
+		switch (ip_ver) {
+		case ETH_P_IP:
+			ip_proto = ip_hdr(skb)->protocol;
+			break;
+		case ETH_P_IPV6:
+			ip_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			return skb;
+		}
+
+		/* Get the checksum offset and the L4 (transport) offset */
+		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+		csum_info |= (csum_start << L4_PTR_SHIFT);
+
+		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+			csum_info |= L4_LENGTH_VALID;
+			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+				csum_info |= L4_UDP;
+		} else {
+			csum_info = 0;
+		}
+
+		tsb->l4_ptr_dest_map = csum_info;
+	}
+
+	return skb;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+				    struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	struct bcm_sysport_tx_ring *ring;
+	struct bcm_sysport_cb *cb;
+	struct netdev_queue *txq;
+	struct dma_desc *desc;
+	unsigned int skb_len;
+	unsigned long flags;
+	dma_addr_t mapping;
+	u32 len_status;
+	u16 queue;
+	int ret;
+
+	queue = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, queue);
+	ring = &priv->tx_rings[queue];
+
+	/* lock against tx reclaim in BH context and TX ring full interrupt */
+	spin_lock_irqsave(&ring->lock, flags);
+	if (unlikely(ring->desc_count == 0)) {
+		netif_tx_stop_queue(txq);
+		netdev_err(dev, "queue %d awake and ring full!\n", queue);
+		ret = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	/* Insert TSB and checksum infos */
+	if (priv->tsb_en) {
+		skb = bcm_sysport_insert_tsb(skb, dev);
+		if (!skb) {
+			ret = NETDEV_TX_OK;
+			goto out;
+		}
+	}
+
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(kdev, mapping)) {
+		priv->mib.tx_dma_failed++;
+		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+			  skb->data, skb_len);
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	/* Remember the SKB for future freeing */
+	cb = &ring->cbs[ring->curr_desc];
+	cb->skb = skb;
+	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_unmap_len_set(cb, dma_len, skb_len);
+
+	/* Fetch a descriptor entry from our pool */
+	desc = ring->desc_cpu;
+
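+	/* pack the upper address bits, length and status flags together */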
+	desc->addr_lo = lower_32_bits(mapping);
+	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+	len_status |= (skb_len << DESC_LEN_SHIFT);
+	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+		       DESC_STATUS_SHIFT;
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+	ring->curr_desc++;
+	if (ring->curr_desc == ring->size)
+		ring->curr_desc = 0;
+	ring->desc_count--;
+
+	/* Ensure write completion of the descriptor status/length
+	 * in DRAM before the System Port WRITE_PORT register latches
+	 * the value
+	 */
+	wmb();
+	desc->addr_status_len = len_status;
+	wmb();
+
+	/* Write this descriptor address to the RING write port */
+	tdma_port_write_desc_addr(priv, desc, ring->index);
+
+	/* Check ring space and update SW control flow */
+	if (ring->desc_count == 0)
+		netif_tx_stop_queue(txq);
+
+	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+		  ring->index, ring->desc_count, ring->curr_desc);
+
+	ret = NETDEV_TX_OK;
+out:
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+	netdev_warn(dev, "transmit timeout!\n");
+
+	dev->trans_start = jiffies;
+	dev->stats.tx_errors++;
+
+	netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	unsigned int changed = 0;
+	u32 cmd_bits = 0, reg;
+
+	if (priv->old_link != phydev->link) {
+		changed = 1;
+		priv->old_link = phydev->link;
+	}
+
+	if (priv->old_duplex != phydev->duplex) {
+		changed = 1;
+		priv->old_duplex = phydev->duplex;
+	}
+
+	switch (phydev->speed) {
+	case SPEED_2500:
+		cmd_bits = CMD_SPEED_2500;
+		break;
+	case SPEED_1000:
+		cmd_bits = CMD_SPEED_1000;
+		break;
+	case SPEED_100:
+		cmd_bits = CMD_SPEED_100;
+		break;
+	case SPEED_10:
+		cmd_bits = CMD_SPEED_10;
+		break;
+	default:
+		break;
+	}
+	cmd_bits <<= CMD_SPEED_SHIFT;
+
+	if (phydev->duplex == DUPLEX_HALF)
+		cmd_bits |= CMD_HD_EN;
+
+	if (priv->old_pause != phydev->pause) {
+		changed = 1;
+		priv->old_pause = phydev->pause;
+	}
+
+	if (!phydev->pause)
+		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+	if (!changed)
+		return;
+
+	if (phydev->link) {
+		reg = umac_readl(priv, UMAC_CMD);
+		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+			CMD_TX_PAUSE_IGNORE);
+		reg |= cmd_bits;
+		umac_writel(priv, reg, UMAC_CMD);
+	}
+
+	phy_print_status(priv->phydev);
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+				    unsigned int index)
+{
+	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+	struct device *kdev = &priv->pdev->dev;
+	size_t size;
+	void *p;
+	u32 reg;
+
+	/* Simple descriptors partitioning for now */
+	size = 256;
+
+	/* We just need one DMA descriptor which is DMA-able, since writing to
+	 * the port will allocate a new descriptor in its internal linked-list
+	 */
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+				GFP_KERNEL);
+	if (!p) {
+		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+	if (!ring->cbs) {
+		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize SW view of the ring */
+	spin_lock_init(&ring->lock);
+	ring->priv = priv;
+	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+	ring->index = index;
+	ring->size = size;
+	ring->alloc_size = ring->size;
+	ring->desc_cpu = p;
+	ring->desc_count = ring->size;
+	ring->curr_desc = 0;
+
+	/* Initialize HW ring */
+	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+	/* Program the number of descriptors as MAX_THRESHOLD and half of
+	 * its size for the hysteresis trigger
+	 */
+	tdma_writel(priv, ring->size |
+			1 << RING_HYST_THRESH_SHIFT,
+			TDMA_DESC_RING_MAX_HYST(index));
+
+	/* Enable the ring queue in the arbiter */
+	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+	reg |= (1 << index);
+	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+	napi_enable(&ring->napi);
+
+	netif_dbg(priv, hw, priv->netdev,
+		  "TDMA cfg, size=%d, desc_cpu=%p\n",
+		  ring->size, ring->desc_cpu);
+
+	return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+				     unsigned int index)
+{
+	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+	struct device *kdev = &priv->pdev->dev;
+	u32 reg;
+
+	/* Caller should stop the TDMA engine */
+	reg = tdma_readl(priv, TDMA_STATUS);
+	if (!(reg & TDMA_DISABLED))
+		netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+	 * fail, so by checking this pointer we know whether the TX ring was
+	 * fully initialized or not.
+	 */
+	if (!ring->cbs)
+		return;
+
+	napi_disable(&ring->napi);
+	netif_napi_del(&ring->napi);
+
+	bcm_sysport_tx_clean(priv, ring);
+
+	kfree(ring->cbs);
+	ring->cbs = NULL;
+
+	if (ring->desc_dma) {
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
+		ring->desc_dma = 0;
+	}
+	ring->size = 0;
+	ring->alloc_size = 0;
+
+	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+				  unsigned int enable)
+{
+	unsigned int timeout = 1000;
+	u32 reg;
+
+	reg = rdma_readl(priv, RDMA_CONTROL);
+	if (enable)
+		reg |= RDMA_EN;
+	else
+		reg &= ~RDMA_EN;
+	rdma_writel(priv, reg, RDMA_CONTROL);
+
+	/* Poll for RDMA enable/disable completion */
+	do {
+		reg = rdma_readl(priv, RDMA_STATUS);
+		if (!!(reg & RDMA_DISABLED) == !enable)
+			return 0;
+		usleep_range(1000, 2000);
+	} while (timeout-- > 0);
+
+	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+	return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+				  unsigned int enable)
+{
+	unsigned int timeout = 1000;
+	u32 reg;
+
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	if (enable)
+		reg |= TDMA_EN;
+	else
+		reg &= ~TDMA_EN;
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
+	/* Poll for TDMA enable/disable completion */
+	do {
+		reg = tdma_readl(priv, TDMA_STATUS);
+		if (!!(reg & TDMA_DISABLED) == !enable)
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout-- > 0);
+
+	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+	return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+	struct bcm_sysport_cb *cb;
+	u32 reg;
+	int ret;
+	int i;
+
+	/* Initialize SW view of the RX ring */
+	priv->num_rx_bds = NUM_RX_DESC;
+	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+	priv->rx_c_index = 0;
+	priv->rx_read_ptr = 0;
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
+				GFP_KERNEL);
+	if (!priv->rx_cbs) {
+		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+	}
+
+	ret = bcm_sysport_alloc_rx_bufs(priv);
+	if (ret) {
+		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+		return ret;
+	}
+
+	/* Initialize HW, ensure RDMA is disabled */
+	reg = rdma_readl(priv, RDMA_STATUS);
+	if (!(reg & RDMA_DISABLED))
+		rdma_enable_set(priv, 0);
+
+	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+	rdma_writel(priv, 0, RDMA_PROD_INDEX);
+	rdma_writel(priv, 0, RDMA_CONS_INDEX);
+	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+	/* Operate the queue in ring mode */
+	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+	rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+	netif_dbg(priv, hw, priv->netdev,
+		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+		  priv->num_rx_bds, priv->rx_bds);
+
+	return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+	struct bcm_sysport_cb *cb;
+	unsigned int i;
+	u32 reg;
+
+	/* Caller should ensure RDMA is disabled */
+	reg = rdma_readl(priv, RDMA_STATUS);
+	if (!(reg & RDMA_DISABLED))
+		netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = &priv->rx_cbs[i];
+		if (dma_unmap_addr(cb, dma_addr))
+			dma_unmap_single(&priv->pdev->dev,
+					 dma_unmap_addr(cb, dma_addr),
+					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
+		bcm_sysport_free_cb(cb);
+	}
+
+	kfree(priv->rx_cbs);
+	priv->rx_cbs = NULL;
+
+	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	reg = umac_readl(priv, UMAC_CMD);
+	if (dev->flags & IFF_PROMISC)
+		reg |= CMD_PROMISC;
+	else
+		reg &= ~CMD_PROMISC;
+	umac_writel(priv, reg, UMAC_CMD);
+
+	/* No support for ALLMULTI */
+	if (dev->flags & IFF_ALLMULTI)
+		return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+				   u32 mask, unsigned int enable)
+{
+	u32 reg;
+
+	reg = umac_readl(priv, UMAC_CMD);
+	if (enable)
+		reg |= mask;
+	else
+		reg &= ~mask;
+	umac_writel(priv, reg, UMAC_CMD);
+
+	/* UniMAC stops on a packet boundary, wait for a full-sized packet
+	 * to be processed (1 msec).
+	 */
+	if (enable == 0)
+		usleep_range(1000, 2000);
+}
+
+static inline void umac_reset(struct bcm_sysport_priv *priv)
+{
+	u32 reg;
+
+	reg = umac_readl(priv, UMAC_CMD);
+	reg |= CMD_SW_RESET;
+	umac_writel(priv, reg, UMAC_CMD);
+	udelay(10);
+	reg = umac_readl(priv, UMAC_CMD);
+	reg &= ~CMD_SW_RESET;
+	umac_writel(priv, reg, UMAC_CMD);
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+			     unsigned char *addr)
+{
+	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+			(addr[2] << 8) | addr[3], UMAC_MAC0);
+	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
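+	/* assert the RX and TX flush controls, wait 1ms for the flush to
+	 * complete, then de-assert them
+	 */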
+	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+	mdelay(1);
+	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_change_mac(struct net_device *dev, void *p)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	/* interface is disabled, changes to MAC will be reflected on next
+	 * open call
+	 */
+	if (!netif_running(dev))
+		return 0;
+
+	umac_set_hw_addr(priv, dev->dev_addr);
+
+	return 0;
+}
+
+static void bcm_sysport_netif_start(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	/* Enable NAPI */
+	napi_enable(&priv->napi);
+
+	/* Enable RX interrupt and TX ring full interrupt */
+	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+	phy_start(priv->phydev);
+
+	/* Enable TX interrupts for the 32 TXQs */
+	intrl2_1_mask_clear(priv, 0xffffffff);
+
+	/* Last call before we start the real business */
+	netif_tx_start_all_queues(dev);
+}
+
+static void rbuf_init(struct bcm_sysport_priv *priv)
+{
+	u32 reg;
+
+	reg = rbuf_readl(priv, RBUF_CONTROL);
+	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+	rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	int ret;
+
+	/* Reset UniMAC */
+	umac_reset(priv);
+
+	/* Flush TX and RX FIFOs at TOPCTRL level */
+	topctrl_flush(priv);
+
+	/* Disable the UniMAC RX/TX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
+	/* Enable RBUF 2bytes alignment and Receive Status Block */
+	rbuf_init(priv);
+
+	/* Set maximum frame length */
+	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+	/* Set MAC address */
+	umac_set_hw_addr(priv, dev->dev_addr);
+
+	/* Read CRC forward */
+	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+					0, priv->phy_interface);
+	if (!priv->phydev) {
+		netdev_err(dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	/* Reset housekeeping link status */
+	priv->old_duplex = -1;
+	priv->old_link = -1;
+	priv->old_pause = -1;
+
+	/* mask all interrupts and request them */
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+	if (ret) {
+		netdev_err(dev, "failed to request RX interrupt\n");
+		goto out_phy_disconnect;
+	}
+
+	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+	if (ret) {
+		netdev_err(dev, "failed to request TX interrupt\n");
+		goto out_free_irq0;
+	}
+
+	/* Initialize both hardware and software ring */
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		ret = bcm_sysport_init_tx_ring(priv, i);
+		if (ret) {
+			netdev_err(dev, "failed to initialize TX ring %d\n",
+				   i);
+			goto out_free_tx_ring;
+		}
+	}
+
+	/* Initialize linked-list */
+	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+	/* Initialize RX ring */
+	ret = bcm_sysport_init_rx_ring(priv);
+	if (ret) {
+		netdev_err(dev, "failed to initialize RX ring\n");
+		goto out_free_rx_ring;
+	}
+
+	/* Turn on RDMA */
+	ret = rdma_enable_set(priv, 1);
+	if (ret)
+		goto out_free_rx_ring;
+
+	/* Turn on TDMA */
+	ret = tdma_enable_set(priv, 1);
+	if (ret)
+		goto out_clear_rx_int;
+
+	/* Turn on UniMAC TX/RX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
+
+	bcm_sysport_netif_start(dev);
+
+	return 0;
+
+out_clear_rx_int:
+	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+	bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+	free_irq(priv->irq1, dev);
+out_free_irq0:
+	free_irq(priv->irq0, dev);
+out_phy_disconnect:
+	phy_disconnect(priv->phydev);
+	return ret;
+}
+
+static void bcm_sysport_netif_stop(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	/* stop all software from updating hardware */
+	netif_tx_stop_all_queues(dev);
+	napi_disable(&priv->napi);
+	phy_stop(priv->phydev);
+
+	/* mask all interrupts */
+	intrl2_0_mask_set(priv, 0xffffffff);
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_1_mask_set(priv, 0xffffffff);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	int ret;
+
+	bcm_sysport_netif_stop(dev);
+
+	/* Disable UniMAC RX */
+	umac_enable_set(priv, CMD_RX_EN, 0);
+
+	ret = tdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "timeout disabling TDMA\n");
+		return ret;
+	}
+
+	/* Wait for a maximum packet size to be drained */
+	usleep_range(2000, 3000);
+
+	ret = rdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "timeout disabling TDMA\n");
+		return ret;
+	}
+
+	/* Disable UniMAC TX */
+	umac_enable_set(priv, CMD_TX_EN, 0);
+
+	/* Free RX/TX rings SW structures */
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+	bcm_sysport_fini_rx_ring(priv);
+
+	free_irq(priv->irq0, dev);
+	free_irq(priv->irq1, dev);
+
+	/* Disconnect from PHY */
+	phy_disconnect(priv->phydev);
+
+	return 0;
+}
+
+static struct ethtool_ops bcm_sysport_ethtool_ops = {
+	.get_settings		= bcm_sysport_get_settings,
+	.set_settings		= bcm_sysport_set_settings,
+	.get_drvinfo		= bcm_sysport_get_drvinfo,
+	.get_msglevel		= bcm_sysport_get_msglvl,
+	.set_msglevel		= bcm_sysport_set_msglvl,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= bcm_sysport_get_strings,
+	.get_ethtool_stats	= bcm_sysport_get_stats,
+	.get_sset_count		= bcm_sysport_get_sset_count,
+	.get_wol		= bcm_sysport_get_wol,
+	.set_wol		= bcm_sysport_set_wol,
+	.get_coalesce		= bcm_sysport_get_coalesce,
+	.set_coalesce		= bcm_sysport_set_coalesce,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+	.ndo_start_xmit		= bcm_sysport_xmit,
+	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
+	.ndo_open		= bcm_sysport_open,
+	.ndo_stop		= bcm_sysport_stop,
+	.ndo_set_features	= bcm_sysport_set_features,
+	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
+	.ndo_set_mac_address	= bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcm_sysport_poll_controller,
+#endif
+};
+
+#define REV_FMT	"v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+	struct bcm_sysport_priv *priv;
+	struct device_node *dn;
+	struct net_device *dev;
+	const void *macaddr;
+	struct resource *r;
+	u32 txq, rxq;
+	int ret;
+
+	dn = pdev->dev.of_node;
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	/* Read the Transmit/Receive Queue properties */
+	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+		txq = TDMA_NUM_RINGS;
+	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+		rxq = 1;
+
+	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+	if (!dev)
+		return -ENOMEM;
+
+	/* Initialize private members */
+	priv = netdev_priv(dev);
+
+	priv->irq0 = platform_get_irq(pdev, 0);
+	priv->irq1 = platform_get_irq(pdev, 1);
+	priv->wol_irq = platform_get_irq(pdev, 2);
+	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+		dev_err(&pdev->dev, "invalid interrupts\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	priv->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
+		goto err;
+	}
+
+	priv->netdev = dev;
+	priv->pdev = pdev;
+
+	priv->phy_interface = of_get_phy_mode(dn);
+	/* Default to GMII interface mode */
+	if (priv->phy_interface < 0)
+		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+	/* In the case of a fixed PHY, the DT node associated
+	 * to the PHY is the Ethernet MAC DT node.
+	 */
+	if (of_phy_is_fixed_link(dn)) {
+		ret = of_phy_register_fixed_link(dn);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to register fixed PHY\n");
+			goto err;
+		}
+
+		priv->phy_dn = dn;
+	}
+
+	/* Initialize netdevice members */
+	macaddr = of_get_mac_address(dn);
+	if (!macaddr || !is_valid_ether_addr(macaddr)) {
+		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+		eth_hw_addr_random(dev);
+	} else {
+		ether_addr_copy(dev->dev_addr, macaddr);
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	dev_set_drvdata(&pdev->dev, dev);
+	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+	dev->netdev_ops = &bcm_sysport_netdev_ops;
+	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+	/* HW supported features, none enabled by default */
+	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	/* Request the WOL interrupt and advertise suspend if available */
+	priv->wol_irq_disabled = 1;
+	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
+			       bcm_sysport_wol_isr, 0, dev->name, priv);
+	if (!ret)
+		device_set_wakeup_capable(&pdev->dev, 1);
+
+	/* Set the needed headroom once and for all (room for the 8-byte TSB) */
+	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+	dev->needed_headroom += sizeof(struct bcm_tsb);
+
+	/* libphy will adjust the link state accordingly */
+	netif_carrier_off(dev);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register net_device\n");
+		goto err;
+	}
+
+	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+	dev_info(&pdev->dev,
+		 "Broadcom SYSTEMPORT" REV_FMT
+		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+		 priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+	return 0;
+err:
+	free_netdev(dev);
+	return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+	/* Not much to do, ndo_close has been called
+	 * and we use managed allocations
+	 */
+	unregister_netdev(dev);
+	free_netdev(dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+{
+	struct net_device *ndev = priv->netdev;
+	unsigned int timeout = 1000;
+	u32 reg;
+
+	/* Password has already been programmed */
+	reg = umac_readl(priv, UMAC_MPD_CTRL);
+	reg |= MPD_EN;
+	reg &= ~PSW_EN;
+	if (priv->wolopts & WAKE_MAGICSECURE)
+		reg |= PSW_EN;
+	umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	/* Make sure RBUF entered WoL mode as a result (poll for up to ~10 ms) */
+	do {
+		reg = rbuf_readl(priv, RBUF_STATUS);
+		if (reg & RBUF_WOL_MODE)
+			break;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	/* Do not leave the UniMAC RBUF matching only MPD packets */
+	if (!timeout) {
+		reg = umac_readl(priv, UMAC_MPD_CTRL);
+		reg &= ~MPD_EN;
+		umac_writel(priv, reg, UMAC_MPD_CTRL);
+		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
+		return -ETIMEDOUT;
+	}
+
+	/* UniMAC receive needs to be turned on */
+	umac_enable_set(priv, CMD_RX_EN, 1);
+
+	/* Enable the interrupt wake-up source */
+	intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+
+	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+
+	return 0;
+}
+
+static int bcm_sysport_suspend(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	int ret = 0;
+	u32 reg;
+
+	if (!netif_running(dev))
+		return 0;
+
+	bcm_sysport_netif_stop(dev);
+
+	phy_suspend(priv->phydev);
+
+	netif_device_detach(dev);
+
+	/* Disable UniMAC RX */
+	umac_enable_set(priv, CMD_RX_EN, 0);
+
+	ret = rdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "RDMA timeout!\n");
+		return ret;
+	}
+
+	/* Disable RXCHK if enabled */
+	if (priv->rx_chk_en) {
+		reg = rxchk_readl(priv, RXCHK_CONTROL);
+		reg &= ~RXCHK_EN;
+		rxchk_writel(priv, reg, RXCHK_CONTROL);
+	}
+
+	/* Flush RX pipe */
+	if (!priv->wolopts)
+		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+
+	ret = tdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "TDMA timeout!\n");
+		return ret;
+	}
+
+	/* Wait for a packet boundary */
+	usleep_range(2000, 3000);
+
+	umac_enable_set(priv, CMD_TX_EN, 0);
+
+	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+
+	/* Free RX/TX rings SW structures */
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+	bcm_sysport_fini_rx_ring(priv);
+
+	/* Get prepared for Wake-on-LAN */
+	if (device_may_wakeup(d) && priv->wolopts)
+		ret = bcm_sysport_suspend_to_wol(priv);
+
+	return ret;
+}
+
+static int bcm_sysport_resume(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	u32 reg;
+	int ret;
+
+	if (!netif_running(dev))
+		return 0;
+
+	umac_reset(priv);
+
+	/* We may have been suspended and never received a WOL event that
+	 * would turn off MPD detection; take care of that now
+	 */
+	bcm_sysport_resume_from_wol(priv);
+
+	/* Initialize both hardware and software ring */
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		ret = bcm_sysport_init_tx_ring(priv, i);
+		if (ret) {
+			netdev_err(dev, "failed to initialize TX ring %d\n",
+				   i);
+			goto out_free_tx_rings;
+		}
+	}
+
+	/* Initialize linked-list */
+	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+	/* Initialize RX ring */
+	ret = bcm_sysport_init_rx_ring(priv);
+	if (ret) {
+		netdev_err(dev, "failed to initialize RX ring\n");
+		goto out_free_rx_ring;
+	}
+
+	netif_device_attach(dev);
+
+	/* RX pipe enable */
+	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+
+	ret = rdma_enable_set(priv, 1);
+	if (ret) {
+		netdev_err(dev, "failed to enable RDMA\n");
+		goto out_free_rx_ring;
+	}
+
+	/* Re-enable RXCHK if it was enabled */
+	if (priv->rx_chk_en) {
+		reg = rxchk_readl(priv, RXCHK_CONTROL);
+		reg |= RXCHK_EN;
+		rxchk_writel(priv, reg, RXCHK_CONTROL);
+	}
+
+	rbuf_init(priv);
+
+	/* Set maximum frame length */
+	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+	/* Set MAC address */
+	umac_set_hw_addr(priv, dev->dev_addr);
+
+	umac_enable_set(priv, CMD_RX_EN, 1);
+
+	/* TX pipe enable */
+	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+
+	umac_enable_set(priv, CMD_TX_EN, 1);
+
+	ret = tdma_enable_set(priv, 1);
+	if (ret) {
+		netdev_err(dev, "TDMA timeout!\n");
+		goto out_free_rx_ring;
+	}
+
+	phy_resume(priv->phydev);
+
+	bcm_sysport_netif_start(dev);
+
+	return 0;
+
+out_free_rx_ring:
+	bcm_sysport_fini_rx_ring(priv);
+out_free_tx_rings:
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
+		bcm_sysport_suspend, bcm_sysport_resume);
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+	{ .compatible = "brcm,systemport-v1.00" },
+	{ .compatible = "brcm,systemport" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
+
+static struct platform_driver bcm_sysport_driver = {
+	.probe	= bcm_sysport_probe,
+	.remove	= bcm_sysport_remove,
+	.driver =  {
+		.name = "brcm-systemport",
+		.of_match_table = bcm_sysport_of_match,
+		.pm = &bcm_sysport_pm_ops,
+	},
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 0000000..f28bf54
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,693 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN	0x00
+#define  DESC_ADDR_HI_SHIFT	0
+#define  DESC_ADDR_HI_MASK	0xff
+#define  DESC_STATUS_SHIFT	8
+#define  DESC_STATUS_MASK	0x3ff
+#define  DESC_LEN_SHIFT		18
+#define  DESC_LEN_MASK		0x7fff
+#define DESC_ADDR_LO		0x04
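+/* Word 0 packs the upper address byte in bits 7:0, the status in bits 17:8
+ * and the length from bit 18 up; word 1 holds the low 32 address bits.
+ */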
+
+/* HW supports 40-bit addressing, hence the 8-bit address-high field above */
+#define DESC_SIZE		(WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH		2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
+ * Adding ENET_PAD(10) rounds this up to 1536, a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN	4
+#define ENET_PAD		10
+#define UMAC_MAX_MTU_SIZE	(ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+				 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct bcm_tsb {
+	u32 pcp_dei_vid;
+#define PCP_DEI_MASK		0xf
+#define VID_SHIFT		4
+#define VID_MASK		0xfff
+	u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK	0x1ff
+#define L4_PTR_SHIFT		9
+#define L4_PTR_MASK		0x1ff
+#define L4_UDP			(1 << 18)
+#define L4_LENGTH_VALID		(1 << 19)
+#define DEST_MAP_SHIFT		20
+#define DEST_MAP_MASK		0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct bcm_rsb {
+	u32 rx_status_len;
+	u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM		(1 << 7)
+#define DESC_SOP		(1 << 8)
+#define DESC_EOP		(1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST			0
+#define RX_STATUS_BCAST			0x04
+#define RX_STATUS_MCAST			0x08
+#define RX_STATUS_L2_MCAST		0x0c
+#define RX_STATUS_ERR			(1 << 4)
+#define RX_STATUS_OVFLOW		(1 << 5)
+#define RX_STATUS_PARSE_FAIL		(1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT		0x00
+#define TX_STATUS_VLAN_PCP_TSB		0x01
+#define TX_STATUS_VLAN_QUEUE		0x02
+#define TX_STATUS_VLAN_VID_TSB		0x03
+#define TX_STATUS_OWR_CRC		(1 << 2)
+#define TX_STATUS_APP_CRC		(1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT	0
+#define TX_STATUS_BRCM_TAG_ZERO		0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE	0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB	0x30
+#define TX_STATUS_SKIP_BYTES		(1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET		0
+#define REV_CNTL			0x00
+#define  REV_MASK			0xffff
+
+#define RX_FLUSH_CNTL			0x04
+#define  RX_FLUSH			(1 << 0)
+
+#define TX_FLUSH_CNTL			0x08
+#define  TX_FLUSH			(1 << 0)
+
+#define MISC_CNTL			0x0c
+#define  SYS_CLK_SEL			(1 << 0)
+#define  TDMA_EOP_SEL			(1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET	0x200
+#define SYS_PORT_INTRL2_1_OFFSET	0x240
+#define INTRL2_CPU_STATUS		0x00
+#define INTRL2_CPU_SET			0x04
+#define INTRL2_CPU_CLEAR		0x08
+#define INTRL2_CPU_MASK_STATUS		0x0c
+#define INTRL2_CPU_MASK_SET		0x10
+#define INTRL2_CPU_MASK_CLEAR		0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR		(1 << 0)
+#define INTRL2_0_RBUF_OVFLOW		(1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW		(1 << 2)
+#define INTRL2_0_MPD			(1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG		(1 << 4)
+#define INTRL2_0_RDMA_MBDONE		(1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH	(1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH	(1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY	(1 << 8)
+#define INTRL2_0_TX_RING_FULL		(1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR		(1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK	(1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET		0x300
+
+#define RXCHK_CONTROL			0x00
+#define  RXCHK_EN			(1 << 0)
+#define  RXCHK_SKIP_FCS			(1 << 1)
+#define  RXCHK_BAD_CSUM_DIS		(1 << 2)
+#define  RXCHK_BRCM_TAG_EN		(1 << 3)
+#define  RXCHK_BRCM_TAG_MATCH_SHIFT	4
+#define  RXCHK_BRCM_TAG_MATCH_MASK	0xff
+#define  RXCHK_PARSE_TNL		(1 << 12)
+#define  RXCHK_VIOL_EN			(1 << 13)
+#define  RXCHK_VIOL_DIS			(1 << 14)
+#define  RXCHK_INCOM_PKT		(1 << 15)
+#define  RXCHK_V6_DUPEXT_EN		(1 << 16)
+#define  RXCHK_V6_DUPEXT_DIS		(1 << 17)
+#define  RXCHK_ETHERTYPE_DIS		(1 << 18)
+#define  RXCHK_L2_HDR_DIS		(1 << 19)
+#define  RXCHK_L3_HDR_DIS		(1 << 20)
+#define  RXCHK_MAC_RX_ERR_DIS		(1 << 21)
+#define  RXCHK_PARSE_AUTH		(1 << 22)
+
+#define RXCHK_BRCM_TAG0			0x04
+#define RXCHK_BRCM_TAG(i)		((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK		0x24
+#define RXCHK_BRCM_TAG_MASK(i)		((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS	0x44
+#define RXCHK_ETHERTYPE			0x48
+#define RXCHK_BAD_CSUM_CNTR		0x4C
+#define RXCHK_OTHER_DISC_CNTR		0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET		0x380
+#define TXCHK_PKT_RDY_THRESH		0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET		0x400
+
+#define RBUF_CONTROL			0x00
+#define  RBUF_RSB_EN			(1 << 0)
+#define  RBUF_4B_ALGN			(1 << 1)
+#define  RBUF_BRCM_TAG_STRIP		(1 << 2)
+#define  RBUF_BAD_PKT_DISC		(1 << 3)
+#define  RBUF_RESUME_THRESH_SHIFT	4
+#define  RBUF_RESUME_THRESH_MASK	0xff
+#define  RBUF_OK_TO_SEND_SHIFT		12
+#define  RBUF_OK_TO_SEND_MASK		0xff
+#define  RBUF_CRC_REPLACE		(1 << 20)
+#define  RBUF_OK_TO_SEND_MODE		(1 << 21)
+#define  RBUF_RSB_SWAP			(1 << 22)
+#define  RBUF_ACPI_EN			(1 << 23)
+
+#define RBUF_PKT_RDY_THRESH		0x04
+
+#define RBUF_STATUS			0x08
+#define  RBUF_WOL_MODE			(1 << 0)
+#define  RBUF_MPD			(1 << 1)
+#define  RBUF_ACPI			(1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR		0x0c
+#define RBUF_ERR_PKT_CNTR		0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET		0x600
+
+#define TBUF_CONTROL			0x00
+#define  TBUF_BP_EN			(1 << 0)
+#define  TBUF_MAX_PKT_THRESH_SHIFT	1
+#define  TBUF_MAX_PKT_THRESH_MASK	0x1f
+#define  TBUF_FULL_THRESH_SHIFT		8
+#define  TBUF_FULL_THRESH_MASK		0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET		0x800
+
+#define UMAC_CMD			0x008
+#define  CMD_TX_EN			(1 << 0)
+#define  CMD_RX_EN			(1 << 1)
+#define  CMD_SPEED_SHIFT		2
+#define  CMD_SPEED_10			0
+#define  CMD_SPEED_100			1
+#define  CMD_SPEED_1000			2
+#define  CMD_SPEED_2500			3
+#define  CMD_SPEED_MASK			3
+#define  CMD_PROMISC			(1 << 4)
+#define  CMD_PAD_EN			(1 << 5)
+#define  CMD_CRC_FWD			(1 << 6)
+#define  CMD_PAUSE_FWD			(1 << 7)
+#define  CMD_RX_PAUSE_IGNORE		(1 << 8)
+#define  CMD_TX_ADDR_INS		(1 << 9)
+#define  CMD_HD_EN			(1 << 10)
+#define  CMD_SW_RESET			(1 << 13)
+#define  CMD_LCL_LOOP_EN		(1 << 15)
+#define  CMD_AUTO_CONFIG		(1 << 22)
+#define  CMD_CNTL_FRM_EN		(1 << 23)
+#define  CMD_NO_LEN_CHK			(1 << 24)
+#define  CMD_RMT_LOOP_EN		(1 << 25)
+#define  CMD_PRBL_EN			(1 << 27)
+#define  CMD_TX_PAUSE_IGNORE		(1 << 28)
+#define  CMD_TX_RX_EN			(1 << 29)
+#define  CMD_RUNT_FILTER_DIS		(1 << 30)
+
+#define UMAC_MAC0			0x00c
+#define UMAC_MAC1			0x010
+#define UMAC_MAX_FRAME_LEN		0x014
+
+#define UMAC_TX_FLUSH			0x334
+
+#define UMAC_MIB_START			0x400
+
+/* There is a 0xC gap between the end of the RX stats and the beginning of the
+ * TX stats, and again between the end of the TX stats and the RX RUNT counters
+ */
+#define UMAC_MIB_STAT_OFFSET		0xc
+
+#define UMAC_MIB_CTRL			0x580
+#define  MIB_RX_CNT_RST			(1 << 0)
+#define  MIB_RUNT_CNT_RST		(1 << 1)
+#define  MIB_TX_CNT_RST			(1 << 2)
+
+#define UMAC_MPD_CTRL			0x620
+#define  MPD_EN				(1 << 0)
+#define  MSEQ_LEN_SHIFT			16
+#define  MSEQ_LEN_MASK			0xff
+#define  PSW_EN				(1 << 27)
+
+#define UMAC_PSW_MS			0x624
+#define UMAC_PSW_LS			0x628
+#define UMAC_MDF_CTRL			0x650
+#define UMAC_MDF_ADDR			0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET		0x2000
+
+#define RDMA_CONTROL			0x1000
+#define  RDMA_EN			(1 << 0)
+#define  RDMA_RING_CFG			(1 << 1)
+#define  RDMA_DISC_EN			(1 << 2)
+#define  RDMA_BUF_DATA_OFFSET_SHIFT	4
+#define  RDMA_BUF_DATA_OFFSET_MASK	0x3ff
+
+#define RDMA_STATUS			0x1004
+#define  RDMA_DISABLED			(1 << 0)
+#define  RDMA_DESC_RAM_INIT_BUSY	(1 << 1)
+#define  RDMA_BP_STATUS			(1 << 2)
+
+#define RDMA_SCB_BURST_SIZE		0x1008
+
+#define RDMA_RING_BUF_SIZE		0x100c
+#define  RDMA_RING_SIZE_SHIFT		16
+
+#define RDMA_WRITE_PTR_HI		0x1010
+#define RDMA_WRITE_PTR_LO		0x1014
+#define RDMA_PROD_INDEX			0x1018
+#define  RDMA_PROD_INDEX_MASK		0xffff
+
+#define RDMA_CONS_INDEX			0x101c
+#define  RDMA_CONS_INDEX_MASK		0xffff
+
+#define RDMA_START_ADDR_HI		0x1020
+#define RDMA_START_ADDR_LO		0x1024
+#define RDMA_END_ADDR_HI		0x1028
+#define RDMA_END_ADDR_LO		0x102c
+
+#define RDMA_MBDONE_INTR		0x1030
+#define  RDMA_INTR_THRESH_MASK		0x1ff
+#define  RDMA_TIMEOUT_SHIFT		16
+#define  RDMA_TIMEOUT_MASK		0xffff
+
+#define RDMA_XON_XOFF_THRESH		0x1034
+#define  RDMA_XON_XOFF_THRESH_MASK	0xffff
+#define  RDMA_XOFF_THRESH_SHIFT		16
+
+#define RDMA_READ_PTR_HI		0x1038
+#define RDMA_READ_PTR_LO		0x103c
+
+#define RDMA_OVERRIDE			0x1040
+#define  RDMA_LE_MODE			(1 << 0)
+#define  RDMA_REG_MODE			(1 << 1)
+
+#define RDMA_TEST			0x1044
+#define  RDMA_TP_OUT_SEL		(1 << 0)
+#define  RDMA_MEM_SEL			(1 << 1)
+
+#define RDMA_DEBUG			0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS			32	/* rings = queues */
+#define TDMA_PORT_SIZE			DESC_SIZE /* two 32-bit words */
+
+#define SYS_PORT_TDMA_OFFSET		0x4000
+#define TDMA_WRITE_PORT_OFFSET		0x0000
+#define TDMA_WRITE_PORT_HI(i)		(TDMA_WRITE_PORT_OFFSET + \
+					(i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i)		(TDMA_WRITE_PORT_OFFSET + \
+					sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET		(TDMA_WRITE_PORT_OFFSET + \
+					(TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i)		(TDMA_READ_PORT_OFFSET + \
+					(i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i)		(TDMA_READ_PORT_OFFSET + \
+					sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET	(TDMA_READ_PORT_OFFSET + \
+					(TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i)		(TDMA_READ_PORT_CMD_OFFSET + \
+					(i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE		(TDMA_READ_PORT_CMD_OFFSET + \
+					(TDMA_NUM_RINGS * sizeof(u32)))
+
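+/* With TDMA_NUM_RINGS = 32 and 8-byte write/read ports, the read ports start
+ * at offset 0x100, the read port commands at 0x200 and ring 0's registers at
+ * 0x280, all relative to SYS_PORT_TDMA_OFFSET.
+ */
+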
+/* Register offsets and defines relative to a specific ring number */
+#define RING_HEAD_TAIL_PTR		0x00
+#define  RING_HEAD_MASK			0x7ff
+#define  RING_TAIL_SHIFT		11
+#define  RING_TAIL_MASK			0x7ff
+#define  RING_FLUSH			(1 << 24)
+#define  RING_EN			(1 << 25)
+
+#define RING_COUNT			0x04
+#define  RING_COUNT_MASK		0x7ff
+#define  RING_BUFF_DONE_SHIFT		11
+#define  RING_BUFF_DONE_MASK		0x7ff
+
+#define RING_MAX_HYST			0x08
+#define  RING_MAX_THRESH_MASK		0x7ff
+#define  RING_HYST_THRESH_SHIFT		11
+#define  RING_HYST_THRESH_MASK		0x7ff
+
+#define RING_INTR_CONTROL		0x0c
+#define  RING_INTR_THRESH_MASK		0x7ff
+#define  RING_EMPTY_INTR_EN		(1 << 15)
+#define  RING_TIMEOUT_SHIFT		16
+#define  RING_TIMEOUT_MASK		0xffff
+
+#define RING_PROD_CONS_INDEX		0x10
+#define  RING_PROD_INDEX_MASK		0xffff
+#define  RING_CONS_INDEX_SHIFT		16
+#define  RING_CONS_INDEX_MASK		0xffff
+
+#define RING_MAPPING			0x14
+#define  RING_QID_MASK			0x3
+#define  RING_PORT_ID_SHIFT		3
+#define  RING_PORT_ID_MASK		0x7
+#define  RING_IGNORE_STATUS		(1 << 6)
+#define  RING_FAILOVER_EN		(1 << 7)
+#define  RING_CREDIT_SHIFT		8
+#define  RING_CREDIT_MASK		0xffff
+
+#define RING_PCP_DEI_VID		0x18
+#define  RING_VID_MASK			0x7ff
+#define  RING_DEI			(1 << 12)
+#define  RING_PCP_SHIFT			13
+#define  RING_PCP_MASK			0x7
+#define  RING_PKT_SIZE_ADJ_SHIFT	16
+#define  RING_PKT_SIZE_ADJ_MASK		0xf
+
+#define TDMA_DESC_RING_SIZE		28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i)		(TDMA_DESC_RING_00_BASE + \
+					((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i)	(TDMA_DESC_RING_BASE(i) + \
+					RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i)		(TDMA_DESC_RING_BASE(i) + \
+					RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i)	(TDMA_DESC_RING_BASE(i) + \
+					RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i)	(TDMA_DESC_RING_BASE(i) + \
+					RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+					(TDMA_DESC_RING_BASE(i) + \
+					RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i)	(TDMA_DESC_RING_BASE(i) + \
+					RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i)	(TDMA_DESC_RING_BASE(i) + \
+					RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL			0x600
+#define  TDMA_EN			(1 << 0)
+#define  TSB_EN				(1 << 1)
+#define  TSB_SWAP			(1 << 2)
+#define  ACB_ALGO			(1 << 3)
+#define  BUF_DATA_OFFSET_SHIFT		4
+#define  BUF_DATA_OFFSET_MASK		0x3ff
+#define  VLAN_EN			(1 << 14)
+#define  SW_BRCM_TAG			(1 << 15)
+#define  WNC_KPT_SIZE_UPDATE		(1 << 16)
+#define  SYNC_PKT_SIZE			(1 << 17)
+#define  ACH_TXDONE_DELAY_SHIFT		18
+#define  ACH_TXDONE_DELAY_MASK		0xff
+
+#define TDMA_STATUS			0x604
+#define  TDMA_DISABLED			(1 << 0)
+#define  TDMA_LL_RAM_INIT_BUSY		(1 << 1)
+
+#define TDMA_SCB_BURST_SIZE		0x608
+#define TDMA_OVER_MAX_THRESH_STATUS	0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS	0x610
+#define TDMA_TPID			0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR	0x618
+#define  TDMA_FREE_HEAD_MASK		0x7ff
+#define  TDMA_FREE_TAIL_SHIFT		11
+#define  TDMA_FREE_TAIL_MASK		0x7ff
+
+#define TDMA_FREE_LIST_COUNT		0x61c
+#define  TDMA_FREE_LIST_COUNT_MASK	0x7ff
+
+#define TDMA_TIER2_ARB_CTRL		0x620
+#define  TDMA_ARB_MODE_RR		0
+#define  TDMA_ARB_MODE_WEIGHT_RR	0x1
+#define  TDMA_ARB_MODE_STRICT		0x2
+#define  TDMA_ARB_MODE_DEFICIT_RR	0x3
+#define  TDMA_CREDIT_SHIFT		4
+#define  TDMA_CREDIT_MASK		0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL		0x624
+#define  TDMA_ARB_EN			(1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN	0x628
+#define TDMA_TIER1_ARB_1_CTRL		0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN	0x630
+#define TDMA_TIER1_ARB_2_CTRL		0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN	0x638
+#define TDMA_TIER1_ARB_3_CTRL		0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN	0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE	0x644
+#define  TDMA_LE_MODE			(1 << 0)
+#define  TDMA_REG_MODE			(1 << 1)
+
+#define TDMA_TEST			0x648
+#define  TDMA_TP_OUT_SEL		(1 << 0)
+#define  TDMA_MEM_TM			(1 << 1)
+
+#define TDMA_DEBUG			0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+	u32	addr_status_len;
+	u32	addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS		1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC			(NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
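+/* With two 32-bit words per descriptor this works out to 512 RX descriptors */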
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC			1536
+
+#define WORDS_PER_DESC			(sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group.*/
+struct bcm_sysport_pkt_counters {
+	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
+	u32	cnt_127;	/* RO Rx/Tx 127 bytes packet */
+	u32	cnt_255;	/* RO Rx/Tx 65-255 bytes packet */
+	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
+	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
+	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
+	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
+	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet*/
+	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet*/
+	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+	struct  bcm_sysport_pkt_counters pkt_cnt;
+	u32	pkt;		/* RO (0x428) Received pkt count*/
+	u32	bytes;		/* RO Received byte count */
+	u32	mca;		/* RO # of Received multicast pkt */
+	u32	bca;		/* RO # of Receive broadcast pkt */
+	u32	fcs;		/* RO # of Received FCS error  */
+	u32	cf;		/* RO # of Received control frame pkt*/
+	u32	pf;		/* RO # of Received pause frame pkt */
+	u32	uo;		/* RO # of unknown op code pkt */
+	u32	aln;		/* RO # of alignment error count */
+	u32	flr;		/* RO # of frame length out of range count */
+	u32	cde;		/* RO # of code error pkt */
+	u32	fcr;		/* RO # of carrier sense error pkt */
+	u32	ovr;		/* RO # of oversize pkt*/
+	u32	jbr;		/* RO # of jabber count */
+	u32	mtue;		/* RO # of MTU error pkt*/
+	u32	pok;		/* RO # of Received good pkt */
+	u32	uc;		/* RO # of unicast pkt */
+	u32	ppp;		/* RO # of PPP pkt */
+	u32	rcrc;		/* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+	struct bcm_sysport_pkt_counters pkt_cnt;
+	u32	pkts;		/* RO (0x4a8) Transmitted pkt */
+	u32	mca;		/* RO # of xmited multicast pkt */
+	u32	bca;		/* RO # of xmited broadcast pkt */
+	u32	pf;		/* RO # of xmited pause frame count */
+	u32	cf;		/* RO # of xmited control frame count */
+	u32	fcs;		/* RO # of xmited FCS error count */
+	u32	ovr;		/* RO # of xmited oversize pkt */
+	u32	drf;		/* RO # of xmited deferral pkt */
+	u32	edf;		/* RO # of xmited Excessive deferral pkt*/
+	u32	scl;		/* RO # of xmited single collision pkt */
+	u32	mcl;		/* RO # of xmited multiple collision pkt*/
+	u32	lcl;		/* RO # of xmited late collision pkt */
+	u32	ecl;		/* RO # of xmited excessive collision pkt*/
+	u32	frg;		/* RO # of xmited fragments pkt*/
+	u32	ncl;		/* RO # of xmited total collision count */
+	u32	jbr;		/* RO # of xmited jabber count*/
+	u32	bytes;		/* RO # of xmited byte count */
+	u32	pok;		/* RO # of xmited good pkt */
+	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+	struct bcm_sysport_rx_counters rx;
+	struct bcm_sysport_tx_counters tx;
+	u32 rx_runt_cnt;
+	u32 rx_runt_fcs;
+	u32 rx_runt_fcs_align;
+	u32 rx_runt_bytes;
+	u32 rxchk_bad_csum;
+	u32 rxchk_other_pkt_disc;
+	u32 rbuf_ovflow_cnt;
+	u32 rbuf_err_cnt;
+	u32 alloc_rx_buff_failed;
+	u32 rx_dma_failed;
+	u32 tx_dma_failed;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+	BCM_SYSPORT_STAT_NETDEV = -1,
+	BCM_SYSPORT_STAT_MIB_RX,
+	BCM_SYSPORT_STAT_MIB_TX,
+	BCM_SYSPORT_STAT_RUNT,
+	BCM_SYSPORT_STAT_RXCHK,
+	BCM_SYSPORT_STAT_RBUF,
+	BCM_SYSPORT_STAT_SOFT,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+	.stat_string = __stringify(m), \
+	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+	.stat_offset = offsetof(struct net_device_stats, m), \
+	.type = BCM_SYSPORT_STAT_NETDEV, \
+}
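+
+/* For example, STAT_NETDEV(rx_packets) describes net_device_stats::rx_packets:
+ * the string "rx_packets" plus its sizeof/offsetof within net_device_stats.
+ */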
+
+#define STAT_MIB(str, m, _type) { \
+	.stat_string = str, \
+	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
+	.type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+	.stat_string = str, \
+	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
+	.type = BCM_SYSPORT_STAT_RXCHK, \
+	.reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+	.stat_string = str, \
+	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
+	.type = BCM_SYSPORT_STAT_RBUF, \
+	.reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_sizeof;
+	int stat_offset;
+	enum bcm_sysport_stat_type type;
+	/* reg offset from UMAC base for misc counters */
+	u16 reg_offset;
+};
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+	struct sk_buff	*skb;		/* SKB for RX packets */
+	void __iomem	*bd_addr;	/* Buffer descriptor PHYS addr */
+
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+	spinlock_t	lock;		/* Ring lock for tx reclaim/xmit */
+	struct napi_struct napi;	/* NAPI per tx queue */
+	dma_addr_t	desc_dma;	/* DMA cookie */
+	unsigned int	index;		/* Ring index */
+	unsigned int	size;		/* Ring current size */
+	unsigned int	alloc_size;	/* Ring one-time allocated size */
+	unsigned int	desc_count;	/* Number of descriptors */
+	unsigned int	curr_desc;	/* Current descriptor */
+	unsigned int	c_index;	/* Last consumer index */
+	unsigned int	p_index;	/* Current producer index */
+	struct bcm_sysport_cb *cbs;	/* Transmit control blocks */
+	struct dma_desc	*desc_cpu;	/* CPU view of the descriptor */
+	struct bcm_sysport_priv *priv;	/* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+	void __iomem		*base;
+	u32			irq0_stat;
+	u32			irq0_mask;
+	u32			irq1_stat;
+	u32			irq1_mask;
+	struct napi_struct	napi ____cacheline_aligned;
+	struct net_device	*netdev;
+	struct platform_device	*pdev;
+	int			irq0;
+	int			irq1;
+	int			wol_irq;
+
+	/* Transmit rings */
+	struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+	/* Receive queue */
+	void __iomem		*rx_bds;
+	struct bcm_sysport_cb	*rx_cbs;
+	unsigned int		num_rx_bds;
+	unsigned int		rx_read_ptr;
+	unsigned int		rx_c_index;
+
+	/* PHY device */
+	struct device_node	*phy_dn;
+	struct phy_device	*phydev;
+	phy_interface_t		phy_interface;
+	int			old_pause;
+	int			old_link;
+	int			old_duplex;
+
+	/* Misc fields */
+	unsigned int		rx_chk_en:1;
+	unsigned int		tsb_en:1;
+	unsigned int		crc_fwd:1;
+	u16			rev;
+	u32			wolopts;
+	unsigned int		wol_irq_disabled:1;
+
+	/* MIB related fields */
+	struct bcm_sysport_mib	mib;
+
+	/* Ethtool */
+	u32			msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
new file mode 100644
index 0000000..a5e4b4b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -0,0 +1,1759 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bgmac.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/bcm47xx_nvram.h>
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	{},
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+			     u32 value, int timeout)
+{
+	u32 val;
+	int i;
+
+	for (i = 0; i < timeout / 10; i++) {
+		val = bcma_read32(core, reg);
+		if ((val & mask) == value)
+			return true;
+		udelay(10);
+	}
+	pr_err("Timeout waiting for reg 0x%X\n", reg);
+	return false;
+}
+
+/**************************************************
+ * DMA
+ **************************************************/
+
+static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+	u32 val;
+	int i;
+
+	if (!ring->mmio_base)
+		return;
+
+	/* Suspend DMA TX ring first.
+	 * bgmac_wait_value doesn't support waiting for any one of several
+	 * values, so implement the whole polling loop here.
+	 */
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
+		    BGMAC_DMA_TX_SUSPEND);
+	for (i = 0; i < 10000 / 10; i++) {
+		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+		val &= BGMAC_DMA_TX_STAT;
+		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
+		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
+		    val == BGMAC_DMA_TX_STAT_STOPPED) {
+			i = 0;
+			break;
+		}
+		udelay(10);
+	}
+	if (i)
+		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+			  ring->mmio_base, val);
+
+	/* Remove SUSPEND bit */
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
+	if (!bgmac_wait_value(bgmac->core,
+			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
+			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
+			      10000)) {
+		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+			   ring->mmio_base);
+		udelay(300);
+		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
+			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
+				  ring->mmio_base);
+	}
+}
+
+static void bgmac_dma_tx_enable(struct bgmac *bgmac,
+				struct bgmac_dma_ring *ring)
+{
+	u32 ctl;
+
+	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+	if (bgmac->core->id.rev >= 4) {
+		ctl &= ~BGMAC_DMA_TX_BL_MASK;
+		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_MR_MASK;
+		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_PC_MASK;
+		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_PT_MASK;
+		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+	}
+	ctl |= BGMAC_DMA_TX_ENABLE;
+	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
+}
+
+static void
+bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+		     int i, int len, u32 ctl0)
+{
+	struct bgmac_slot_info *slot;
+	struct bgmac_dma_desc *dma_desc;
+	u32 ctl1;
+
+	if (i == BGMAC_TX_RING_SLOTS - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+
+	ctl1 = len & BGMAC_DESC_CTL1_LEN;
+
+	slot = &ring->slots[i];
+	dma_desc = &ring->cpu_base[i];
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
+static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
+				    struct bgmac_dma_ring *ring,
+				    struct sk_buff *skb)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	struct net_device *net_dev = bgmac->net_dev;
+	int index = ring->end % BGMAC_TX_RING_SLOTS;
+	struct bgmac_slot_info *slot = &ring->slots[index];
+	int nr_frags;
+	u32 flags;
+	int i;
+
+	if (skb->len > BGMAC_DESC_CTL1_LEN) {
+		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+		goto err_drop;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb_checksum_help(skb);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	/* ring->end - ring->start will return the number of valid slots,
+	 * even when ring->end overflows
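+	 * (unsigned wrap-around: e.g. end = 3 after wrapping and
+	 * start = 0xfffffffd still gives 3 - 0xfffffffd == 6 slots in use)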
+	 */
+	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
+		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+		netif_stop_queue(net_dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
+					DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+		goto err_dma_head;
+
+	flags = BGMAC_DESC_CTL0_SOF;
+	if (!nr_frags)
+		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
+	flags = 0;
+
+	for (i = 0; i < nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		int len = skb_frag_size(frag);
+
+		index = (index + 1) % BGMAC_TX_RING_SLOTS;
+		slot = &ring->slots[index];
+		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
+						  len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+			goto err_dma;
+
+		if (i == nr_frags - 1)
+			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
+	}
+
+	slot->skb = skb;
+	ring->end += nr_frags + 1;
+	netdev_sent_queue(net_dev, skb->len);
+
+	wmb();
+
+	/* ring->end now points to the first empty slot; tell the hardware the
+	 * first slot it should *not* read.
+	 */
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+		    ring->index_base +
+		    (ring->end % BGMAC_TX_RING_SLOTS) *
+		    sizeof(struct bgmac_dma_desc));
+
+	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
+		netif_stop_queue(net_dev);
+
+	return NETDEV_TX_OK;
+
+err_dma:
+	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
+			 DMA_TO_DEVICE);
+
+	while (i-- > 0) {
+		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
+		struct bgmac_slot_info *slot = &ring->slots[index];
+		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
+		int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+
+		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
+	}
+
+err_dma_head:
+	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+		  ring->mmio_base);
+
+err_drop:
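+	/* Drop the packet: consume the skb and return NETDEV_TX_OK so the
+	 * stack does not retry it.
+	 */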
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Free transmitted packets */
+static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	int empty_slot;
+	bool freed = false;
+	unsigned bytes_compl = 0, pkts_compl = 0;
+
+	/* The last slot that hardware didn't consume yet */
+	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot -= ring->index_base;
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot /= sizeof(struct bgmac_dma_desc);
+
+	while (ring->start != ring->end) {
+		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
+		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
+		u32 ctl0, ctl1;
+		int len;
+
+		if (slot_idx == empty_slot)
+			break;
+
+		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
+		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
+		len = ctl1 & BGMAC_DESC_CTL1_LEN;
+		if (ctl0 & BGMAC_DESC_CTL0_SOF)
+			/* Unmap no longer used buffer */
+			dma_unmap_single(dma_dev, slot->dma_addr, len,
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dma_dev, slot->dma_addr, len,
+				       DMA_TO_DEVICE);
+
+		if (slot->skb) {
+			bytes_compl += slot->skb->len;
+			pkts_compl++;
+
+			/* Free memory! :) */
+			dev_kfree_skb(slot->skb);
+			slot->skb = NULL;
+		}
+
+		slot->dma_addr = 0;
+		ring->start++;
+		freed = true;
+	}
+
+	if (!pkts_compl)
+		return;
+
+	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
+	if (netif_queue_stopped(bgmac->net_dev))
+		netif_wake_queue(bgmac->net_dev);
+}
+
+static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+	if (!ring->mmio_base)
+		return;
+
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
+	if (!bgmac_wait_value(bgmac->core,
+			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
+			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
+			      10000))
+		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
+			  ring->mmio_base);
+}
+
+static void bgmac_dma_rx_enable(struct bgmac *bgmac,
+				struct bgmac_dma_ring *ring)
+{
+	u32 ctl;
+
+	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+	/* preserve ONLY bits 16-17 from current hardware value */
+	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
+	if (bgmac->core->id.rev >= 4) {
+		ctl &= ~BGMAC_DMA_RX_BL_MASK;
+		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+		ctl &= ~BGMAC_DMA_RX_PC_MASK;
+		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+		ctl &= ~BGMAC_DMA_RX_PT_MASK;
+		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+	}
+	ctl |= BGMAC_DMA_RX_ENABLE;
+	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
+	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
+	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
+}
+
+static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
+				     struct bgmac_slot_info *slot)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	dma_addr_t dma_addr;
+	struct bgmac_rx_header *rx;
+	void *buf;
+
+	/* Alloc skb */
+	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Poison - if everything goes fine, hardware will overwrite it */
+	rx = buf + BGMAC_RX_BUF_OFFSET;
+	rx->len = cpu_to_le16(0xdead);
+	rx->flags = cpu_to_le16(0xbeef);
+
+	/* Map skb for the DMA */
+	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
+				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev, dma_addr)) {
+		bgmac_err(bgmac, "DMA mapping error\n");
+		put_page(virt_to_head_page(buf));
+		return -ENOMEM;
+	}
+
+	/* Update the slot */
+	slot->buf = buf;
+	slot->dma_addr = dma_addr;
+
+	return 0;
+}
+
+static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
+				      struct bgmac_dma_ring *ring)
+{
+	dma_wmb();
+
+	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+		    ring->index_base +
+		    ring->end * sizeof(struct bgmac_dma_desc));
+}
+
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+				    struct bgmac_dma_ring *ring, int desc_idx)
+{
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+	u32 ctl0 = 0, ctl1 = 0;
+
+	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+	/* Is there any BGMAC device that requires extension? */
+	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+	 * B43_DMA64_DCTL1_ADDREXT_MASK;
+	 */
+
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+
+	ring->end = desc_idx;
+}
+
+static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
+				    struct bgmac_slot_info *slot)
+{
+	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
+
+	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
+				DMA_FROM_DEVICE);
+	rx->len = cpu_to_le16(0xdead);
+	rx->flags = cpu_to_le16(0xbeef);
+	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
+				   DMA_FROM_DEVICE);
+}
+
+static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+			     int weight)
+{
+	u32 end_slot;
+	int handled = 0;
+
+	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot -= ring->index_base;
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot /= sizeof(struct bgmac_dma_desc);
+
+	while (ring->start != end_slot) {
+		struct device *dma_dev = bgmac->core->dma_dev;
+		struct bgmac_slot_info *slot = &ring->slots[ring->start];
+		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
+		struct sk_buff *skb;
+		void *buf = slot->buf;
+		dma_addr_t dma_addr = slot->dma_addr;
+		u16 len, flags;
+
+		do {
+			/* Prepare new skb as replacement */
+			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
+				bgmac_dma_rx_poison_buf(dma_dev, slot);
+				break;
+			}
+
+			/* Unmap buffer to make it accessible to the CPU */
+			dma_unmap_single(dma_dev, dma_addr,
+					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+			/* Get info from the header */
+			len = le16_to_cpu(rx->len);
+			flags = le16_to_cpu(rx->flags);
+
+			/* Check for poison and drop or pass the packet */
+			if (len == 0xdead && flags == 0xbeef) {
+				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+					  ring->start);
+				put_page(virt_to_head_page(buf));
+				break;
+			}
+
+			if (len > BGMAC_RX_ALLOC_SIZE) {
+				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
+					  ring->start);
+				put_page(virt_to_head_page(buf));
+				break;
+			}
+
+			/* Omit CRC. */
+			len -= ETH_FCS_LEN;
+
+			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+			if (unlikely(!skb)) {
+				bgmac_err(bgmac, "build_skb failed\n");
+				put_page(virt_to_head_page(buf));
+				break;
+			}
+			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
+				BGMAC_RX_BUF_OFFSET + len);
+			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
+				 BGMAC_RX_BUF_OFFSET);
+
+			skb_checksum_none_assert(skb);
+			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+			napi_gro_receive(&bgmac->napi, skb);
+			handled++;
+		} while (0);
+
+		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
+
+		if (++ring->start >= BGMAC_RX_RING_SLOTS)
+			ring->start = 0;
+
+		if (handled >= weight) /* Should never be greater */
+			break;
+	}
+
+	bgmac_dma_rx_update_index(bgmac, ring);
+
+	return handled;
+}
+
+/* Does ring support unaligned addressing? */
+static bool bgmac_dma_unaligned(struct bgmac *bgmac,
+				struct bgmac_dma_ring *ring,
+				enum bgmac_dma_ring_type ring_type)
+{
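+	/* Probe by writing a ring base with non-zero low bits; if it reads
+	 * back non-zero, the core accepts unaligned ring addresses.
+	 */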
+	switch (ring_type) {
+	case BGMAC_DMA_RING_TX:
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+			    0xff0);
+		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
+			return true;
+		break;
+	case BGMAC_DMA_RING_RX:
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+			    0xff0);
+		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
+			return true;
+		break;
+	}
+	return false;
+}
+
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+				   struct bgmac_dma_ring *ring)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
+	struct bgmac_slot_info *slot;
+	int i;
+
+	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
+		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+
+		slot = &ring->slots[i];
+		dev_kfree_skb(slot->skb);
+
+		if (!slot->dma_addr)
+			continue;
+
+		if (slot->skb)
+			dma_unmap_single(dma_dev, slot->dma_addr,
+					 len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dma_dev, slot->dma_addr,
+				       len, DMA_TO_DEVICE);
+	}
+}
+
+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+				   struct bgmac_dma_ring *ring)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	struct bgmac_slot_info *slot;
+	int i;
+
+	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
+		slot = &ring->slots[i];
+		if (!slot->dma_addr)
+			continue;
+
+		dma_unmap_single(dma_dev, slot->dma_addr,
+				 BGMAC_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+		put_page(virt_to_head_page(slot->buf));
+		slot->dma_addr = 0;
+	}
+}
+
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+				     struct bgmac_dma_ring *ring,
+				     int num_slots)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	int size;
+
+	if (!ring->cpu_base)
+	    return;
+
+	/* Free ring of descriptors */
+	size = num_slots * sizeof(struct bgmac_dma_desc);
+	dma_free_coherent(dma_dev, size, ring->cpu_base,
+			  ring->dma_base);
+}
+
+static void bgmac_dma_cleanup(struct bgmac *bgmac)
+{
+	int i;
+
+	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+
+	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+}
+
+static void bgmac_dma_free(struct bgmac *bgmac)
+{
+	int i;
+
+	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
+					 BGMAC_TX_RING_SLOTS);
+
+	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
+					 BGMAC_RX_RING_SLOTS);
+}
+
+static int bgmac_dma_alloc(struct bgmac *bgmac)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	struct bgmac_dma_ring *ring;
+	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
+					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
+	int size; /* ring size: different for Tx and Rx */
+	int err;
+	int i;
+
+	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
+	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
+
+	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
+		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+		return -ENOTSUPP;
+	}
+
+	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+		ring = &bgmac->tx_ring[i];
+		ring->mmio_base = ring_base[i];
+
+		/* Alloc ring of descriptors */
+		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
+		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+						     &ring->dma_base,
+						     GFP_KERNEL);
+		if (!ring->cpu_base) {
+			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
+				  ring->mmio_base);
+			goto err_dma_free;
+		}
+
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_TX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
+		/* No need to alloc TX slots yet */
+	}
+
+	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+		ring = &bgmac->rx_ring[i];
+		ring->mmio_base = ring_base[i];
+
+		/* Alloc ring of descriptors */
+		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
+		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+						     &ring->dma_base,
+						     GFP_KERNEL);
+		if (!ring->cpu_base) {
+			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
+				  ring->mmio_base);
+			err = -ENOMEM;
+			goto err_dma_free;
+		}
+
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_RX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+	}
+
+	return 0;
+
+err_dma_free:
+	bgmac_dma_free(bgmac);
+	return -ENOMEM;
+}
+
+static int bgmac_dma_init(struct bgmac *bgmac)
+{
+	struct bgmac_dma_ring *ring;
+	int i, err;
+
+	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+		ring = &bgmac->tx_ring[i];
+
+		if (!ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+			    lower_32_bits(ring->dma_base));
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
+			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
+
+		ring->start = 0;
+		ring->end = 0;	/* Points the slot that should *not* be read */
+	}
+
+	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+		int j;
+
+		ring = &bgmac->rx_ring[i];
+
+		if (!ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+			    lower_32_bits(ring->dma_base));
+		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
+			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
+
+		ring->start = 0;
+		ring->end = 0;
+		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
+			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
+			if (err)
+				goto error;
+
+			bgmac_dma_rx_setup_desc(bgmac, ring, j);
+		}
+
+		bgmac_dma_rx_update_index(bgmac, ring);
+	}
+
+	return 0;
+
+error:
+	bgmac_dma_cleanup(bgmac);
+	return err;
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+{
+	struct bcma_device *core;
+	u16 phy_access_addr;
+	u16 phy_ctl_addr;
+	u32 tmp;
+
+	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+		core = bgmac->core->bus->drv_gmac_cmn.core;
+		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+	} else {
+		core = bgmac->core;
+		phy_access_addr = BGMAC_PHY_ACCESS;
+		phy_ctl_addr = BGMAC_PHY_CNTL;
+	}
+
+	tmp = bcma_read32(core, phy_ctl_addr);
+	tmp &= ~BGMAC_PC_EPA_MASK;
+	tmp |= phyaddr;
+	bcma_write32(core, phy_ctl_addr, tmp);
+
+	tmp = BGMAC_PA_START;
+	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+	tmp |= reg << BGMAC_PA_REG_SHIFT;
+	bcma_write32(core, phy_access_addr, tmp);
+
+	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
+			  phyaddr, reg);
+		return 0xffff;
+	}
+
+	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+{
+	struct bcma_device *core;
+	u16 phy_access_addr;
+	u16 phy_ctl_addr;
+	u32 tmp;
+
+	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+		core = bgmac->core->bus->drv_gmac_cmn.core;
+		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+	} else {
+		core = bgmac->core;
+		phy_access_addr = BGMAC_PHY_ACCESS;
+		phy_ctl_addr = BGMAC_PHY_CNTL;
+	}
+
+	tmp = bcma_read32(core, phy_ctl_addr);
+	tmp &= ~BGMAC_PC_EPA_MASK;
+	tmp |= phyaddr;
+	bcma_write32(core, phy_ctl_addr, tmp);
+
+	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+		bgmac_warn(bgmac, "Error setting MDIO int\n");
+
+	tmp = BGMAC_PA_START;
+	tmp |= BGMAC_PA_WRITE;
+	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+	tmp |= reg << BGMAC_PA_REG_SHIFT;
+	tmp |= value;
+	bcma_write32(core, phy_access_addr, tmp);
+
+	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
+			  phyaddr, reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bgmac_phy_init(struct bgmac *bgmac)
+{
+	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+	u8 i;
+
+	if (ci->id == BCMA_CHIP_ID_BCM5356) {
+		for (i = 0; i < 5; i++) {
+			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
+			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+		}
+	}
+	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+		for (i = 0; i < 5; i++) {
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
+			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
+			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
+			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
+			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
+			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+		}
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static void bgmac_phy_reset(struct bgmac *bgmac)
+{
+	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+		return;
+
+	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
+	udelay(100);
+	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
+		bgmac_err(bgmac, "PHY reset failed\n");
+	bgmac_phy_init(bgmac);
+}
+
+/**************************************************
+ * Chip ops
+ **************************************************/
+
+/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
+ * there is nothing to change? Try it after stabilizing the driver.
+ */
+static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+				 bool force)
+{
+	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+	u32 new_val = (cmdcfg & mask) | set;
+
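+	/* Bracket the CMDCFG update with the software reset (SR) bit: set SR,
+	 * update the register if anything changed (or @force is set), then
+	 * clear SR again.
+	 */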
+	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+	udelay(2);
+
+	if (new_val != cmdcfg || force)
+		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+
+	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+	udelay(2);
+}
+
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+	u32 tmp;
+
+	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+	tmp = (addr[4] << 8) | addr[5];
+	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	if (net_dev->flags & IFF_PROMISC)
+		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+	else
+		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
+#if 0 /* We don't use these registers yet */
+static void bgmac_chip_stats_update(struct bgmac *bgmac)
+{
+	int i;
+
+	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+			bgmac->mib_tx_regs[i] =
+				bgmac_read(bgmac,
+					   BGMAC_TX_GOOD_OCTETS + (i * 4));
+		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+			bgmac->mib_rx_regs[i] =
+				bgmac_read(bgmac,
+					   BGMAC_RX_GOOD_OCTETS + (i * 4));
+	}
+
+	/* TODO: what else? how to handle BCM4706? Specs are needed */
+}
+#endif
+
+static void bgmac_clear_mib(struct bgmac *bgmac)
+{
+	int i;
+
+	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+		return;
+
+	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
+	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
+	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
+static void bgmac_mac_speed(struct bgmac *bgmac)
+{
+	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+	u32 set = 0;
+
+	switch (bgmac->mac_speed) {
+	case SPEED_10:
+		set |= BGMAC_CMDCFG_ES_10;
+		break;
+	case SPEED_100:
+		set |= BGMAC_CMDCFG_ES_100;
+		break;
+	case SPEED_1000:
+		set |= BGMAC_CMDCFG_ES_1000;
+		break;
+	case SPEED_2500:
+		set |= BGMAC_CMDCFG_ES_2500;
+		break;
+	default:
+		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+	}
+
+	if (bgmac->mac_duplex == DUPLEX_HALF)
+		set |= BGMAC_CMDCFG_HD;
+
+	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+}
+
+static void bgmac_miiconfig(struct bgmac *bgmac)
+{
+	struct bcma_device *core = bgmac->core;
+	struct bcma_chipinfo *ci = &core->bus->chipinfo;
+	u8 imode;
+
+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018) {
+		bcma_awrite32(core, BCMA_IOCTL,
+			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+			      BGMAC_BCMA_IOCTL_SW_CLKEN);
+		bgmac->mac_speed = SPEED_2500;
+		bgmac->mac_duplex = DUPLEX_FULL;
+		bgmac_mac_speed(bgmac);
+	} else {
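+		/* Derive the interface mode (MM field) from DEV_STATUS and
+		 * apply a 100 Mbit/s full-duplex default for modes 0 and 1.
+		 */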
+		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+		if (imode == 0 || imode == 1) {
+			bgmac->mac_speed = SPEED_100;
+			bgmac->mac_duplex = DUPLEX_FULL;
+			bgmac_mac_speed(bgmac);
+		}
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
+static void bgmac_chip_reset(struct bgmac *bgmac)
+{
+	struct bcma_device *core = bgmac->core;
+	struct bcma_bus *bus = core->bus;
+	struct bcma_chipinfo *ci = &bus->chipinfo;
+	u32 flags;
+	u32 iost;
+	int i;
+
+	if (bcma_core_is_enabled(core)) {
+		if (!bgmac->stats_grabbed) {
+			/* bgmac_chip_stats_update(bgmac); */
+			bgmac->stats_grabbed = true;
+		}
+
+		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
+
+		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+		udelay(1);
+
+		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
+
+		/* TODO: Clear software multicast filter list */
+	}
+
+	iost = bcma_aread32(core, BCMA_IOST);
+	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
+		iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+	/* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+	if (ci->id != BCMA_CHIP_ID_BCM4707) {
+		flags = 0;
+		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+			if (!bgmac->has_robosw)
+				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+		}
+		bcma_core_enable(core, flags);
+	}
+
+	/* Request Misc PLL for corerev > 2 */
+	if (core->id.rev > 2 &&
+	    ci->id != BCMA_CHIP_ID_BCM4707 &&
+	    ci->id != BCMA_CHIP_ID_BCM53018) {
+		bgmac_set(bgmac, BCMA_CLKCTLST,
+			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+				 1000);
+	}
+
+	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+	    ci->id == BCMA_CHIP_ID_BCM4749 ||
+	    ci->id == BCMA_CHIP_ID_BCM53572) {
+		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+		u8 et_swtype = 0;
+		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
+			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
+		char buf[4];
+
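+		/* The nvram "et_swtype" variable, if present, overrides the
+		 * switch/interface type; its value is shifted into the
+		 * CHIPCTL_1 SW_TYPE field.
+		 */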
+		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
+			if (kstrtou8(buf, 0, &et_swtype))
+				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
+					  buf);
+			et_swtype &= 0x0f;
+			et_swtype <<= 4;
+			sw_type = et_swtype;
+		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
+			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
+			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
+		}
+		bcma_chipco_chipctl_maskset(cc, 1,
+					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+					    sw_type);
+	}
+
+	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+		bcma_awrite32(core, BCMA_IOCTL,
+			      bcma_aread32(core, BCMA_IOCTL) &
+			      ~BGMAC_BCMA_IOCTL_SW_RESET);
+
+	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
+	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
+	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has
+	 * to be kept set until taking the MAC out of the reset.
+	 */
+	bgmac_cmdcfg_maskset(bgmac,
+			     ~(BGMAC_CMDCFG_TE |
+			       BGMAC_CMDCFG_RE |
+			       BGMAC_CMDCFG_RPI |
+			       BGMAC_CMDCFG_TAI |
+			       BGMAC_CMDCFG_HD |
+			       BGMAC_CMDCFG_ML |
+			       BGMAC_CMDCFG_CFE |
+			       BGMAC_CMDCFG_RL |
+			       BGMAC_CMDCFG_RED |
+			       BGMAC_CMDCFG_PE |
+			       BGMAC_CMDCFG_TPI |
+			       BGMAC_CMDCFG_PAD_EN |
+			       BGMAC_CMDCFG_PF),
+			     BGMAC_CMDCFG_PROM |
+			     BGMAC_CMDCFG_NLC |
+			     BGMAC_CMDCFG_CFE |
+			     BGMAC_CMDCFG_SR(core->id.rev),
+			     false);
+	bgmac->mac_speed = SPEED_UNKNOWN;
+	bgmac->mac_duplex = DUPLEX_UNKNOWN;
+
+	bgmac_clear_mib(bgmac);
+	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
+		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
+			       BCMA_GMAC_CMN_PC_MTE);
+	else
+		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
+	bgmac_miiconfig(bgmac);
+	bgmac_phy_init(bgmac);
+
+	netdev_reset_queue(bgmac->net_dev);
+}
+
+static void bgmac_chip_intrs_on(struct bgmac *bgmac)
+{
+	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
+}
+
+static void bgmac_chip_intrs_off(struct bgmac *bgmac)
+{
+	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+	bgmac_read(bgmac, BGMAC_INT_MASK);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
+static void bgmac_enable(struct bgmac *bgmac)
+{
+	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+	u32 cmdcfg;
+	u32 mode;
+	u32 rxq_ctl;
+	u32 fl_ctl;
+	u16 bp_clk;
+	u8 mdp;
+
+	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
+			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
+	udelay(2);
+	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
+	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+
+	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+		BGMAC_DS_MM_SHIFT;
+	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
+		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
+					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+	switch (ci->id) {
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM4749:
+	case BCMA_CHIP_ID_BCM53572:
+	case BCMA_CHIP_ID_BCM4716:
+	case BCMA_CHIP_ID_BCM47162:
+		fl_ctl = 0x03cb04cb;
+		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+		    ci->id == BCMA_CHIP_ID_BCM4749 ||
+		    ci->id == BCMA_CHIP_ID_BCM53572)
+			fl_ctl = 0x2300e1;
+		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
+		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+		break;
+	}
+
+	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+	    ci->id != BCMA_CHIP_ID_BCM53018) {
+		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
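+		/* Program the MDP field from the backplane clock:
+		 * mdp = bp_clk[MHz] * 128 / 1000 - 3.
+		 */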
+		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+				1000000;
+		mdp = (bp_clk * 128 / 1000) - 3;
+		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+	}
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
+static void bgmac_chip_init(struct bgmac *bgmac)
+{
+	/* 1 interrupt per received frame */
+	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
+
+	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
+	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+
+	bgmac_set_rx_mode(bgmac->net_dev);
+
+	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
+
+	if (bgmac->loopback)
+		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+	else
+		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+
+	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+
+	bgmac_chip_intrs_on(bgmac);
+
+	bgmac_enable(bgmac);
+}
+
+static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
+{
+	struct bgmac *bgmac = netdev_priv(dev_id);
+
+	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
+	int_status &= bgmac->int_mask;
+
+	if (!int_status)
+		return IRQ_NONE;
+
+	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
+	if (int_status)
+		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
+
+	/* Disable new interrupts until handling existing ones */
+	bgmac_chip_intrs_off(bgmac);
+
+	napi_schedule(&bgmac->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int bgmac_poll(struct napi_struct *napi, int weight)
+{
+	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
+	int handled = 0;
+
+	/* Ack */
+	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
+
+	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
+	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
+
+	/* Poll again if more events arrived in the meantime */
+	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
+		return weight;
+
+	if (handled < weight) {
+		napi_complete(napi);
+		bgmac_chip_intrs_on(bgmac);
+	}
+
+	return handled;
+}
+
+/**************************************************
+ * net_device_ops
+ **************************************************/
+
+static int bgmac_open(struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+	int err = 0;
+
+	bgmac_chip_reset(bgmac);
+
+	err = bgmac_dma_init(bgmac);
+	if (err)
+		return err;
+
+	/* Specs mention reclaiming rings here, but we do that in DMA init */
+	bgmac_chip_init(bgmac);
+
+	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+			  KBUILD_MODNAME, net_dev);
+	if (err < 0) {
+		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+		bgmac_dma_cleanup(bgmac);
+		return err;
+	}
+	napi_enable(&bgmac->napi);
+
+	phy_start(bgmac->phy_dev);
+
+	netif_start_queue(net_dev);
+
+	return 0;
+}
+
+static int bgmac_stop(struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	netif_carrier_off(net_dev);
+
+	phy_stop(bgmac->phy_dev);
+
+	napi_disable(&bgmac->napi);
+	bgmac_chip_intrs_off(bgmac);
+	free_irq(bgmac->core->irq, net_dev);
+
+	bgmac_chip_reset(bgmac);
+	bgmac_dma_cleanup(bgmac);
+
+	return 0;
+}
+
+static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
+				    struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+	struct bgmac_dma_ring *ring;
+
+	/* No QOS support yet */
+	ring = &bgmac->tx_ring[0];
+	return bgmac_dma_tx_add(bgmac, ring, skb);
+}
+
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+	int ret;
+
+	ret = eth_prepare_mac_addr_change(net_dev, addr);
+	if (ret < 0)
+		return ret;
+	bgmac_write_mac_address(bgmac, (u8 *)addr);
+	eth_commit_mac_addr_change(net_dev, addr);
+	return 0;
+}
+
+static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	if (!netif_running(net_dev))
+		return -EINVAL;
+
+	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
+}
+
+static const struct net_device_ops bgmac_netdev_ops = {
+	.ndo_open		= bgmac_open,
+	.ndo_stop		= bgmac_stop,
+	.ndo_start_xmit		= bgmac_start_xmit,
+	.ndo_set_rx_mode	= bgmac_set_rx_mode,
+	.ndo_set_mac_address	= bgmac_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl           = bgmac_ioctl,
+};
+
+/**************************************************
+ * ethtool_ops
+ **************************************************/
+
+static int bgmac_get_settings(struct net_device *net_dev,
+			      struct ethtool_cmd *cmd)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	return phy_ethtool_gset(bgmac->phy_dev, cmd);
+}
+
+static int bgmac_set_settings(struct net_device *net_dev,
+			      struct ethtool_cmd *cmd)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+
+	return phy_ethtool_sset(bgmac->phy_dev, cmd);
+}
+
+static void bgmac_get_drvinfo(struct net_device *net_dev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bgmac_ethtool_ops = {
+	.get_settings		= bgmac_get_settings,
+	.set_settings		= bgmac_set_settings,
+	.get_drvinfo		= bgmac_get_drvinfo,
+};
+
+/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+	struct phy_device *phy_dev = bgmac->phy_dev;
+	bool update = false;
+
+	if (phy_dev->link) {
+		if (phy_dev->speed != bgmac->mac_speed) {
+			bgmac->mac_speed = phy_dev->speed;
+			update = true;
+		}
+
+		if (phy_dev->duplex != bgmac->mac_duplex) {
+			bgmac->mac_duplex = phy_dev->duplex;
+			update = true;
+		}
+	}
+
+	if (update) {
+		bgmac_mac_speed(bgmac);
+		phy_print_status(phy_dev);
+	}
+}
+
+static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+{
+	struct fixed_phy_status fphy_status = {
+		.link = 1,
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_FULL,
+	};
+	struct phy_device *phy_dev;
+	int err;
+
+	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+	if (!phy_dev || IS_ERR(phy_dev)) {
+		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+		return -ENODEV;
+	}
+
+	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
+				 PHY_INTERFACE_MODE_MII);
+	if (err) {
+		bgmac_err(bgmac, "Connecting PHY failed\n");
+		return err;
+	}
+
+	bgmac->phy_dev = phy_dev;
+
+	return err;
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
+	char bus_id[MII_BUS_ID_SIZE + 3];
+	int i, err = 0;
+
+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018)
+		return bgmac_fixed_phy_register(bgmac);
+
+	mii_bus = mdiobus_alloc();
+	if (!mii_bus)
+		return -ENOMEM;
+
+	mii_bus->name = "bgmac mii bus";
+	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+		bgmac->core->core_unit);
+	mii_bus->priv = bgmac;
+	mii_bus->read = bgmac_mii_read;
+	mii_bus->write = bgmac_mii_write;
+	mii_bus->parent = &bgmac->core->dev;
+	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+	if (!mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_free_bus;
+	}
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(mii_bus);
+	if (err) {
+		bgmac_err(bgmac, "Registration of mii bus failed\n");
+		goto err_free_irq;
+	}
+
+	bgmac->mii_bus = mii_bus;
+
+	/* Connect to the PHY */
+	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+		 bgmac->phyaddr);
+	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+			      PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phy_dev)) {
+		bgmac_err(bgmac, "PHY connection failed\n");
+		err = PTR_ERR(phy_dev);
+		goto err_unregister_bus;
+	}
+	bgmac->phy_dev = phy_dev;
+
+	return err;
+
+err_unregister_bus:
+	mdiobus_unregister(mii_bus);
+err_free_irq:
+	kfree(mii_bus->irq);
+err_free_bus:
+	mdiobus_free(mii_bus);
+	return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+	struct mii_bus *mii_bus = bgmac->mii_bus;
+
+	mdiobus_unregister(mii_bus);
+	kfree(mii_bus->irq);
+	mdiobus_free(mii_bus);
+}
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+	struct bcma_chipinfo *ci = &core->bus->chipinfo;
+	struct net_device *net_dev;
+	struct bgmac *bgmac;
+	struct ssb_sprom *sprom = &core->bus->sprom;
+	u8 *mac;
+	int err;
+
+	switch (core->core_unit) {
+	case 0:
+		mac = sprom->et0mac;
+		break;
+	case 1:
+		mac = sprom->et1mac;
+		break;
+	case 2:
+		mac = sprom->et2mac;
+		break;
+	default:
+		pr_err("Unsupported core_unit %d\n", core->core_unit);
+		return -ENOTSUPP;
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+		eth_random_addr(mac);
+		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+	}
+
+	/* This (reset &) enable is not present in the specs or the reference
+	 * driver, but Broadcom does it in arch PCI code when enabling the fake
+	 * PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
+	/* Allocation and references */
+	net_dev = alloc_etherdev(sizeof(*bgmac));
+	if (!net_dev)
+		return -ENOMEM;
+	net_dev->netdev_ops = &bgmac_netdev_ops;
+	net_dev->irq = core->irq;
+	net_dev->ethtool_ops = &bgmac_ethtool_ops;
+	bgmac = netdev_priv(net_dev);
+	bgmac->net_dev = net_dev;
+	bgmac->core = core;
+	bcma_set_drvdata(core, bgmac);
+
+	/* Defaults */
+	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
+
+	/* On BCM4706 we need common core to access PHY */
+	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+	    !core->bus->drv_gmac_cmn.core) {
+		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
+		err = -ENODEV;
+		goto err_netdev_free;
+	}
+	bgmac->cmn = core->bus->drv_gmac_cmn.core;
+
+	switch (core->core_unit) {
+	case 0:
+		bgmac->phyaddr = sprom->et0phyaddr;
+		break;
+	case 1:
+		bgmac->phyaddr = sprom->et1phyaddr;
+		break;
+	case 2:
+		bgmac->phyaddr = sprom->et2phyaddr;
+		break;
+	}
+	bgmac->phyaddr &= BGMAC_PHY_MASK;
+	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+		bgmac_err(bgmac, "No PHY found\n");
+		err = -ENODEV;
+		goto err_netdev_free;
+	}
+	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+		bgmac_err(bgmac, "PCI setup not implemented\n");
+		err = -ENOTSUPP;
+		goto err_netdev_free;
+	}
+
+	bgmac_chip_reset(bgmac);
+
+	/* For Northstar, we have to take all GMAC cores out of reset */
+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018) {
+		struct bcma_device *ns_core;
+		int ns_gmac;
+
+		/* Northstar has 4 GMAC cores */
+		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+			/* As a Northstar requirement, we have to reset all
+			 * GMACs before accessing any of them.
+			 * bgmac_chip_reset() calls bcma_core_enable() for this
+			 * core only, leaving the other three GMACs in reset.
+			 * We enable them here.
+			 */
+			ns_core = bcma_find_core_unit(core->bus,
+						      BCMA_CORE_MAC_GBIT,
+						      ns_gmac);
+			if (ns_core && !bcma_core_is_enabled(ns_core))
+				bcma_core_enable(ns_core, 0);
+		}
+	}
+
+	err = bgmac_dma_alloc(bgmac);
+	if (err) {
+		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+		goto err_netdev_free;
+	}
+
+	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
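+	/* The "et0_no_txint" nvram variable disables TX interrupts; TX
+	 * descriptors are then reclaimed opportunistically from the NAPI poll.
+	 */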
+	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
+		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
+
+	/* TODO: reset the external phy. Specs are needed */
+	bgmac_phy_reset(bgmac);
+
+	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+			       BGMAC_BFL_ENETROBO);
+	if (bgmac->has_robosw)
+		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
+
+	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+
+	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
+	err = bgmac_mii_register(bgmac);
+	if (err) {
+		bgmac_err(bgmac, "Cannot register MDIO\n");
+		goto err_dma_free;
+	}
+
+	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	net_dev->hw_features = net_dev->features;
+	net_dev->vlan_features = net_dev->features;
+
+	err = register_netdev(bgmac->net_dev);
+	if (err) {
+		bgmac_err(bgmac, "Cannot register net device\n");
+		goto err_mii_unregister;
+	}
+
+	netif_carrier_off(net_dev);
+
+	return 0;
+
+err_mii_unregister:
+	bgmac_mii_unregister(bgmac);
+err_dma_free:
+	bgmac_dma_free(bgmac);
+
+err_netdev_free:
+	bcma_set_drvdata(core, NULL);
+	free_netdev(net_dev);
+
+	return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+	struct bgmac *bgmac = bcma_get_drvdata(core);
+
+	unregister_netdev(bgmac->net_dev);
+	bgmac_mii_unregister(bgmac);
+	netif_napi_del(&bgmac->napi);
+	bgmac_dma_free(bgmac);
+	bcma_set_drvdata(core, NULL);
+	free_netdev(bgmac->net_dev);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= bgmac_bcma_tbl,
+	.probe		= bgmac_probe,
+	.remove		= bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+	int err;
+
+	err = bcma_driver_register(&bgmac_bcma_driver);
+	if (err)
+		return err;
+	pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+	return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+	bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
new file mode 100644
index 0000000..4fbb093
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -0,0 +1,494 @@
+#ifndef _BGMAC_H
+#define _BGMAC_H
+
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+
+#define bgmac_err(bgmac, fmt, ...) \
+	dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_warn(bgmac, fmt, ...) \
+	dev_warn(&(bgmac)->core->dev, fmt,  ##__VA_ARGS__)
+#define bgmac_info(bgmac, fmt, ...) \
+	dev_info(&(bgmac)->core->dev, fmt,  ##__VA_ARGS__)
+#define bgmac_dbg(bgmac, fmt, ...) \
+	dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include <linux/netdevice.h>
+
+#define BGMAC_DEV_CTL				0x000
+#define  BGMAC_DC_TSM				0x00000002
+#define  BGMAC_DC_CFCO				0x00000004
+#define  BGMAC_DC_RLSS				0x00000008
+#define  BGMAC_DC_MROR				0x00000010
+#define  BGMAC_DC_FCM_MASK			0x00000060
+#define  BGMAC_DC_FCM_SHIFT			5
+#define  BGMAC_DC_NAE				0x00000080
+#define  BGMAC_DC_TF				0x00000100
+#define  BGMAC_DC_RDS_MASK			0x00030000
+#define  BGMAC_DC_RDS_SHIFT			16
+#define  BGMAC_DC_TDS_MASK			0x000c0000
+#define  BGMAC_DC_TDS_SHIFT			18
+#define BGMAC_DEV_STATUS			0x004		/* Configuration of the interface */
+#define  BGMAC_DS_RBF				0x00000001
+#define  BGMAC_DS_RDF				0x00000002
+#define  BGMAC_DS_RIF				0x00000004
+#define  BGMAC_DS_TBF				0x00000008
+#define  BGMAC_DS_TDF				0x00000010
+#define  BGMAC_DS_TIF				0x00000020
+#define  BGMAC_DS_PO				0x00000040
+#define  BGMAC_DS_MM_MASK			0x00000300	/* Mode of the interface */
+#define  BGMAC_DS_MM_SHIFT			8
+#define BGMAC_BIST_STATUS			0x00c
+#define BGMAC_INT_STATUS			0x020		/* Interrupt status */
+#define  BGMAC_IS_MRO				0x00000001
+#define  BGMAC_IS_MTO				0x00000002
+#define  BGMAC_IS_TFD				0x00000004
+#define  BGMAC_IS_LS				0x00000008
+#define  BGMAC_IS_MDIO				0x00000010
+#define  BGMAC_IS_MR				0x00000020
+#define  BGMAC_IS_MT				0x00000040
+#define  BGMAC_IS_TO				0x00000080
+#define  BGMAC_IS_DESC_ERR			0x00000400	/* Descriptor error */
+#define  BGMAC_IS_DATA_ERR			0x00000800	/* Data error */
+#define  BGMAC_IS_DESC_PROT_ERR			0x00001000	/* Descriptor protocol error */
+#define  BGMAC_IS_RX_DESC_UNDERF		0x00002000	/* Receive descriptor underflow */
+#define  BGMAC_IS_RX_F_OVERF			0x00004000	/* Receive FIFO overflow */
+#define  BGMAC_IS_TX_F_UNDERF			0x00008000	/* Transmit FIFO underflow */
+#define  BGMAC_IS_RX				0x00010000	/* Interrupt for RX queue 0 */
+#define  BGMAC_IS_TX0				0x01000000	/* Interrupt for TX queue 0 */
+#define  BGMAC_IS_TX1				0x02000000	/* Interrupt for TX queue 1 */
+#define  BGMAC_IS_TX2				0x04000000	/* Interrupt for TX queue 2 */
+#define  BGMAC_IS_TX3				0x08000000	/* Interrupt for TX queue 3 */
+#define  BGMAC_IS_TX_MASK			0x0f000000
+#define  BGMAC_IS_INTMASK			0x0f01fcff
+#define  BGMAC_IS_ERRMASK			0x0000fc00
+#define BGMAC_INT_MASK				0x024		/* Interrupt mask */
+#define BGMAC_GP_TIMER				0x028
+#define BGMAC_INT_RECV_LAZY			0x100
+#define  BGMAC_IRL_TO_MASK			0x00ffffff
+#define  BGMAC_IRL_FC_MASK			0xff000000
+#define  BGMAC_IRL_FC_SHIFT			24		/* Shift the number of interrupts triggered per received frame */
+#define BGMAC_FLOW_CTL_THRESH			0x104		/* Flow control thresholds */
+#define BGMAC_WRRTHRESH				0x108
+#define BGMAC_GMAC_IDLE_CNT_THRESH		0x10c
+#define BGMAC_PHY_ACCESS			0x180		/* PHY access address */
+#define  BGMAC_PA_DATA_MASK			0x0000ffff
+#define  BGMAC_PA_ADDR_MASK			0x001f0000
+#define  BGMAC_PA_ADDR_SHIFT			16
+#define  BGMAC_PA_REG_MASK			0x1f000000
+#define  BGMAC_PA_REG_SHIFT			24
+#define  BGMAC_PA_WRITE				0x20000000
+#define  BGMAC_PA_START				0x40000000
+#define BGMAC_PHY_CNTL				0x188		/* PHY control address */
+#define  BGMAC_PC_EPA_MASK			0x0000001f
+#define  BGMAC_PC_MCT_MASK			0x007f0000
+#define  BGMAC_PC_MCT_SHIFT			16
+#define  BGMAC_PC_MTE				0x00800000
+#define BGMAC_TXQ_CTL				0x18c
+#define  BGMAC_TXQ_CTL_DBT_MASK			0x00000fff
+#define  BGMAC_TXQ_CTL_DBT_SHIFT		0
+#define BGMAC_RXQ_CTL				0x190
+#define  BGMAC_RXQ_CTL_DBT_MASK			0x00000fff
+#define  BGMAC_RXQ_CTL_DBT_SHIFT		0
+#define  BGMAC_RXQ_CTL_PTE			0x00001000
+#define  BGMAC_RXQ_CTL_MDP_MASK			0x3f000000
+#define  BGMAC_RXQ_CTL_MDP_SHIFT		24
+#define BGMAC_GPIO_SELECT			0x194
+#define BGMAC_GPIO_OUTPUT_EN			0x198
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
+#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ	0x00000100
+#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_ST	0x01000000
+
+#define BGMAC_HW_WAR				0x1e4
+#define BGMAC_PWR_CTL				0x1e8
+#define BGMAC_DMA_BASE0				0x200		/* Tx and Rx controller */
+#define BGMAC_DMA_BASE1				0x240		/* Tx controller only */
+#define BGMAC_DMA_BASE2				0x280		/* Tx controller only */
+#define BGMAC_DMA_BASE3				0x2C0		/* Tx controller only */
+#define BGMAC_TX_GOOD_OCTETS			0x300
+#define BGMAC_TX_GOOD_OCTETS_HIGH		0x304
+#define BGMAC_TX_GOOD_PKTS			0x308
+#define BGMAC_TX_OCTETS				0x30c
+#define BGMAC_TX_OCTETS_HIGH			0x310
+#define BGMAC_TX_PKTS				0x314
+#define BGMAC_TX_BROADCAST_PKTS			0x318
+#define BGMAC_TX_MULTICAST_PKTS			0x31c
+#define BGMAC_TX_LEN_64				0x320
+#define BGMAC_TX_LEN_65_TO_127			0x324
+#define BGMAC_TX_LEN_128_TO_255			0x328
+#define BGMAC_TX_LEN_256_TO_511			0x32c
+#define BGMAC_TX_LEN_512_TO_1023		0x330
+#define BGMAC_TX_LEN_1024_TO_1522		0x334
+#define BGMAC_TX_LEN_1523_TO_2047		0x338
+#define BGMAC_TX_LEN_2048_TO_4095		0x33c
+#define BGMAC_TX_LEN_4095_TO_8191		0x340
+#define BGMAC_TX_LEN_8192_TO_MAX		0x344
+#define BGMAC_TX_JABBER_PKTS			0x348		/* Error */
+#define BGMAC_TX_OVERSIZE_PKTS			0x34c		/* Error */
+#define BGMAC_TX_FRAGMENT_PKTS			0x350
+#define BGMAC_TX_UNDERRUNS			0x354		/* Error */
+#define BGMAC_TX_TOTAL_COLS			0x358
+#define BGMAC_TX_SINGLE_COLS			0x35c
+#define BGMAC_TX_MULTIPLE_COLS			0x360
+#define BGMAC_TX_EXCESSIVE_COLS			0x364		/* Error */
+#define BGMAC_TX_LATE_COLS			0x368		/* Error */
+#define BGMAC_TX_DEFERED			0x36c
+#define BGMAC_TX_CARRIER_LOST			0x370
+#define BGMAC_TX_PAUSE_PKTS			0x374
+#define BGMAC_TX_UNI_PKTS			0x378
+#define BGMAC_TX_Q0_PKTS			0x37c
+#define BGMAC_TX_Q0_OCTETS			0x380
+#define BGMAC_TX_Q0_OCTETS_HIGH			0x384
+#define BGMAC_TX_Q1_PKTS			0x388
+#define BGMAC_TX_Q1_OCTETS			0x38c
+#define BGMAC_TX_Q1_OCTETS_HIGH			0x390
+#define BGMAC_TX_Q2_PKTS			0x394
+#define BGMAC_TX_Q2_OCTETS			0x398
+#define BGMAC_TX_Q2_OCTETS_HIGH			0x39c
+#define BGMAC_TX_Q3_PKTS			0x3a0
+#define BGMAC_TX_Q3_OCTETS			0x3a4
+#define BGMAC_TX_Q3_OCTETS_HIGH			0x3a8
+#define BGMAC_RX_GOOD_OCTETS			0x3b0
+#define BGMAC_RX_GOOD_OCTETS_HIGH		0x3b4
+#define BGMAC_RX_GOOD_PKTS			0x3b8
+#define BGMAC_RX_OCTETS				0x3bc
+#define BGMAC_RX_OCTETS_HIGH			0x3c0
+#define BGMAC_RX_PKTS				0x3c4
+#define BGMAC_RX_BROADCAST_PKTS			0x3c8
+#define BGMAC_RX_MULTICAST_PKTS			0x3cc
+#define BGMAC_RX_LEN_64				0x3d0
+#define BGMAC_RX_LEN_65_TO_127			0x3d4
+#define BGMAC_RX_LEN_128_TO_255			0x3d8
+#define BGMAC_RX_LEN_256_TO_511			0x3dc
+#define BGMAC_RX_LEN_512_TO_1023		0x3e0
+#define BGMAC_RX_LEN_1024_TO_1522		0x3e4
+#define BGMAC_RX_LEN_1523_TO_2047		0x3e8
+#define BGMAC_RX_LEN_2048_TO_4095		0x3ec
+#define BGMAC_RX_LEN_4095_TO_8191		0x3f0
+#define BGMAC_RX_LEN_8192_TO_MAX		0x3f4
+#define BGMAC_RX_JABBER_PKTS			0x3f8		/* Error */
+#define BGMAC_RX_OVERSIZE_PKTS			0x3fc		/* Error */
+#define BGMAC_RX_FRAGMENT_PKTS			0x400
+#define BGMAC_RX_MISSED_PKTS			0x404		/* Error */
+#define BGMAC_RX_CRC_ALIGN_ERRS			0x408		/* Error */
+#define BGMAC_RX_UNDERSIZE			0x40c		/* Error */
+#define BGMAC_RX_CRC_ERRS			0x410		/* Error */
+#define BGMAC_RX_ALIGN_ERRS			0x414		/* Error */
+#define BGMAC_RX_SYMBOL_ERRS			0x418		/* Error */
+#define BGMAC_RX_PAUSE_PKTS			0x41c
+#define BGMAC_RX_NONPAUSE_PKTS			0x420
+#define BGMAC_RX_SACHANGES			0x424
+#define BGMAC_RX_UNI_PKTS			0x428
+#define BGMAC_UNIMAC_VERSION			0x800
+#define BGMAC_HDBKP_CTL				0x804
+#define BGMAC_CMDCFG				0x808		/* Configuration */
+#define  BGMAC_CMDCFG_TE			0x00000001	/* Set to activate TX */
+#define  BGMAC_CMDCFG_RE			0x00000002	/* Set to activate RX */
+#define  BGMAC_CMDCFG_ES_MASK			0x0000000c	/* Ethernet speed see gmac_speed */
+#define   BGMAC_CMDCFG_ES_10			0x00000000
+#define   BGMAC_CMDCFG_ES_100			0x00000004
+#define   BGMAC_CMDCFG_ES_1000			0x00000008
+#define   BGMAC_CMDCFG_ES_2500			0x0000000C
+#define  BGMAC_CMDCFG_PROM			0x00000010	/* Set to activate promiscuous mode */
+#define  BGMAC_CMDCFG_PAD_EN			0x00000020
+#define  BGMAC_CMDCFG_CF			0x00000040
+#define  BGMAC_CMDCFG_PF			0x00000080
+#define  BGMAC_CMDCFG_RPI			0x00000100	/* Unset to enable 802.3x tx flow control */
+#define  BGMAC_CMDCFG_TAI			0x00000200
+#define  BGMAC_CMDCFG_HD			0x00000400	/* Set if in half duplex mode */
+#define  BGMAC_CMDCFG_HD_SHIFT			10
+#define  BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core revs other than 4 */
+#define  BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
+#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define  BGMAC_CMDCFG_ML			0x00008000	/* Set to activate mac loopback mode */
+#define  BGMAC_CMDCFG_AE			0x00400000
+#define  BGMAC_CMDCFG_CFE			0x00800000
+#define  BGMAC_CMDCFG_NLC			0x01000000
+#define  BGMAC_CMDCFG_RL			0x02000000
+#define  BGMAC_CMDCFG_RED			0x04000000
+#define  BGMAC_CMDCFG_PE			0x08000000
+#define  BGMAC_CMDCFG_TPI			0x10000000
+#define  BGMAC_CMDCFG_AT			0x20000000
+#define BGMAC_MACADDR_HIGH			0x80c		/* High 4 octets of own mac address */
+#define BGMAC_MACADDR_LOW			0x810		/* Low 2 octets of own mac address */
+#define BGMAC_RXMAX_LENGTH			0x814		/* Max receive frame length with vlan tag */
+#define BGMAC_PAUSEQUANTA			0x818
+#define BGMAC_MAC_MODE				0x844
+#define BGMAC_OUTERTAG				0x848
+#define BGMAC_INNERTAG				0x84c
+#define BGMAC_TXIPG				0x85c
+#define BGMAC_PAUSE_CTL				0xb30
+#define BGMAC_TX_FLUSH				0xb34
+#define BGMAC_RX_STATUS				0xb38
+#define BGMAC_TX_STATUS				0xb3c
+
+/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
+#define BGMAC_BCMA_IOCTL_SW_CLKEN		0x00000004	/* PHY Clock Enable */
+#define BGMAC_BCMA_IOCTL_SW_RESET		0x00000008	/* PHY Reset */
+
+/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
+#define BGMAC_BCMA_IOST_ATTACHED		0x00000800
+
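+/* Number of 32-bit MIB counter registers, derived from the contiguous 4-byte
+ * spacing between the first and last counter of each group (inclusive).
+ */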
+#define BGMAC_NUM_MIB_TX_REGS	\
+		(((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1)
+#define BGMAC_NUM_MIB_RX_REGS	\
+		(((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1)
+
+#define BGMAC_DMA_TX_CTL			0x00
+#define  BGMAC_DMA_TX_ENABLE			0x00000001
+#define  BGMAC_DMA_TX_SUSPEND			0x00000002
+#define  BGMAC_DMA_TX_LOOPBACK			0x00000004
+#define  BGMAC_DMA_TX_FLUSH			0x00000010
+#define  BGMAC_DMA_TX_MR_MASK			0x000000C0	/* Multiple outstanding reads */
+#define  BGMAC_DMA_TX_MR_SHIFT			6
+#define   BGMAC_DMA_TX_MR_1			0
+#define   BGMAC_DMA_TX_MR_2			1
+#define  BGMAC_DMA_TX_PARITY_DISABLE		0x00000800
+#define  BGMAC_DMA_TX_ADDREXT_MASK		0x00030000
+#define  BGMAC_DMA_TX_ADDREXT_SHIFT		16
+#define  BGMAC_DMA_TX_BL_MASK			0x001C0000	/* BurstLen bits */
+#define  BGMAC_DMA_TX_BL_SHIFT			18
+#define   BGMAC_DMA_TX_BL_16			0
+#define   BGMAC_DMA_TX_BL_32			1
+#define   BGMAC_DMA_TX_BL_64			2
+#define   BGMAC_DMA_TX_BL_128			3
+#define   BGMAC_DMA_TX_BL_256			4
+#define   BGMAC_DMA_TX_BL_512			5
+#define   BGMAC_DMA_TX_BL_1024			6
+#define  BGMAC_DMA_TX_PC_MASK			0x00E00000	/* Prefetch control */
+#define  BGMAC_DMA_TX_PC_SHIFT			21
+#define   BGMAC_DMA_TX_PC_0			0
+#define   BGMAC_DMA_TX_PC_4			1
+#define   BGMAC_DMA_TX_PC_8			2
+#define   BGMAC_DMA_TX_PC_16			3
+#define  BGMAC_DMA_TX_PT_MASK			0x03000000	/* Prefetch threshold */
+#define  BGMAC_DMA_TX_PT_SHIFT			24
+#define   BGMAC_DMA_TX_PT_1			0
+#define   BGMAC_DMA_TX_PT_2			1
+#define   BGMAC_DMA_TX_PT_4			2
+#define   BGMAC_DMA_TX_PT_8			3
+#define BGMAC_DMA_TX_INDEX			0x04
+#define BGMAC_DMA_TX_RINGLO			0x08
+#define BGMAC_DMA_TX_RINGHI			0x0C
+#define BGMAC_DMA_TX_STATUS			0x10
+#define  BGMAC_DMA_TX_STATDPTR			0x00001FFF
+#define  BGMAC_DMA_TX_STAT			0xF0000000
+#define   BGMAC_DMA_TX_STAT_DISABLED		0x00000000
+#define   BGMAC_DMA_TX_STAT_ACTIVE		0x10000000
+#define   BGMAC_DMA_TX_STAT_IDLEWAIT		0x20000000
+#define   BGMAC_DMA_TX_STAT_STOPPED		0x30000000
+#define   BGMAC_DMA_TX_STAT_SUSP		0x40000000
+#define BGMAC_DMA_TX_ERROR			0x14
+#define  BGMAC_DMA_TX_ERRDPTR			0x0001FFFF
+#define  BGMAC_DMA_TX_ERR			0xF0000000
+#define   BGMAC_DMA_TX_ERR_NOERR		0x00000000
+#define   BGMAC_DMA_TX_ERR_PROT			0x10000000
+#define   BGMAC_DMA_TX_ERR_UNDERRUN		0x20000000
+#define   BGMAC_DMA_TX_ERR_TRANSFER		0x30000000
+#define   BGMAC_DMA_TX_ERR_DESCREAD		0x40000000
+#define   BGMAC_DMA_TX_ERR_CORE			0x50000000
+#define BGMAC_DMA_RX_CTL			0x20
+#define  BGMAC_DMA_RX_ENABLE			0x00000001
+#define  BGMAC_DMA_RX_FRAME_OFFSET_MASK		0x000000FE
+#define  BGMAC_DMA_RX_FRAME_OFFSET_SHIFT	1
+#define  BGMAC_DMA_RX_DIRECT_FIFO		0x00000100
+#define  BGMAC_DMA_RX_OVERFLOW_CONT		0x00000400
+#define  BGMAC_DMA_RX_PARITY_DISABLE		0x00000800
+#define  BGMAC_DMA_RX_MR_MASK			0x000000C0	/* Multiple outstanding reads */
+#define  BGMAC_DMA_RX_MR_SHIFT			6
+#define   BGMAC_DMA_TX_MR_1			0
+#define   BGMAC_DMA_TX_MR_2			1
+#define  BGMAC_DMA_RX_ADDREXT_MASK		0x00030000
+#define  BGMAC_DMA_RX_ADDREXT_SHIFT		16
+#define  BGMAC_DMA_RX_BL_MASK			0x001C0000	/* BurstLen bits */
+#define  BGMAC_DMA_RX_BL_SHIFT			18
+#define   BGMAC_DMA_RX_BL_16			0
+#define   BGMAC_DMA_RX_BL_32			1
+#define   BGMAC_DMA_RX_BL_64			2
+#define   BGMAC_DMA_RX_BL_128			3
+#define   BGMAC_DMA_RX_BL_256			4
+#define   BGMAC_DMA_RX_BL_512			5
+#define   BGMAC_DMA_RX_BL_1024			6
+#define  BGMAC_DMA_RX_PC_MASK			0x00E00000	/* Prefetch control */
+#define  BGMAC_DMA_RX_PC_SHIFT			21
+#define   BGMAC_DMA_RX_PC_0			0
+#define   BGMAC_DMA_RX_PC_4			1
+#define   BGMAC_DMA_RX_PC_8			2
+#define   BGMAC_DMA_RX_PC_16			3
+#define  BGMAC_DMA_RX_PT_MASK			0x03000000	/* Prefetch threshold */
+#define  BGMAC_DMA_RX_PT_SHIFT			24
+#define   BGMAC_DMA_RX_PT_1			0
+#define   BGMAC_DMA_RX_PT_2			1
+#define   BGMAC_DMA_RX_PT_4			2
+#define   BGMAC_DMA_RX_PT_8			3
+#define BGMAC_DMA_RX_INDEX			0x24
+#define BGMAC_DMA_RX_RINGLO			0x28
+#define BGMAC_DMA_RX_RINGHI			0x2C
+#define BGMAC_DMA_RX_STATUS			0x30
+#define  BGMAC_DMA_RX_STATDPTR			0x00001FFF
+#define  BGMAC_DMA_RX_STAT			0xF0000000
+#define   BGMAC_DMA_RX_STAT_DISABLED		0x00000000
+#define   BGMAC_DMA_RX_STAT_ACTIVE		0x10000000
+#define   BGMAC_DMA_RX_STAT_IDLEWAIT		0x20000000
+#define   BGMAC_DMA_RX_STAT_STOPPED		0x30000000
+#define   BGMAC_DMA_RX_STAT_SUSP		0x40000000
+#define BGMAC_DMA_RX_ERROR			0x34
+#define  BGMAC_DMA_RX_ERRDPTR			0x0001FFFF
+#define  BGMAC_DMA_RX_ERR			0xF0000000
+#define   BGMAC_DMA_RX_ERR_NOERR		0x00000000
+#define   BGMAC_DMA_RX_ERR_PROT			0x10000000
+#define   BGMAC_DMA_RX_ERR_UNDERRUN		0x20000000
+#define   BGMAC_DMA_RX_ERR_TRANSFER		0x30000000
+#define   BGMAC_DMA_RX_ERR_DESCREAD		0x40000000
+#define   BGMAC_DMA_RX_ERR_CORE			0x50000000
+
+#define BGMAC_DESC_CTL0_EOT			0x10000000	/* End of ring */
+#define BGMAC_DESC_CTL0_IOC			0x20000000	/* IRQ on complete */
+#define BGMAC_DESC_CTL0_EOF			0x40000000	/* End of frame */
+#define BGMAC_DESC_CTL0_SOF			0x80000000	/* Start of frame */
+#define BGMAC_DESC_CTL1_LEN			0x00001FFF
+
+#define BGMAC_PHY_NOREGS			BRCM_PSEUDO_PHY_ADDR
+#define BGMAC_PHY_MASK				0x1F
+
+#define BGMAC_MAX_TX_RINGS			4
+#define BGMAC_MAX_RX_RINGS			1
+
+#define BGMAC_TX_RING_SLOTS			128
+#define BGMAC_RX_RING_SLOTS			512
+
+#define BGMAC_RX_HEADER_LEN			28		/* Last 24 bytes are unused. Well... */
+#define BGMAC_RX_FRAME_OFFSET			30		/* There are 2 unused bytes between header and real data */
+#define BGMAC_RX_BUF_OFFSET			(NET_SKB_PAD + NET_IP_ALIGN - \
+						 BGMAC_RX_FRAME_OFFSET)
+#define BGMAC_RX_MAX_FRAME_SIZE			1536		/* Copied from b44/tg3 */
+#define BGMAC_RX_BUF_SIZE			(BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+#define BGMAC_RX_ALLOC_SIZE			(SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
+						 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define BGMAC_BFL_ENETROBO			0x0010		/* has ephy roboswitch spi */
+#define BGMAC_BFL_ENETADM			0x0080		/* has ADMtek switch */
+#define BGMAC_BFL_ENETVLAN			0x0100		/* can do vlan */
+
+#define BGMAC_CHIPCTL_1_IF_TYPE_MASK		0x00000030
+#define BGMAC_CHIPCTL_1_IF_TYPE_RMII		0x00000000
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII		0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII		0x00000020
+#define BGMAC_CHIPCTL_1_SW_TYPE_MASK		0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY		0x00000000
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII		0x00000040
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII	0x00000080
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII		0x000000C0
+#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS		0x00010000
+
+#define BGMAC_WEIGHT	64
+
+#define ETHER_MAX_LEN   1518
+
+struct bgmac_slot_info {
+	union {
+		struct sk_buff *skb;
+		void *buf;
+	};
+	dma_addr_t dma_addr;
+};
+
+struct bgmac_dma_desc {
+	__le32 ctl0;
+	__le32 ctl1;
+	__le32 addr_low;
+	__le32 addr_high;
+} __packed;
+
+enum bgmac_dma_ring_type {
+	BGMAC_DMA_RING_TX,
+	BGMAC_DMA_RING_RX,
+};
+
+/**
+ * bgmac_dma_ring - contains info about DMA ring (either TX or RX one)
+ * @start: index of the first slot containing data
+ * @end: index of a slot that can *not* be read (yet)
+ *
+ * Note the specific meaning of @end: it is the index of the slot *after* the
+ * last one containing data that can be read. If @start equals @end the ring
+ * is empty.
+ */
+struct bgmac_dma_ring {
+	u32 start;
+	u32 end;
+
+	struct bgmac_dma_desc *cpu_base;
+	dma_addr_t dma_base;
+	u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+	u16 mmio_base;
+	bool unaligned;
+
+	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
+};
+
+struct bgmac_rx_header {
+	__le16 len;
+	__le16 flags;
+	__le16 pad[12];
+};
+
+struct bgmac {
+	struct bcma_device *core;
+	struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
+	struct net_device *net_dev;
+	struct napi_struct napi;
+	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
+
+	/* DMA */
+	struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
+	struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
+
+	/* Stats */
+	bool stats_grabbed;
+	u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS];
+	u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
+
+	/* Int */
+	u32 int_mask;
+
+	/* Current MAC state */
+	int mac_speed;
+	int mac_duplex;
+
+	u8 phyaddr;
+	bool has_robosw;
+
+	bool loopback;
+};
+
+static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+	return bcma_read32(bgmac->core, offset);
+}
+
+static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+	bcma_write32(bgmac->core, offset, value);
+}
+
+static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
+				   u32 set)
+{
+	bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set);
+}
+
+static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask)
+{
+	bgmac_maskset(bgmac, offset, mask, 0);
+}
+
+static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
+{
+	bgmac_maskset(bgmac, offset, ~0, set);
+}
+
+#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
new file mode 100644
index 0000000..8fc3f3c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -0,0 +1,8836 @@
+/* bnx2.c: QLogic bnx2 network driver.
+ *
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan  (mchan@broadcom.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
+#include "bnx2.h"
+#include "bnx2_fw.h"
+
+#define DRV_MODULE_NAME		"bnx2"
+#define DRV_MODULE_VERSION	"2.2.6"
+#define DRV_MODULE_RELDATE	"January 29, 2014"
+#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
+#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
+#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
+#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
+#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
+
+#define RUN_AT(x) (jiffies + (x))
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (5*HZ)
+
+static char version[] =
+	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
+MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_MIPS_FILE_06);
+MODULE_FIRMWARE(FW_RV2P_FILE_06);
+MODULE_FIRMWARE(FW_MIPS_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
+
+static int disable_msi = 0;
+
+module_param(disable_msi, int, S_IRUGO);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+typedef enum {
+	BCM5706 = 0,
+	NC370T,
+	NC370I,
+	BCM5706S,
+	NC370F,
+	BCM5708,
+	BCM5708S,
+	BCM5709,
+	BCM5709S,
+	BCM5716,
+	BCM5716S,
+} board_t;
+
+/* indexed by board_t, above */
+static struct {
+	char *name;
+} board_info[] = {
+	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
+	{ "HP NC370T Multifunction Gigabit Server Adapter" },
+	{ "HP NC370i Multifunction Gigabit Server Adapter" },
+	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
+	{ "HP NC370F Multifunction Gigabit Server Adapter" },
+	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
+	};
+
+static const struct pci_device_id bnx2_pci_tbl[] = {
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
+	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
+	{ 0, }
+};
+
+static const struct flash_spec flash_table[] =
+{
+#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
+	/* Slow EEPROM */
+	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+	 "EEPROM - slow"},
+	/* Expansion entry 0001 */
+	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 0001"},
+	/* Saifun SA25F010 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
+	 "Non-buffered flash (128kB)"},
+	/* Saifun SA25F020 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
+	 "Non-buffered flash (256kB)"},
+	/* Expansion entry 0100 */
+	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 0100"},
+	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
+	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
+	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
+	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
+	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
+	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
+	/* Saifun SA25F005 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
+	 "Non-buffered flash (64kB)"},
+	/* Fast EEPROM */
+	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+	 "EEPROM - fast"},
+	/* Expansion entry 1001 */
+	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1001"},
+	/* Expansion entry 1010 */
+	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1010"},
+	/* ATMEL AT45DB011B (buffered flash) */
+	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
+	 "Buffered flash (128kB)"},
+	/* Expansion entry 1100 */
+	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1100"},
+	/* Expansion entry 1101 */
+	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1101"},
+	/* Atmel Expansion entry 1110 */
+	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1110 (Atmel)"},
+	/* ATMEL AT45DB021B (buffered flash) */
+	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
+	 "Buffered flash (256kB)"},
+};
+
+static const struct flash_spec flash_5709 = {
+	.flags		= BNX2_NV_BUFFERED,
+	.page_bits	= BCM5709_FLASH_PAGE_BITS,
+	.page_size	= BCM5709_FLASH_PAGE_SIZE,
+	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
+	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
+	.name		= "5709 Buffered flash (256kB)",
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+
+static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);
+
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
+{
+	u32 diff;
+
+	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
+	barrier();
+
+	/* The ring uses 256 indices for 255 entries; one of them
+	 * needs to be skipped.
+	 */
+	diff = txr->tx_prod - txr->tx_cons;
+	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
+		diff &= 0xffff;
+		if (diff == BNX2_TX_DESC_CNT)
+			diff = BNX2_MAX_TX_DESC_CNT;
+	}
+	return bp->tx_ring_size - diff;
+}
+
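+/* Indirect register access: write the target offset to the PCICFG window
+ * address register, then transfer the data through the window register,
+ * serialized by indirect_lock.
+ */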
+static u32
+bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
+{
+	u32 val;
+
+	spin_lock_bh(&bp->indirect_lock);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
+	spin_unlock_bh(&bp->indirect_lock);
+	return val;
+}
+
+static void
+bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
+{
+	spin_lock_bh(&bp->indirect_lock);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
+	spin_unlock_bh(&bp->indirect_lock);
+}
+
+static void
+bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
+{
+	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
+}
+
+static u32
+bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
+{
+	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
+}
+
+static void
+bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
+{
+	offset += cid_addr;
+	spin_lock_bh(&bp->indirect_lock);
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		int i;
+
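+		/* The 5709 uses a write-request handshake: post the data,
+		 * issue the write request and poll until the WRITE_REQ bit
+		 * clears (up to 5 * 5 us).
+		 */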
+		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
+		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
+			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+		for (i = 0; i < 5; i++) {
+			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
+			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+				break;
+			udelay(5);
+		}
+	} else {
+		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
+		BNX2_WR(bp, BNX2_CTX_DATA, val);
+	}
+	spin_unlock_bh(&bp->indirect_lock);
+}
+
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct drv_ctl_io *io = &info->data.io;
+
+	switch (info->cmd) {
+	case DRV_CTL_IO_WR_CMD:
+		bnx2_reg_wr_ind(bp, io->offset, io->data);
+		break;
+	case DRV_CTL_IO_RD_CMD:
+		io->data = bnx2_reg_rd_ind(bp, io->offset);
+		break;
+	case DRV_CTL_CTX_WR_CMD:
+		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	int sb_id;
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_present = 0;
+		sb_id = bp->irq_nvecs;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_tag = bnapi->last_status_idx;
+		bnapi->cnic_present = 1;
+		sb_id = 0;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+
+	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+	cp->irq_arr[0].status_blk = (void *)
+		((unsigned long) bnapi->status_blk.msi +
+		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+	cp->irq_arr[0].status_blk_num = sb_id;
+	cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			      void *data)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (cp->drv_state & CNIC_DRV_STATE_REGD)
+		return -EBUSY;
+
+	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
+		return -ENODEV;
+
+	bp->cnic_data = data;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2_setup_cnic_irq_info(bp);
+
+	return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_lock);
+	cp->drv_state = 0;
+	bnapi->cnic_present = 0;
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_lock);
+	synchronize_rcu();
+	return 0;
+}
+
+static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (!cp->max_iscsi_conn)
+		return NULL;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = bp->chip_id;
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->drv_ctl = bnx2_drv_ctl;
+	cp->drv_register_cnic = bnx2_register_cnic;
+	cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+	return cp;
+}
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	mutex_lock(&bp->cnic_lock);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
+	if (c_ops) {
+		info.cmd = CNIC_CTL_STOP_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	mutex_unlock(&bp->cnic_lock);
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	mutex_lock(&bp->cnic_lock);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
+	if (c_ops) {
+		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+			bnapi->cnic_tag = bnapi->last_status_idx;
+		}
+		info.cmd = CNIC_CTL_START_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	mutex_unlock(&bp->cnic_lock);
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
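+/* Read a PHY register through the EMAC MDIO interface.  Hardware
+ * auto-polling is paused around the access and the START_BUSY bit is
+ * polled (up to ~500 us) for completion; -EBUSY is returned on timeout.
+ */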
+static int
+bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
+{
+	u32 val1;
+	int i, ret;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	val1 = (bp->phy_addr << 21) | (reg << 16) |
+		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
+		BNX2_EMAC_MDIO_COMM_START_BUSY;
+	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
+		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+
+			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
+			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
+
+			break;
+		}
+	}
+
+	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
+		*val = 0x0;
+		ret = -EBUSY;
+	}
+	else {
+		*val = val1;
+		ret = 0;
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	return ret;
+}
+
+static int
+bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
+{
+	u32 val1;
+	int i, ret;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
+		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
+		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
+	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
+		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+
+	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
+		ret = -EBUSY;
+	else
+		ret = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	return ret;
+}
+
+static void
+bnx2_disable_int(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	}
+	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+}
+
+static void
+bnx2_enable_int(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+			bnapi->last_status_idx);
+
+		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			bnapi->last_status_idx);
+	}
+	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+}
+
+static void
+bnx2_disable_int_sync(struct bnx2 *bp)
+{
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+	if (!netif_running(bp->dev))
+		return;
+
+	bnx2_disable_int(bp);
+	for (i = 0; i < bp->irq_nvecs; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_disable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_enable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
+{
+	if (stop_cnic)
+		bnx2_cnic_stop(bp);
+	if (netif_running(bp->dev)) {
+		bnx2_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+	}
+	bnx2_disable_int_sync(bp);
+	netif_carrier_off(bp->dev);	/* prevent tx timeout */
+}
+
+static void
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			netif_tx_wake_all_queues(bp->dev);
+			spin_lock_bh(&bp->phy_lock);
+			if (bp->link_up)
+				netif_carrier_on(bp->dev);
+			spin_unlock_bh(&bp->phy_lock);
+			bnx2_napi_enable(bp);
+			bnx2_enable_int(bp);
+			if (start_cnic)
+				bnx2_cnic_start(bp);
+		}
+	}
+}
+
+static void
+bnx2_free_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		if (txr->tx_desc_ring) {
+			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					  txr->tx_desc_ring,
+					  txr->tx_desc_mapping);
+			txr->tx_desc_ring = NULL;
+		}
+		kfree(txr->tx_buf_ring);
+		txr->tx_buf_ring = NULL;
+	}
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			if (rxr->rx_desc_ring[j])
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_desc_ring[j],
+						  rxr->rx_desc_mapping[j]);
+			rxr->rx_desc_ring[j] = NULL;
+		}
+		vfree(rxr->rx_buf_ring);
+		rxr->rx_buf_ring = NULL;
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			if (rxr->rx_pg_desc_ring[j])
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_pg_desc_ring[j],
+						  rxr->rx_pg_desc_mapping[j]);
+			rxr->rx_pg_desc_ring[j] = NULL;
+		}
+		vfree(rxr->rx_pg_ring);
+		rxr->rx_pg_ring = NULL;
+	}
+}
+
+static int
+bnx2_alloc_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+		if (txr->tx_buf_ring == NULL)
+			return -ENOMEM;
+
+		txr->tx_desc_ring =
+			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					   &txr->tx_desc_mapping, GFP_KERNEL);
+		if (txr->tx_desc_ring == NULL)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		rxr->rx_buf_ring =
+			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+		if (rxr->rx_buf_ring == NULL)
+			return -ENOMEM;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			rxr->rx_desc_ring[j] =
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_desc_mapping[j],
+						   GFP_KERNEL);
+			if (rxr->rx_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+
+		if (bp->rx_pg_ring_size) {
+			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
+						  bp->rx_max_pg_ring);
+			if (rxr->rx_pg_ring == NULL)
+				return -ENOMEM;
+
+		}
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			rxr->rx_pg_desc_ring[j] =
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_pg_desc_mapping[j],
+						   GFP_KERNEL);
+			if (rxr->rx_pg_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+	}
+	return 0;
+}
+
+static void
+bnx2_free_stats_blk(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->status_blk) {
+		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+				  bp->status_blk,
+				  bp->status_blk_mapping);
+		bp->status_blk = NULL;
+		bp->stats_blk = NULL;
+	}
+}
+
+static int
+bnx2_alloc_stats_blk(struct net_device *dev)
+{
+	int status_blk_size;
+	void *status_blk;
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* Combine status and statistics blocks into one allocation. */
+	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+	if (bp->flags & BNX2_FLAG_MSIX_CAP)
+		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+						 BNX2_SBLK_MSIX_ALIGN_SIZE);
+	bp->status_stats_size = status_blk_size +
+				sizeof(struct statistics_block);
+	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					 &bp->status_blk_mapping, GFP_KERNEL);
+	if (status_blk == NULL)
+		return -ENOMEM;
+
+	bp->status_blk = status_blk;
+	bp->stats_blk = status_blk + status_blk_size;
+	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+	return 0;
+}
+
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+	bnx2_free_tx_mem(bp);
+	bnx2_free_rx_mem(bp);
+
+	for (i = 0; i < bp->ctx_pages; i++) {
+		if (bp->ctx_blk[i]) {
+			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
+					  bp->ctx_blk[i],
+					  bp->ctx_blk_mapping[i]);
+			bp->ctx_blk[i] = NULL;
+		}
+	}
+
+	if (bnapi->status_blk.msi)
+		bnapi->status_blk.msi = NULL;
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+	int i, err;
+	struct bnx2_napi *bnapi;
+
+	bnapi = &bp->bnx2_napi[0];
+	bnapi->status_blk.msi = bp->status_blk;
+	bnapi->hw_tx_cons_ptr =
+		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+	bnapi->hw_rx_cons_ptr =
+		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
+	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
+		for (i = 1; i < bp->irq_nvecs; i++) {
+			struct status_block_msix *sblk;
+
+			bnapi = &bp->bnx2_napi[i];
+
+			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			bnapi->status_blk.msix = sblk;
+			bnapi->hw_tx_cons_ptr =
+				&sblk->status_tx_quick_consumer_index;
+			bnapi->hw_rx_cons_ptr =
+				&sblk->status_rx_quick_consumer_index;
+			bnapi->int_num = i << 24;
+		}
+	}
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
+		if (bp->ctx_pages == 0)
+			bp->ctx_pages = 1;
+		for (i = 0; i < bp->ctx_pages; i++) {
+			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
+						BNX2_PAGE_SIZE,
+						&bp->ctx_blk_mapping[i],
+						GFP_KERNEL);
+			if (bp->ctx_blk[i] == NULL)
+				goto alloc_mem_err;
+		}
+	}
+
+	err = bnx2_alloc_rx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
+	err = bnx2_alloc_tx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2_free_mem(bp);
+	return -ENOMEM;
+}
+
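+/* Encode the current link speed, duplex and autoneg state and report it to
+ * the bootcode through the BNX2_LINK_STATUS shared-memory word.
+ */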
+static void
+bnx2_report_fw_link(struct bnx2 *bp)
+{
+	u32 fw_link_status = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return;
+
+	if (bp->link_up) {
+		u32 bmsr;
+
+		switch (bp->line_speed) {
+		case SPEED_10:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_10HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_10FULL;
+			break;
+		case SPEED_100:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_100HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_100FULL;
+			break;
+		case SPEED_1000:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_1000HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_1000FULL;
+			break;
+		case SPEED_2500:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_2500HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_2500FULL;
+			break;
+		}
+
+		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
+
+		if (bp->autoneg) {
+			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
+
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
+			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
+				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
+			else
+				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
+		}
+	}
+	else
+		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
+
+	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
+}
+
+static char *
+bnx2_xceiver_str(struct bnx2 *bp)
+{
+	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
+		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
+		 "Copper");
+}
+
+static void
+bnx2_report_link(struct bnx2 *bp)
+{
+	if (bp->link_up) {
+		netif_carrier_on(bp->dev);
+		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+			    bnx2_xceiver_str(bp),
+			    bp->line_speed,
+			    bp->duplex == DUPLEX_FULL ? "full" : "half");
+
+		if (bp->flow_ctrl) {
+			if (bp->flow_ctrl & FLOW_CTRL_RX) {
+				pr_cont(", receive ");
+				if (bp->flow_ctrl & FLOW_CTRL_TX)
+					pr_cont("& transmit ");
+			}
+			else {
+				pr_cont(", transmit ");
+			}
+			pr_cont("flow control ON");
+		}
+		pr_cont("\n");
+	} else {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC %s Link is Down\n",
+			   bnx2_xceiver_str(bp));
+	}
+
+	bnx2_report_fw_link(bp);
+}
+
+static void
+bnx2_resolve_flow_ctrl(struct bnx2 *bp)
+{
+	u32 local_adv, remote_adv;
+
+	bp->flow_ctrl = 0;
+	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+
+		if (bp->duplex == DUPLEX_FULL) {
+			bp->flow_ctrl = bp->req_flow_ctrl;
+		}
+		return;
+	}
+
+	if (bp->duplex != DUPLEX_FULL) {
+		return;
+	}
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
+		u32 val;
+
+		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
+			bp->flow_ctrl |= FLOW_CTRL_TX;
+		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
+			bp->flow_ctrl |= FLOW_CTRL_RX;
+		return;
+	}
+
+	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		u32 new_local_adv = 0;
+		u32 new_remote_adv = 0;
+
+		if (local_adv & ADVERTISE_1000XPAUSE)
+			new_local_adv |= ADVERTISE_PAUSE_CAP;
+		if (local_adv & ADVERTISE_1000XPSE_ASYM)
+			new_local_adv |= ADVERTISE_PAUSE_ASYM;
+		if (remote_adv & ADVERTISE_1000XPAUSE)
+			new_remote_adv |= ADVERTISE_PAUSE_CAP;
+		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
+			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
+
+		local_adv = new_local_adv;
+		remote_adv = new_remote_adv;
+	}
+
+	/* See Table 28B-3 of 802.3ab-1999 spec. */
+	if (local_adv & ADVERTISE_PAUSE_CAP) {
+		if (local_adv & ADVERTISE_PAUSE_ASYM) {
+			if (remote_adv & ADVERTISE_PAUSE_CAP) {
+				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			}
+			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
+				bp->flow_ctrl = FLOW_CTRL_RX;
+			}
+		}
+		else {
+			if (remote_adv & ADVERTISE_PAUSE_CAP) {
+				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			}
+		}
+	}
+	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
+			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
+
+			bp->flow_ctrl = FLOW_CTRL_TX;
+		}
+	}
+}
+
+static int
+bnx2_5709s_linkup(struct bnx2 *bp)
+{
+	u32 val, speed;
+
+	bp->link_up = 1;
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
+	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
+		bp->line_speed = bp->req_line_speed;
+		bp->duplex = bp->req_duplex;
+		return 0;
+	}
+	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
+	switch (speed) {
+		case MII_BNX2_GP_TOP_AN_SPEED_10:
+			bp->line_speed = SPEED_10;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_100:
+			bp->line_speed = SPEED_100;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_1G:
+		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
+			bp->line_speed = SPEED_1000;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
+			bp->line_speed = SPEED_2500;
+			break;
+	}
+	if (val & MII_BNX2_GP_TOP_AN_FD)
+		bp->duplex = DUPLEX_FULL;
+	else
+		bp->duplex = DUPLEX_HALF;
+	return 0;
+}
+
+static int
+bnx2_5708s_linkup(struct bnx2 *bp)
+{
+	u32 val;
+
+	bp->link_up = 1;
+	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
+		case BCM5708S_1000X_STAT1_SPEED_10:
+			bp->line_speed = SPEED_10;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_100:
+			bp->line_speed = SPEED_100;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_1G:
+			bp->line_speed = SPEED_1000;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_2G5:
+			bp->line_speed = SPEED_2500;
+			break;
+	}
+	if (val & BCM5708S_1000X_STAT1_FD)
+		bp->duplex = DUPLEX_FULL;
+	else
+		bp->duplex = DUPLEX_HALF;
+
+	return 0;
+}
+
+static int
+bnx2_5706s_linkup(struct bnx2 *bp)
+{
+	u32 bmcr, local_adv, remote_adv, common;
+
+	bp->link_up = 1;
+	bp->line_speed = SPEED_1000;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	if (bmcr & BMCR_FULLDPLX) {
+		bp->duplex = DUPLEX_FULL;
+	}
+	else {
+		bp->duplex = DUPLEX_HALF;
+	}
+
+	if (!(bmcr & BMCR_ANENABLE)) {
+		return 0;
+	}
+
+	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+	common = local_adv & remote_adv;
+	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
+
+		if (common & ADVERTISE_1000XFULL) {
+			bp->duplex = DUPLEX_FULL;
+		}
+		else {
+			bp->duplex = DUPLEX_HALF;
+		}
+	}
+
+	return 0;
+}
+
+static int
+bnx2_copper_linkup(struct bnx2 *bp)
+{
+	u32 bmcr;
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	if (bmcr & BMCR_ANENABLE) {
+		u32 local_adv, remote_adv, common;
+
+		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
+		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
+
+		common = local_adv & (remote_adv >> 2);
+		if (common & ADVERTISE_1000FULL) {
+			bp->line_speed = SPEED_1000;
+			bp->duplex = DUPLEX_FULL;
+		}
+		else if (common & ADVERTISE_1000HALF) {
+			bp->line_speed = SPEED_1000;
+			bp->duplex = DUPLEX_HALF;
+		}
+		else {
+			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+			common = local_adv & remote_adv;
+			if (common & ADVERTISE_100FULL) {
+				bp->line_speed = SPEED_100;
+				bp->duplex = DUPLEX_FULL;
+			}
+			else if (common & ADVERTISE_100HALF) {
+				bp->line_speed = SPEED_100;
+				bp->duplex = DUPLEX_HALF;
+			}
+			else if (common & ADVERTISE_10FULL) {
+				bp->line_speed = SPEED_10;
+				bp->duplex = DUPLEX_FULL;
+			}
+			else if (common & ADVERTISE_10HALF) {
+				bp->line_speed = SPEED_10;
+				bp->duplex = DUPLEX_HALF;
+			}
+			else {
+				bp->line_speed = 0;
+				bp->link_up = 0;
+			}
+		}
+	}
+	else {
+		if (bmcr & BMCR_SPEED100) {
+			bp->line_speed = SPEED_100;
+		}
+		else {
+			bp->line_speed = SPEED_10;
+		}
+		if (bmcr & BMCR_FULLDPLX) {
+			bp->duplex = DUPLEX_FULL;
+		}
+		else {
+			bp->duplex = DUPLEX_HALF;
+		}
+	}
+
+	if (bp->link_up) {
+		u32 ext_status;
+
+		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+		if (ext_status & EXT_STATUS_MDIX)
+			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+	}
+
+	return 0;
+}
+
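+/* Program the L2 context type for one RX CID; flow control is enabled in
+ * the context when TX pause is active.
+ */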
+static void
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
+{
+	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
+
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
+	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
+	val |= 0x02 << 8;
+
+	if (bp->flow_ctrl & FLOW_CTRL_TX)
+		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
+
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+}
+
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+	int i;
+	u32 cid;
+
+	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+		if (i == 1)
+			cid = RX_RSS_CID;
+		bnx2_init_rx_context(bp, cid);
+	}
+}
+
+static void
+bnx2_set_mac_link(struct bnx2 *bp)
+{
+	u32 val;
+
+	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
+	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
+		(bp->duplex == DUPLEX_HALF)) {
+		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
+	}
+
+	/* Configure the EMAC mode register. */
+	val = BNX2_RD(bp, BNX2_EMAC_MODE);
+
+	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+		BNX2_EMAC_MODE_25G_MODE);
+
+	if (bp->link_up) {
+		switch (bp->line_speed) {
+			case SPEED_10:
+				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
+					val |= BNX2_EMAC_MODE_PORT_MII_10M;
+					break;
+				}
+				/* fall through */
+			case SPEED_100:
+				val |= BNX2_EMAC_MODE_PORT_MII;
+				break;
+			case SPEED_2500:
+				val |= BNX2_EMAC_MODE_25G_MODE;
+				/* fall through */
+			case SPEED_1000:
+				val |= BNX2_EMAC_MODE_PORT_GMII;
+				break;
+		}
+	}
+	else {
+		val |= BNX2_EMAC_MODE_PORT_GMII;
+	}
+
+	/* Set the MAC to operate in the appropriate duplex mode. */
+	if (bp->duplex == DUPLEX_HALF)
+		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
+	BNX2_WR(bp, BNX2_EMAC_MODE, val);
+
+	/* Enable/disable rx PAUSE. */
+	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
+
+	if (bp->flow_ctrl & FLOW_CTRL_RX)
+		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
+	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
+
+	/* Enable/disable tx PAUSE. */
+	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
+	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
+
+	if (bp->flow_ctrl & FLOW_CTRL_TX)
+		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
+	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
+
+	/* Acknowledge the interrupt. */
+	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
+
+	bnx2_init_all_rx_contexts(bp);
+}
+
+static void
+bnx2_enable_bmsr1(struct bnx2 *bp)
+{
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_GP_STATUS);
+}
+
+static void
+bnx2_disable_bmsr1(struct bnx2 *bp)
+{
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+}
+
+static int
+bnx2_test_and_enable_2g5(struct bnx2 *bp)
+{
+	u32 up1;
+	int ret = 1;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return 0;
+
+	if (bp->autoneg & AUTONEG_SPEED)
+		bp->advertising |= ADVERTISED_2500baseX_Full;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+	bnx2_read_phy(bp, bp->mii_up1, &up1);
+	if (!(up1 & BCM5708S_UP1_2G5)) {
+		up1 |= BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, bp->mii_up1, up1);
+		ret = 0;
+	}
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return ret;
+}
+
+static int
+bnx2_test_and_disable_2g5(struct bnx2 *bp)
+{
+	u32 up1;
+	int ret = 0;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return 0;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+	bnx2_read_phy(bp, bp->mii_up1, &up1);
+	if (up1 & BCM5708S_UP1_2G5) {
+		up1 &= ~BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, bp->mii_up1, up1);
+		ret = 1;
+	}
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return ret;
+}
+
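+/* Force the SerDes link to 2.5 Gbps.  On the 5709 this uses the SERDES_DIG
+ * MISC1 force bits; on the 5708 it sets the FORCE_2500 override in the BMCR.
+ */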
+static void
+bnx2_enable_forced_2g5(struct bnx2 *bp)
+{
+	u32 uninitialized_var(bmcr);
+	int err;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_SERDES_DIG);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+			val |= MII_BNX2_SD_MISC1_FORCE |
+				MII_BNX2_SD_MISC1_FORCE_2_5G;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
+	}
+
+	if (err)
+		return;
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		bmcr &= ~BMCR_ANENABLE;
+		if (bp->req_duplex == DUPLEX_FULL)
+			bmcr |= BMCR_FULLDPLX;
+	}
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_disable_forced_2g5(struct bnx2 *bp)
+{
+	u32 uninitialized_var(bmcr);
+	int err;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_SERDES_DIG);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
+	}
+
+	if (err)
+		return;
+
+	if (bp->autoneg & AUTONEG_SPEED)
+		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
+{
+	u32 val;
+
+	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+	if (start)
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
+	else
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
+}
+
+static int
+bnx2_set_link(struct bnx2 *bp)
+{
+	u32 bmsr;
+	u8 link_up;
+
+	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
+		bp->link_up = 1;
+		return 0;
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return 0;
+
+	link_up = bp->link_up;
+
+	bnx2_enable_bmsr1(bp);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_disable_bmsr1(bp);
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
+		u32 val, an_dbg;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
+			bnx2_5706s_force_link_dn(bp, 0);
+			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
+		}
+		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
+
+		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+		if ((val & BNX2_EMAC_STATUS_LINK) &&
+		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
+			bmsr |= BMSR_LSTATUS;
+		else
+			bmsr &= ~BMSR_LSTATUS;
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		bp->link_up = 1;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
+				bnx2_5706s_linkup(bp);
+			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
+				bnx2_5708s_linkup(bp);
+			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+				bnx2_5709s_linkup(bp);
+		}
+		else {
+			bnx2_copper_linkup(bp);
+		}
+		bnx2_resolve_flow_ctrl(bp);
+	}
+	else {
+		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+		    (bp->autoneg & AUTONEG_SPEED))
+			bnx2_disable_forced_2g5(bp);
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+		}
+		bp->link_up = 0;
+	}
+
+	if (bp->link_up != link_up) {
+		bnx2_report_link(bp);
+	}
+
+	bnx2_set_mac_link(bp);
+
+	return 0;
+}
+
+static int
+bnx2_reset_phy(struct bnx2 *bp)
+{
+	int i;
+	u32 reg;
+
+	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
+
+#define PHY_RESET_MAX_WAIT 100
+	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
+		udelay(10);
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
+		if (!(reg & BMCR_RESET)) {
+			udelay(20);
+			break;
+		}
+	}
+	if (i == PHY_RESET_MAX_WAIT) {
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static u32
+bnx2_phy_get_pause_adv(struct bnx2 *bp)
+{
+	u32 adv = 0;
+
+	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
+		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPAUSE;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_CAP;
+		}
+	}
+	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPSE_ASYM;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_ASYM;
+		}
+	}
+	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+		}
+	}
+	return adv;
+}
+
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
+
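+/* Configure link settings through the firmware-managed (remote) PHY by
+ * encoding the requested speed, duplex and pause advertisement into
+ * DRV_MB_ARG0 and issuing a SET_LINK command.  phy_lock is dropped across
+ * the firmware handshake.
+ */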
+static int
+bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 speed_arg = 0, pause_adv;
+
+	pause_adv = bnx2_phy_get_pause_adv(bp);
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
+		if (bp->advertising & ADVERTISED_10baseT_Half)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+		if (bp->advertising & ADVERTISED_10baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+		if (bp->advertising & ADVERTISED_100baseT_Half)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+		if (bp->advertising & ADVERTISED_100baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+		if (bp->advertising & ADVERTISED_1000baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+		if (bp->advertising & ADVERTISED_2500baseX_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+	} else {
+		if (bp->req_line_speed == SPEED_2500)
+			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+		else if (bp->req_line_speed == SPEED_1000)
+			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+		else if (bp->req_line_speed == SPEED_100) {
+			if (bp->req_duplex == DUPLEX_FULL)
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+			else
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+		} else if (bp->req_line_speed == SPEED_10) {
+			if (bp->req_duplex == DUPLEX_FULL)
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+			else
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+		}
+	}
+
+	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
+		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
+	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
+		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
+
+	if (port == PORT_TP)
+		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
+			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
+
+	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
+
+	spin_unlock_bh(&bp->phy_lock);
+	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
+	spin_lock_bh(&bp->phy_lock);
+
+	return 0;
+}
+
+static int
+bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 adv, bmcr;
+	u32 new_adv = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return bnx2_setup_remote_phy(bp, port);
+
+	if (!(bp->autoneg & AUTONEG_SPEED)) {
+		u32 new_bmcr;
+		int force_link_down = 0;
+
+		if (bp->req_line_speed == SPEED_2500) {
+			if (!bnx2_test_and_enable_2g5(bp))
+				force_link_down = 1;
+		} else if (bp->req_line_speed == SPEED_1000) {
+			if (bnx2_test_and_disable_2g5(bp))
+				force_link_down = 1;
+		}
+		bnx2_read_phy(bp, bp->mii_adv, &adv);
+		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		new_bmcr = bmcr & ~BMCR_ANENABLE;
+		new_bmcr |= BMCR_SPEED1000;
+
+		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+			if (bp->req_line_speed == SPEED_2500)
+				bnx2_enable_forced_2g5(bp);
+			else if (bp->req_line_speed == SPEED_1000) {
+				bnx2_disable_forced_2g5(bp);
+				new_bmcr &= ~0x2000;
+			}
+
+		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
+			if (bp->req_line_speed == SPEED_2500)
+				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+			else
+				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
+		}
+
+		if (bp->req_duplex == DUPLEX_FULL) {
+			adv |= ADVERTISE_1000XFULL;
+			new_bmcr |= BMCR_FULLDPLX;
+		}
+		else {
+			adv |= ADVERTISE_1000XHALF;
+			new_bmcr &= ~BMCR_FULLDPLX;
+		}
+		if ((new_bmcr != bmcr) || (force_link_down)) {
+			/* Force a link-down event visible to the other side */
+			if (bp->link_up) {
+				bnx2_write_phy(bp, bp->mii_adv, adv &
+					       ~(ADVERTISE_1000XFULL |
+						 ADVERTISE_1000XHALF));
+				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
+					BMCR_ANRESTART | BMCR_ANENABLE);
+
+				bp->link_up = 0;
+				netif_carrier_off(bp->dev);
+				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+				bnx2_report_link(bp);
+			}
+			bnx2_write_phy(bp, bp->mii_adv, adv);
+			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+		} else {
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+		return 0;
+	}
+
+	bnx2_test_and_enable_2g5(bp);
+
+	if (bp->advertising & ADVERTISED_1000baseT_Full)
+		new_adv |= ADVERTISE_1000XFULL;
+
+	new_adv |= bnx2_phy_get_pause_adv(bp);
+
+	bnx2_read_phy(bp, bp->mii_adv, &adv);
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	bp->serdes_an_pending = 0;
+	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
+		/* Force a link-down event visible to the other side */
+		if (bp->link_up) {
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(20);
+			spin_lock_bh(&bp->phy_lock);
+		}
+
+		bnx2_write_phy(bp, bp->mii_adv, new_adv);
+		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
+			BMCR_ANENABLE);
+		/* Speed up link-up time when the link partner
+		 * does not autonegotiate, which is very common
+		 * in blade servers. Some blade servers use
+		 * IPMI for keyboard input and it's important
+		 * to minimize link disruptions. Autoneg. involves
+		 * exchanging base pages plus 3 next pages and
+		 * normally completes in about 120 msec.
+		 */
+		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
+	} else {
+		bnx2_resolve_flow_ctrl(bp);
+		bnx2_set_mac_link(bp);
+	}
+
+	return 0;
+}
+
+#define ETHTOOL_ALL_FIBRE_SPEED						\
+	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
+		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
+		(ADVERTISED_1000baseT_Full)
+
+#define ETHTOOL_ALL_COPPER_SPEED					\
+	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
+	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
+	ADVERTISED_1000baseT_Full)
+
+#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
+
+#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
+
+static void
+bnx2_set_default_remote_link(struct bnx2 *bp)
+{
+	u32 link;
+
+	if (bp->phy_port == PORT_TP)
+		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
+	else
+		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
+
+	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
+		bp->req_line_speed = 0;
+		bp->autoneg |= AUTONEG_SPEED;
+		bp->advertising = ADVERTISED_Autoneg;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+			bp->advertising |= ADVERTISED_10baseT_Half;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+			bp->advertising |= ADVERTISED_10baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+			bp->advertising |= ADVERTISED_100baseT_Half;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+			bp->advertising |= ADVERTISED_100baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+			bp->advertising |= ADVERTISED_1000baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+			bp->advertising |= ADVERTISED_2500baseX_Full;
+	} else {
+		bp->autoneg = 0;
+		bp->advertising = 0;
+		bp->req_duplex = DUPLEX_FULL;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
+			bp->req_line_speed = SPEED_10;
+			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+				bp->req_duplex = DUPLEX_HALF;
+		}
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
+			bp->req_line_speed = SPEED_100;
+			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+				bp->req_duplex = DUPLEX_HALF;
+		}
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+			bp->req_line_speed = SPEED_1000;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+			bp->req_line_speed = SPEED_2500;
+	}
+}
+
+static void
+bnx2_set_default_link(struct bnx2 *bp)
+{
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		bnx2_set_default_remote_link(bp);
+		return;
+	}
+
+	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
+	bp->req_line_speed = 0;
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		u32 reg;
+
+		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
+		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+			bp->autoneg = 0;
+			bp->req_line_speed = bp->line_speed = SPEED_1000;
+			bp->req_duplex = DUPLEX_FULL;
+		}
+	} else
+		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
+}
+
+static void
+bnx2_send_heart_beat(struct bnx2 *bp)
+{
+	u32 msg;
+	u32 addr;
+
+	spin_lock(&bp->indirect_lock);
+	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
+	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+	spin_unlock(&bp->indirect_lock);
+}
+
+static void
+bnx2_remote_phy_event(struct bnx2 *bp)
+{
+	u32 msg;
+	u8 link_up = bp->link_up;
+	u8 old_port;
+
+	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+
+	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
+		bnx2_send_heart_beat(bp);
+
+	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
+
+	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
+		bp->link_up = 0;
+	else {
+		u32 speed;
+
+		bp->link_up = 1;
+		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
+		bp->duplex = DUPLEX_FULL;
+		switch (speed) {
+			case BNX2_LINK_STATUS_10HALF:
+				bp->duplex = DUPLEX_HALF;
+				/* fall through */
+			case BNX2_LINK_STATUS_10FULL:
+				bp->line_speed = SPEED_10;
+				break;
+			case BNX2_LINK_STATUS_100HALF:
+				bp->duplex = DUPLEX_HALF;
+				/* fall through */
+			case BNX2_LINK_STATUS_100BASE_T4:
+			case BNX2_LINK_STATUS_100FULL:
+				bp->line_speed = SPEED_100;
+				break;
+			case BNX2_LINK_STATUS_1000HALF:
+				bp->duplex = DUPLEX_HALF;
+				/* fall through */
+			case BNX2_LINK_STATUS_1000FULL:
+				bp->line_speed = SPEED_1000;
+				break;
+			case BNX2_LINK_STATUS_2500HALF:
+				bp->duplex = DUPLEX_HALF;
+				/* fall through */
+			case BNX2_LINK_STATUS_2500FULL:
+				bp->line_speed = SPEED_2500;
+				break;
+			default:
+				bp->line_speed = 0;
+				break;
+		}
+
+		bp->flow_ctrl = 0;
+		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+			if (bp->duplex == DUPLEX_FULL)
+				bp->flow_ctrl = bp->req_flow_ctrl;
+		} else {
+			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
+				bp->flow_ctrl |= FLOW_CTRL_TX;
+			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
+				bp->flow_ctrl |= FLOW_CTRL_RX;
+		}
+
+		old_port = bp->phy_port;
+		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
+			bp->phy_port = PORT_FIBRE;
+		else
+			bp->phy_port = PORT_TP;
+
+		if (old_port != bp->phy_port)
+			bnx2_set_default_link(bp);
+
+	}
+	if (bp->link_up != link_up)
+		bnx2_report_link(bp);
+
+	bnx2_set_mac_link(bp);
+}
+
+static int
+bnx2_set_remote_link(struct bnx2 *bp)
+{
+	u32 evt_code;
+
+	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
+	switch (evt_code) {
+		case BNX2_FW_EVT_CODE_LINK_EVENT:
+			bnx2_remote_phy_event(bp);
+			break;
+		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
+		default:
+			bnx2_send_heart_beat(bp);
+			break;
+	}
+	return 0;
+}
+
+static int
+bnx2_setup_copper_phy(struct bnx2 *bp)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 bmcr, adv_reg, new_adv = 0;
+	u32 new_bmcr;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+		    ADVERTISE_PAUSE_ASYM);
+
+	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		u32 adv1000_reg;
+		u32 new_adv1000 = 0;
+
+		new_adv |= bnx2_phy_get_pause_adv(bp);
+
+		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
+		adv1000_reg &= PHY_ALL_1000_SPEED;
+
+		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
+		if ((adv1000_reg != new_adv1000) ||
+			(adv_reg != new_adv) ||
+			((bmcr & BMCR_ANENABLE) == 0)) {
+
+			bnx2_write_phy(bp, bp->mii_adv, new_adv);
+			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
+				BMCR_ANENABLE);
+		}
+		else if (bp->link_up) {
+			/* Flow ctrl may have changed from auto to forced
+			 * or vice-versa.
+			 */
+
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+		return 0;
+	}
+
+	/* advertise nothing when forcing speed */
+	if (adv_reg != new_adv)
+		bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
+	new_bmcr = 0;
+	if (bp->req_line_speed == SPEED_100) {
+		new_bmcr |= BMCR_SPEED100;
+	}
+	if (bp->req_duplex == DUPLEX_FULL) {
+		new_bmcr |= BMCR_FULLDPLX;
+	}
+	if (new_bmcr != bmcr) {
+		u32 bmsr;
+
+		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+		if (bmsr & BMSR_LSTATUS) {
+			/* Force link down */
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(50);
+			spin_lock_bh(&bp->phy_lock);
+
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+		}
+
+		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+
+		/* Normally, the new speed is set up after the link has
+		 * gone down and up again. In some cases, the link will not
+		 * go down so we need to set up the new speed here.
+		 */
+		if (bmsr & BMSR_LSTATUS) {
+			bp->line_speed = bp->req_line_speed;
+			bp->duplex = bp->req_duplex;
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+	} else {
+		bnx2_resolve_flow_ctrl(bp);
+		bnx2_set_mac_link(bp);
+	}
+	return 0;
+}
+
+static int
+bnx2_setup_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	if (bp->loopback == MAC_LOOPBACK)
+		return 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		return bnx2_setup_serdes_phy(bp, port);
+	}
+	else {
+		return bnx2_setup_copper_phy(bp);
+	}
+}
+
+static int
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
+	bp->mii_bmcr = MII_BMCR + 0x10;
+	bp->mii_bmsr = MII_BMSR + 0x10;
+	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
+	bp->mii_adv = MII_ADVERTISE + 0x10;
+	bp->mii_lpa = MII_LPA + 0x10;
+	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
+	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
+
+	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
+	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
+	val |= MII_BNX2_SD_1000XCTL1_FIBER;
+	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+		val |= BCM5708S_UP1_2G5;
+	else
+		val &= ~BCM5708S_UP1_2G5;
+	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
+	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
+	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
+	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
+
+	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
+	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
+	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return 0;
+}
+
+static int
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bp->mii_up1 = BCM5708S_UP1;
+
+	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
+	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
+	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+
+	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
+	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
+	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
+
+	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
+	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
+	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
+		bnx2_read_phy(bp, BCM5708S_UP1, &val);
+		val |= BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, BCM5708S_UP1, val);
+	}
+
+	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
+		/* increase tx signal amplitude */
+		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+			       BCM5708S_BLK_ADDR_TX_MISC);
+		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
+		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
+		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
+		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+	}
+
+	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
+	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
+
+	if (val) {
+		u32 is_backplane;
+
+		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
+			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+				       BCM5708S_BLK_ADDR_TX_MISC);
+			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
+			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+				       BCM5708S_BLK_ADDR_DIG);
+		}
+	}
+	return 0;
+}
+
+static int
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
+{
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
+		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
+
+	if (bp->dev->mtu > 1500) {
+		u32 val;
+
+		/* Set extended packet length bit */
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
+
+		bnx2_write_phy(bp, 0x1c, 0x6c00);
+		bnx2_read_phy(bp, 0x1c, &val);
+		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
+	}
+	else {
+		u32 val;
+
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+		bnx2_write_phy(bp, 0x1c, 0x6c00);
+		bnx2_read_phy(bp, 0x1c, &val);
+		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
+	}
+
+	return 0;
+}
+
+static int
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
+		bnx2_write_phy(bp, 0x18, 0x0c00);
+		bnx2_write_phy(bp, 0x17, 0x000a);
+		bnx2_write_phy(bp, 0x15, 0x310b);
+		bnx2_write_phy(bp, 0x17, 0x201f);
+		bnx2_write_phy(bp, 0x15, 0x9506);
+		bnx2_write_phy(bp, 0x17, 0x401f);
+		bnx2_write_phy(bp, 0x15, 0x14e2);
+		bnx2_write_phy(bp, 0x18, 0x0400);
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
+		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
+			       MII_BNX2_DSP_EXPAND_REG | 0x8);
+		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+		val &= ~(1 << 8);
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
+	}
+
+	if (bp->dev->mtu > 1500) {
+		/* Set extended packet length bit */
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val | 0x4000);
+
+		bnx2_read_phy(bp, 0x10, &val);
+		bnx2_write_phy(bp, 0x10, val | 0x1);
+	}
+	else {
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+		bnx2_read_phy(bp, 0x10, &val);
+		bnx2_write_phy(bp, 0x10, val & ~0x1);
+	}
+
+	/* ethernet@wirespeed */
+	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+	/* auto-mdix */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		val |= AUX_CTL_MISC_CTL_AUTOMDIX;
+
+	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
+	return 0;
+}
+
+
+static int
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 val;
+	int rc = 0;
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
+	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
+
+	bp->mii_bmcr = MII_BMCR;
+	bp->mii_bmsr = MII_BMSR;
+	bp->mii_bmsr1 = MII_BMSR;
+	bp->mii_adv = MII_ADVERTISE;
+	bp->mii_lpa = MII_LPA;
+
+	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		goto setup_phy;
+
+	bnx2_read_phy(bp, MII_PHYSID1, &val);
+	bp->phy_id = val << 16;
+	bnx2_read_phy(bp, MII_PHYSID2, &val);
+	bp->phy_id |= val & 0xffff;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
+			rc = bnx2_init_5706s_phy(bp, reset_phy);
+		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
+			rc = bnx2_init_5708s_phy(bp, reset_phy);
+		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+			rc = bnx2_init_5709s_phy(bp, reset_phy);
+	}
+	else {
+		rc = bnx2_init_copper_phy(bp, reset_phy);
+	}
+
+setup_phy:
+	if (!rc)
+		rc = bnx2_setup_phy(bp, bp->phy_port);
+
+	return rc;
+}
+
+static int
+bnx2_set_mac_loopback(struct bnx2 *bp)
+{
+	u32 mac_mode;
+
+	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
+	mac_mode &= ~BNX2_EMAC_MODE_PORT;
+	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
+	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	bp->link_up = 1;
+	return 0;
+}
+
+static int bnx2_test_link(struct bnx2 *);
+
+static int
+bnx2_set_phy_loopback(struct bnx2 *bp)
+{
+	u32 mac_mode;
+	int rc, i;
+
+	spin_lock_bh(&bp->phy_lock);
+	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
+			    BMCR_SPEED1000);
+	spin_unlock_bh(&bp->phy_lock);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < 10; i++) {
+		if (bnx2_test_link(bp) == 0)
+			break;
+		msleep(100);
+	}
+
+	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
+	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+		      BNX2_EMAC_MODE_25G_MODE);
+
+	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
+	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	bp->link_up = 1;
+	return 0;
+}
+
+static void
+bnx2_dump_mcp_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+	u32 mcp_p0, mcp_p1;
+
+	netdev_err(dev, "<--- start MCP states dump --->\n");
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		mcp_p0 = BNX2_MCP_STATE_P0;
+		mcp_p1 = BNX2_MCP_STATE_P1;
+	} else {
+		mcp_p0 = BNX2_MCP_STATE_P0_5708;
+		mcp_p1 = BNX2_MCP_STATE_P1_5708;
+	}
+	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
+	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
+	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
+	netdev_err(dev, "DEBUG: shmem states:\n");
+	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
+		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
+		   bnx2_shmem_rd(bp, BNX2_FW_MB),
+		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
+	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
+	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
+		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
+		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
+	pr_cont(" condition[%08x]\n",
+		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
+	DP_SHMEM_LINE(bp, 0x3cc);
+	DP_SHMEM_LINE(bp, 0x3dc);
+	DP_SHMEM_LINE(bp, 0x3ec);
+	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
+	netdev_err(dev, "<--- end MCP states dump --->\n");
+}
+
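+/* Post a command to the bootcode mailbox and optionally wait up to
+ * BNX2_FW_ACK_TIME_OUT_MS for the matching sequence number to be acked.
+ * On timeout the firmware is told about it and the MCP state is dumped
+ * unless 'silent' is set.
+ */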
+static int
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
+{
+	int i;
+	u32 val;
+
+	bp->fw_wr_seq++;
+	msg_data |= bp->fw_wr_seq;
+	bp->fw_last_msg = msg_data;
+
+	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+
+	if (!ack)
+		return 0;
+
+	/* wait for an acknowledgement. */
+	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
+		msleep(10);
+
+		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
+
+		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
+			break;
+	}
+	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
+		return 0;
+
+	/* If we timed out, inform the firmware that this is the case. */
+	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
+		msg_data &= ~BNX2_DRV_MSG_CODE;
+		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
+
+		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+		if (!silent) {
+			pr_err("fw sync timeout, reset code = %x\n", msg_data);
+			bnx2_dump_mcp_state(bp);
+		}
+
+		return -EBUSY;
+	}
+
+	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
+		return -EIO;
+
+	return 0;
+}
+
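+/* Initialize the 5709 on-chip context memory and load the host page table
+ * with the DMA addresses of the previously allocated context blocks.
+ */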
+static int
+bnx2_init_5709_context(struct bnx2 *bp)
+{
+	int i, ret = 0;
+	u32 val;
+
+	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+	val |= (BNX2_PAGE_BITS - 8) << 16;
+	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
+	for (i = 0; i < 10; i++) {
+		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
+		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
+			break;
+		udelay(2);
+	}
+	if (val & BNX2_CTX_COMMAND_MEM_INIT)
+		return -EBUSY;
+
+	for (i = 0; i < bp->ctx_pages; i++) {
+		int j;
+
+		if (bp->ctx_blk[i])
+			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
+		else
+			return -ENOMEM;
+
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(bp->ctx_blk_mapping[i] & 0xffffffff) |
+			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) bp->ctx_blk_mapping[i] >> 32);
+		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+		for (j = 0; j < 10; j++) {
+
+			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void
+bnx2_init_context(struct bnx2 *bp)
+{
+	u32 vcid;
+
+	vcid = 96;
+	while (vcid) {
+		u32 vcid_addr, pcid_addr, offset;
+		int i;
+
+		vcid--;
+
+		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+			u32 new_vcid;
+
+			vcid_addr = GET_PCID_ADDR(vcid);
+			if (vcid & 0x8) {
+				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
+			}
+			else {
+				new_vcid = vcid;
+			}
+			pcid_addr = GET_PCID_ADDR(new_vcid);
+		}
+		else {
+			vcid_addr = GET_CID_ADDR(vcid);
+			pcid_addr = vcid_addr;
+		}
+
+		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
+			vcid_addr += (i << PHY_CTX_SHIFT);
+			pcid_addr += (i << PHY_CTX_SHIFT);
+
+			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+
+			/* Zero out the context. */
+			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
+				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
+		}
+	}
+}
+
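+/* Work around defective on-chip RX buffer memory: allocate every firmware
+ * mbuf cluster, then free back only those without the bad-block bit (bit 9)
+ * set, so the bad clusters stay permanently allocated and unused.
+ */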
+static int
+bnx2_alloc_bad_rbuf(struct bnx2 *bp)
+{
+	u16 *good_mbuf;
+	u32 good_mbuf_cnt;
+	u32 val;
+
+	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
+	if (good_mbuf == NULL)
+		return -ENOMEM;
+
+	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
+
+	good_mbuf_cnt = 0;
+
+	/* Allocate a bunch of mbufs and save the good ones in an array. */
+	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
+		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
+				BNX2_RBUF_COMMAND_ALLOC_REQ);
+
+		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
+
+		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
+
+		/* The addresses with Bit 9 set are bad memory blocks. */
+		if (!(val & (1 << 9))) {
+			good_mbuf[good_mbuf_cnt] = (u16) val;
+			good_mbuf_cnt++;
+		}
+
+		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+	}
+
+	/* Free the good ones back to the mbuf pool, thus discarding
+	 * all the bad ones. */
+	while (good_mbuf_cnt) {
+		good_mbuf_cnt--;
+
+		val = good_mbuf[good_mbuf_cnt];
+		val = (val << 9) | val | 1;
+
+		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
+	}
+	kfree(good_mbuf);
+	return 0;
+}
+
+static void
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+{
+	u32 val;
+
+	val = (mac_addr[0] << 8) | mac_addr[1];
+
+	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
+
+	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		(mac_addr[4] << 8) | mac_addr[5];
+
+	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
+}
+
+static inline int
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+	dma_addr_t mapping;
+	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct bnx2_rx_bd *rxbd =
+		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
+	struct page *page = alloc_page(gfp);
+
+	if (!page)
+		return -ENOMEM;
+	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		__free_page(page);
+		return -EIO;
+	}
+
+	rx_pg->page = page;
+	dma_unmap_addr_set(rx_pg, mapping, mapping);
+	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+	return 0;
+}
+
+static void
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+{
+	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct page *page = rx_pg->page;
+
+	if (!page)
+		return;
+
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+	__free_page(page);
+	rx_pg->page = NULL;
+}
+
+static inline int
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+	u8 *data;
+	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
+	dma_addr_t mapping;
+	struct bnx2_rx_bd *rxbd =
+		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
+
+	data = kmalloc(bp->rx_buf_size, gfp);
+	if (!data)
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev,
+				 get_l2_fhdr(data),
+				 bp->rx_buf_use_size,
+				 PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		kfree(data);
+		return -EIO;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+	return 0;
+}
+
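+/* Return 1 if the attention bit for this event differs from its ack bit
+ * (a new event) and acknowledge it by setting or clearing the status bit
+ * to match.
+ */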
+static int
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+	u32 new_link_state, old_link_state;
+	int is_set = 1;
+
+	new_link_state = sblk->status_attn_bits & event;
+	old_link_state = sblk->status_attn_bits_ack & event;
+	if (new_link_state != old_link_state) {
+		if (new_link_state)
+			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
+		else
+			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
+	} else
+		is_set = 0;
+
+	return is_set;
+}
+
+static void
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	spin_lock(&bp->phy_lock);
+
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
+		bnx2_set_link(bp);
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
+		bnx2_set_remote_link(bp);
+
+	spin_unlock(&bp->phy_lock);
+
+}
+
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
+{
+	u16 cons;
+
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_tx_cons_ptr;
+	barrier();
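+	/* The last entry of each ring page is a next-page pointer, not a
+	 * real BD, so skip over it when the hardware index lands there.
+	 */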
+	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
+		cons++;
+	return cons;
+}
+
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	u16 hw_cons, sw_cons, sw_ring_cons;
+	int tx_pkt = 0, index;
+	unsigned int tx_bytes = 0;
+	struct netdev_queue *txq;
+
+	index = (bnapi - bp->bnx2_napi);
+	txq = netdev_get_tx_queue(bp->dev, index);
+
+	hw_cons = bnx2_get_hw_tx_cons(bnapi);
+	sw_cons = txr->tx_cons;
+
+	while (sw_cons != hw_cons) {
+		struct bnx2_sw_tx_bd *tx_buf;
+		struct sk_buff *skb;
+		int i, last;
+
+		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
+
+		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
+		skb = tx_buf->skb;
+
+		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+		prefetch(&skb->end);
+
+		/* partial BD completions possible with TSO packets */
+		if (tx_buf->is_gso) {
+			u16 last_idx, last_ring_idx;
+
+			last_idx = sw_cons + tx_buf->nr_frags + 1;
+			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
+			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
+				last_idx++;
+			}
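+			/* Signed 16-bit compare handles index wraparound: if
+			 * the last BD of this packet is still ahead of
+			 * hw_cons, the packet is not fully completed yet.
+			 */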
+			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
+				break;
+			}
+		}
+
+		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			skb_headlen(skb), PCI_DMA_TODEVICE);
+
+		tx_buf->skb = NULL;
+		last = tx_buf->nr_frags;
+
+		for (i = 0; i < last; i++) {
+			struct bnx2_sw_tx_bd *tx_buf;
+
+			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+
+			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
+			dma_unmap_page(&bp->pdev->dev,
+				dma_unmap_addr(tx_buf, mapping),
+				skb_frag_size(&skb_shinfo(skb)->frags[i]),
+				PCI_DMA_TODEVICE);
+		}
+
+		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+
+		tx_bytes += skb->len;
+		dev_kfree_skb_any(skb);
+		tx_pkt++;
+		if (tx_pkt == budget)
+			break;
+
+		if (hw_cons == sw_cons)
+			hw_cons = bnx2_get_hw_tx_cons(bnapi);
+	}
+
+	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
+	txr->hw_tx_cons = hw_cons;
+	txr->tx_cons = sw_cons;
+
+	/* Need to make the tx_cons update visible to bnx2_start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnx2_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
+		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+
+	return tx_pkt;
+}
+
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+			struct sk_buff *skb, int count)
+{
+	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
+	struct bnx2_rx_bd *cons_bd, *prod_bd;
+	int i;
+	u16 hw_prod, prod;
+	u16 cons = rxr->rx_pg_cons;
+
+	cons_rx_pg = &rxr->rx_pg_ring[cons];
+
+	/* The caller was unable to allocate a new page to replace the
+	 * last one in the frags array, so we need to recycle that page
+	 * and then free the skb.
+	 */
+	if (skb) {
+		struct page *page;
+		struct skb_shared_info *shinfo;
+
+		shinfo = skb_shinfo(skb);
+		shinfo->nr_frags--;
+		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
+		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
+
+		cons_rx_pg->page = page;
+		dev_kfree_skb(skb);
+	}
+
+	hw_prod = rxr->rx_pg_prod;
+
+	for (i = 0; i < count; i++) {
+		prod = BNX2_RX_PG_RING_IDX(hw_prod);
+
+		prod_rx_pg = &rxr->rx_pg_ring[prod];
+		cons_rx_pg = &rxr->rx_pg_ring[cons];
+		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
+						[BNX2_RX_IDX(cons)];
+		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
+						[BNX2_RX_IDX(prod)];
+
+		if (prod != cons) {
+			prod_rx_pg->page = cons_rx_pg->page;
+			cons_rx_pg->page = NULL;
+			dma_unmap_addr_set(prod_rx_pg, mapping,
+				dma_unmap_addr(cons_rx_pg, mapping));
+
+			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+
+		}
+		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
+		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
+	}
+	rxr->rx_pg_prod = hw_prod;
+	rxr->rx_pg_cons = cons;
+}
+
+static inline void
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		   u8 *data, u16 cons, u16 prod)
+{
+	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
+	struct bnx2_rx_bd *cons_bd, *prod_bd;
+
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+
+	dma_sync_single_for_device(&bp->pdev->dev,
+		dma_unmap_addr(cons_rx_buf, mapping),
+		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+	prod_rx_buf->data = data;
+
+	if (cons == prod)
+		return;
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			dma_unmap_addr(cons_rx_buf, mapping));
+
+	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
+	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
+	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+}
+
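+/* Build an skb around a received data buffer.  For split or jumbo packets
+ * the first hdr_len bytes stay in the linear area and the remainder is
+ * attached from the page ring as frags.  ring_idx packs the consumer
+ * index in the upper 16 bits and the producer index in the lower 16 bits.
+ */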
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
+	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+	    u32 ring_idx)
+{
+	int err;
+	u16 prod = ring_idx & 0xffff;
+	struct sk_buff *skb;
+
+	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
+		if (hdr_len) {
+			unsigned int raw_len = len + 4;
+			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+		}
+		return NULL;
+	}
+
+	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+	skb = build_skb(data, 0);
+	if (!skb) {
+		kfree(data);
+		goto error;
+	}
+	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
+	if (hdr_len == 0) {
+		skb_put(skb, len);
+		return skb;
+	} else {
+		unsigned int i, frag_len, frag_size, pages;
+		struct bnx2_sw_pg *rx_pg;
+		u16 pg_cons = rxr->rx_pg_cons;
+		u16 pg_prod = rxr->rx_pg_prod;
+
+		frag_size = len + 4 - hdr_len;
+		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+		skb_put(skb, hdr_len);
+
+		for (i = 0; i < pages; i++) {
+			dma_addr_t mapping_old;
+
+			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
+			if (unlikely(frag_len <= 4)) {
+				unsigned int tail = 4 - frag_len;
+
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
+							pages - i);
+				skb->len -= tail;
+				if (i == 0) {
+					skb->tail -= tail;
+				} else {
+					skb_frag_t *frag =
+						&skb_shinfo(skb)->frags[i - 1];
+					skb_frag_size_sub(frag, tail);
+					skb->data_len -= tail;
+				}
+				return skb;
+			}
+			rx_pg = &rxr->rx_pg_ring[pg_cons];
+
+			/* Don't unmap yet.  If we're unable to allocate a new
+			 * page, we need to recycle the page and the DMA addr.
+			 */
+			mapping_old = dma_unmap_addr(rx_pg, mapping);
+			if (i == pages - 1)
+				frag_len -= 4;
+
+			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+			rx_pg->page = NULL;
+
+			err = bnx2_alloc_rx_page(bp, rxr,
+						 BNX2_RX_PG_RING_IDX(pg_prod),
+						 GFP_ATOMIC);
+			if (unlikely(err)) {
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
+							pages - i);
+				return NULL;
+			}
+
+			dma_unmap_page(&bp->pdev->dev, mapping_old,
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			frag_size -= frag_len;
+			skb->data_len += frag_len;
+			skb->truesize += PAGE_SIZE;
+			skb->len += frag_len;
+
+			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
+			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
+		}
+		rxr->rx_pg_prod = pg_prod;
+		rxr->rx_pg_cons = pg_cons;
+	}
+	return skb;
+}
+
+static inline u16
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
+{
+	u16 cons;
+
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_rx_cons_ptr;
+	barrier();
+	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
+		cons++;
+	return cons;
+}
+
+static int
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
+	struct l2_fhdr *rx_hdr;
+	int rx_pkt = 0, pg_ring_used = 0;
+
+	if (budget <= 0)
+		return rx_pkt;
+
+	hw_cons = bnx2_get_hw_rx_cons(bnapi);
+	sw_cons = rxr->rx_cons;
+	sw_prod = rxr->rx_prod;
+
+	/* Memory barrier necessary as speculative reads of the rx
+	 * buffer can be ahead of the index in the status block
+	 */
+	rmb();
+	while (sw_cons != hw_cons) {
+		unsigned int len, hdr_len;
+		u32 status;
+		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
+		struct sk_buff *skb;
+		dma_addr_t dma_addr;
+		u8 *data;
+		u16 next_ring_idx;
+
+		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
+		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
+
+		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
+		data = rx_buf->data;
+		rx_buf->data = NULL;
+
+		rx_hdr = get_l2_fhdr(data);
+		prefetch(rx_hdr);
+
+		dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
+			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+			PCI_DMA_FROMDEVICE);
+
+		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
+		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
+		prefetch(get_l2_fhdr(next_rx_buf->data));
+
+		len = rx_hdr->l2_fhdr_pkt_len;
+		status = rx_hdr->l2_fhdr_status;
+
+		hdr_len = 0;
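+		/* When the hardware splits the header from the payload, the
+		 * header length is reported in the l2_fhdr_ip_xsum field.
+		 */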
+		if (status & L2_FHDR_STATUS_SPLIT) {
+			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+			pg_ring_used = 1;
+		} else if (len > bp->rx_jumbo_thresh) {
+			hdr_len = bp->rx_jumbo_thresh;
+			pg_ring_used = 1;
+		}
+
+		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
+				       L2_FHDR_ERRORS_PHY_DECODE |
+				       L2_FHDR_ERRORS_ALIGNMENT |
+				       L2_FHDR_ERRORS_TOO_SHORT |
+				       L2_FHDR_ERRORS_GIANT_FRAME))) {
+
+			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
+					  sw_ring_prod);
+			if (pg_ring_used) {
+				int pages;
+
+				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
+
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+			}
+			goto next_rx;
+		}
+
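+		/* l2_fhdr_pkt_len includes the 4-byte FCS; strip it. */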
+		len -= 4;
+
+		if (len <= bp->rx_copy_thresh) {
+			skb = netdev_alloc_skb(bp->dev, len + 6);
+			if (skb == NULL) {
+				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
+						  sw_ring_prod);
+				goto next_rx;
+			}
+
+			/* aligned copy */
+			memcpy(skb->data,
+			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+			       len + 6);
+			skb_reserve(skb, 6);
+			skb_put(skb, len);
+
+			bnx2_reuse_rx_data(bp, rxr, data,
+				sw_ring_cons, sw_ring_prod);
+
+		} else {
+			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+					  (sw_ring_cons << 16) | sw_ring_prod);
+			if (!skb)
+				goto next_rx;
+		}
+		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+
+		if (len > (bp->dev->mtu + ETH_HLEN) &&
+		    skb->protocol != htons(0x8100) &&
+		    skb->protocol != htons(ETH_P_8021AD)) {
+
+			dev_kfree_skb(skb);
+			goto next_rx;
+
+		}
+
+		skb_checksum_none_assert(skb);
+		if ((bp->dev->features & NETIF_F_RXCSUM) &&
+			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
+			L2_FHDR_STATUS_UDP_DATAGRAM))) {
+
+			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
+					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+		if ((bp->dev->features & NETIF_F_RXHASH) &&
+		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
+		     L2_FHDR_STATUS_USE_RXHASH))
+			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+				     PKT_HASH_TYPE_L3);
+
+		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
+		napi_gro_receive(&bnapi->napi, skb);
+		rx_pkt++;
+
+next_rx:
+		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
+		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
+
+		if ((rx_pkt == budget))
+			break;
+
+		/* Refresh hw_cons to see if there is new work */
+		if (sw_cons == hw_cons) {
+			hw_cons = bnx2_get_hw_rx_cons(bnapi);
+			rmb();
+		}
+	}
+	rxr->rx_cons = sw_cons;
+	rxr->rx_prod = sw_prod;
+
+	if (pg_ring_used)
+		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+
+	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
+
+	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+
+	mmiowb();
+
+	return rx_pkt;
+
+}
+
+/* MSI ISR - The only difference between this and the INTx ISR
+ * is that the MSI interrupt is always serviced.
+ */
+static irqreturn_t
+bnx2_msi(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+
+	prefetch(bnapi->status_blk.msi);
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_msi_1shot(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+
+	prefetch(bnapi->status_blk.msi);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_interrupt(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	/* When using INTx, it is possible for the interrupt to arrive
+	 * at the CPU before the status block posted prior to the
+	 * interrupt. Reading a register will flush the status block.
+	 * When using MSI, the MSI message will always complete after
+	 * the status block write.
+	 */
+	if ((sblk->status_idx == bnapi->last_status_idx) &&
+	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
+	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
+		return IRQ_NONE;
+
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	/* Read back to deassert IRQ immediately to avoid too many
+	 * spurious interrupts.
+	 */
+	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+
+	/* Return here if interrupt is shared and is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	if (napi_schedule_prep(&bnapi->napi)) {
+		bnapi->last_status_idx = sblk->status_idx;
+		__napi_schedule(&bnapi->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+		return 1;
+	return 0;
+}
+
+#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
+				 STATUS_ATTN_BITS_TIMER_ABORT)
+
+static inline int
+bnx2_has_work(struct bnx2_napi *bnapi)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	if (bnx2_has_fast_work(bnapi))
+		return 1;
+
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
+	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
+	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
+		return 1;
+
+	return 0;
+}
+
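+/* Work around occasionally lost MSIs: if there is pending work but the
+ * status index has not advanced since the last idle check, toggle the MSI
+ * enable bit and invoke the handler directly.
+ */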
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 msi_ctrl;
+
+	if (bnx2_has_work(bnapi)) {
+		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+			return;
+
+		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+		}
+	}
+
+	bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct cnic_ops *c_ops;
+
+	if (!bnapi->cnic_present)
+		return;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+						      bnapi->status_blk.msi);
+	rcu_read_unlock();
+}
+#endif
+
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+	u32 status_attn_bits = sblk->status_attn_bits;
+	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
+
+	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
+	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
+
+		bnx2_phy_int(bp, bnapi);
+
+		/* This is needed to take care of transient status
+		 * during link changes.
+		 */
+		BNX2_WR(bp, BNX2_HC_COMMAND,
+			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+		BNX2_RD(bp, BNX2_HC_COMMAND);
+	}
+}
+
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			  int work_done, int budget)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
+		bnx2_tx_int(bp, bnapi, 0);
+
+	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
+		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
+
+	return work_done;
+}
+
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+	while (1) {
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+		if (unlikely(work_done >= budget))
+			break;
+
+		bnapi->last_status_idx = sblk->status_idx;
+		/* status idx must be read before checking for more work. */
+		rmb();
+		if (likely(!bnx2_has_fast_work(bnapi))) {
+
+			napi_complete(napi);
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+				bnapi->last_status_idx);
+			break;
+		}
+	}
+	return work_done;
+}
+
+static int bnx2_poll(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	while (1) {
+		bnx2_poll_link(bp, bnapi);
+
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
+		/* bnapi->last_status_idx is used below to tell the hw how
+		 * much work has been processed, so we must read it before
+		 * checking for more work.
+		 */
+		bnapi->last_status_idx = sblk->status_idx;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		rmb();
+		if (likely(!bnx2_has_work(bnapi))) {
+			napi_complete(napi);
+			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
+				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+					bnapi->last_status_idx);
+				break;
+			}
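+			/* INTx: ack with the interrupt still masked first,
+			 * then write again without the mask bit to re-enable
+			 * it.
+			 */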
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+				bnapi->last_status_idx);
+
+			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+				bnapi->last_status_idx);
+			break;
+		}
+	}
+
+	return work_done;
+}
+
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
+ * from set_multicast.
+ */
+static void
+bnx2_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 rx_mode, sort_mode;
+	struct netdev_hw_addr *ha;
+	int i;
+
+	if (!netif_running(dev))
+		return;
+
+	spin_lock_bh(&bp->phy_lock);
+
+	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
+				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
+	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
+	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
+	if (dev->flags & IFF_PROMISC) {
+		/* Promiscuous mode. */
+		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
+	}
+	else if (dev->flags & IFF_ALLMULTI) {
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				0xffffffff);
+		}
+		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
+	}
+	else {
+		/* Accept one or more multicast(s). */
+		u32 mc_filter[NUM_MC_HASH_REGISTERS];
+		u32 regidx;
+		u32 bit;
+		u32 crc;
+
+		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
+
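+		/* Hash each address into a 256-bit table: the low CRC byte
+		 * selects the bit, spread across eight 32-bit registers.
+		 */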
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
+			bit = crc & 0xff;
+			regidx = (bit & 0xe0) >> 5;
+			bit &= 0x1f;
+			mc_filter[regidx] |= (1 << bit);
+		}
+
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				mc_filter[i]);
+		}
+
+		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
+	}
+
+	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
+		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
+	} else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries into the match filter list */
+		i = 0;
+		netdev_for_each_uc_addr(ha, dev) {
+			bnx2_set_mac_addr(bp, ha->addr,
+					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
+			sort_mode |= (1 <<
+				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+			i++;
+		}
+
+	}
+
+	if (rx_mode != bp->rx_mode) {
+		bp->rx_mode = rx_mode;
+		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
+	}
+
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+
+	spin_unlock_bh(&bp->phy_lock);
+}
+
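+/* Sanity-check one firmware section: it must lie within the file, start on
+ * a dword boundary, and have a length that satisfies the given alignment.
+ */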
+static int
+check_fw_section(const struct firmware *fw,
+		 const struct bnx2_fw_file_section *section,
+		 u32 alignment, bool non_empty)
+{
+	u32 offset = be32_to_cpu(section->offset);
+	u32 len = be32_to_cpu(section->len);
+
+	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
+		return -EINVAL;
+	if ((non_empty && len == 0) || len > fw->size - offset ||
+	    len & (alignment - 1))
+		return -EINVAL;
+	return 0;
+}
+
+static int
+check_mips_fw_entry(const struct firmware *fw,
+		    const struct bnx2_mips_fw_file_entry *entry)
+{
+	if (check_fw_section(fw, &entry->text, 4, true) ||
+	    check_fw_section(fw, &entry->data, 4, false) ||
+	    check_fw_section(fw, &entry->rodata, 4, false))
+		return -EINVAL;
+	return 0;
+}
+
+static void bnx2_release_firmware(struct bnx2 *bp)
+{
+	if (bp->rv2p_firmware) {
+		release_firmware(bp->mips_firmware);
+		release_firmware(bp->rv2p_firmware);
+		bp->rv2p_firmware = NULL;
+	}
+}
+
+static int bnx2_request_uncached_firmware(struct bnx2 *bp)
+{
+	const char *mips_fw_file, *rv2p_fw_file;
+	const struct bnx2_mips_fw_file *mips_fw;
+	const struct bnx2_rv2p_fw_file *rv2p_fw;
+	int rc;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		mips_fw_file = FW_MIPS_FILE_09;
+		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
+		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
+			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
+		else
+			rv2p_fw_file = FW_RV2P_FILE_09;
+	} else {
+		mips_fw_file = FW_MIPS_FILE_06;
+		rv2p_fw_file = FW_RV2P_FILE_06;
+	}
+
+	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
+	if (rc) {
+		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
+		goto out;
+	}
+
+	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
+	if (rc) {
+		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
+		goto err_release_mips_firmware;
+	}
+	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
+		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
+		rc = -EINVAL;
+		goto err_release_firmware;
+	}
+	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
+	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
+	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
+		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
+		rc = -EINVAL;
+		goto err_release_firmware;
+	}
+out:
+	return rc;
+
+err_release_firmware:
+	release_firmware(bp->rv2p_firmware);
+	bp->rv2p_firmware = NULL;
+err_release_mips_firmware:
+	release_firmware(bp->mips_firmware);
+	goto out;
+}
+
+static int bnx2_request_firmware(struct bnx2 *bp)
+{
+	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
+}
+
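+/* Patch run-time values into the RV2P firmware image; currently only the
+ * BD page size is fixed up.
+ */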
+static u32
+rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
+{
+	switch (idx) {
+	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
+		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
+		rv2p_code |= RV2P_BD_PAGE_SIZE;
+		break;
+	}
+	return rv2p_code;
+}
+
+static int
+load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
+	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
+{
+	u32 rv2p_code_len, file_offset;
+	__be32 *rv2p_code;
+	int i;
+	u32 val, cmd, addr;
+
+	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
+	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
+
+	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+
+	if (rv2p_proc == RV2P_PROC1) {
+		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
+		addr = BNX2_RV2P_PROC1_ADDR_CMD;
+	} else {
+		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
+		addr = BNX2_RV2P_PROC2_ADDR_CMD;
+	}
+
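+	/* Each RV2P instruction is 8 bytes: write the high and low words,
+	 * then commit them at instruction index i / 8.
+	 */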
+	for (i = 0; i < rv2p_code_len; i += 8) {
+		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
+		rv2p_code++;
+		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
+		rv2p_code++;
+
+		val = (i / 8) | cmd;
+		BNX2_WR(bp, addr, val);
+	}
+
+	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+	for (i = 0; i < 8; i++) {
+		u32 loc, code;
+
+		loc = be32_to_cpu(fw_entry->fixup[i]);
+		if (loc && ((loc * 4) < rv2p_code_len)) {
+			code = be32_to_cpu(*(rv2p_code + loc - 1));
+			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
+			code = be32_to_cpu(*(rv2p_code + loc));
+			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
+			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
+
+			val = (loc / 2) | cmd;
+			BNX2_WR(bp, addr, val);
+		}
+	}
+
+	/* Reset the processor, un-stall is done later. */
+	if (rv2p_proc == RV2P_PROC1) {
+		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
+	}
+	else {
+		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
+	}
+
+	return 0;
+}
+
+static int
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
+	    const struct bnx2_mips_fw_file_entry *fw_entry)
+{
+	u32 addr, len, file_offset;
+	__be32 *data;
+	u32 offset;
+	u32 val;
+
+	/* Halt the CPU. */
+	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+	val |= cpu_reg->mode_value_halt;
+	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+
+	/* Load the Text area. */
+	addr = be32_to_cpu(fw_entry->text.addr);
+	len = be32_to_cpu(fw_entry->text.len);
+	file_offset = be32_to_cpu(fw_entry->text.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Load the Data area. */
+	addr = be32_to_cpu(fw_entry->data.addr);
+	len = be32_to_cpu(fw_entry->data.len);
+	file_offset = be32_to_cpu(fw_entry->data.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Load the Read-Only area. */
+	addr = be32_to_cpu(fw_entry->rodata.addr);
+	len = be32_to_cpu(fw_entry->rodata.len);
+	file_offset = be32_to_cpu(fw_entry->rodata.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Clear the pre-fetch instruction. */
+	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
+
+	val = be32_to_cpu(fw_entry->start_addr);
+	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
+
+	/* Start the CPU. */
+	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+	val &= ~cpu_reg->mode_value_halt;
+	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+
+	return 0;
+}
+
+static int
+bnx2_init_cpus(struct bnx2 *bp)
+{
+	const struct bnx2_mips_fw_file *mips_fw =
+		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+	const struct bnx2_rv2p_fw_file *rv2p_fw =
+		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+	int rc;
+
+	/* Initialize the RV2P processor. */
+	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
+	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
+
+	/* Initialize the RX Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the TX Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the TX Patch-up Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the Completion Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the Command Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
+
+init_cpu_err:
+	return rc;
+}
+
+static void
+bnx2_setup_wol(struct bnx2 *bp)
+{
+	int i;
+	u32 val, wol_msg;
+
+	if (bp->wol) {
+		u32 advertising;
+		u8 autoneg;
+
+		autoneg = bp->autoneg;
+		advertising = bp->advertising;
+
+		if (bp->phy_port == PORT_TP) {
+			bp->autoneg = AUTONEG_SPEED;
+			bp->advertising = ADVERTISED_10baseT_Half |
+				ADVERTISED_10baseT_Full |
+				ADVERTISED_100baseT_Half |
+				ADVERTISED_100baseT_Full |
+				ADVERTISED_Autoneg;
+		}
+
+		spin_lock_bh(&bp->phy_lock);
+		bnx2_setup_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
+
+		bp->autoneg = autoneg;
+		bp->advertising = advertising;
+
+		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+		val = BNX2_RD(bp, BNX2_EMAC_MODE);
+
+		/* Enable port mode. */
+		val &= ~BNX2_EMAC_MODE_PORT;
+		val |= BNX2_EMAC_MODE_MPKT_RCVD |
+		       BNX2_EMAC_MODE_ACPI_RCVD |
+		       BNX2_EMAC_MODE_MPKT;
+		if (bp->phy_port == PORT_TP) {
+			val |= BNX2_EMAC_MODE_PORT_MII;
+		} else {
+			val |= BNX2_EMAC_MODE_PORT_GMII;
+			if (bp->line_speed == SPEED_2500)
+				val |= BNX2_EMAC_MODE_25G_MODE;
+		}
+
+		BNX2_WR(bp, BNX2_EMAC_MODE, val);
+
+		/* receive all multicast */
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				0xffffffff);
+		}
+		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
+
+		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
+
+		/* Need to enable EMAC and RPM for WOL. */
+		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+
+		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+
+		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+	} else {
+		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+	}
+
+	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+		u32 val;
+
+		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+			bnx2_fw_sync(bp, wol_msg, 1, 0);
+			return;
+		}
+		/* Tell firmware not to power down the PHY yet, otherwise
+		 * the chip will take a long time to respond to MMIO reads.
+		 */
+		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
+		bnx2_fw_sync(bp, wol_msg, 1, 0);
+		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+	}
+
+}
+
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+	switch (state) {
+	case PCI_D0: {
+		u32 val;
+
+		pci_enable_wake(bp->pdev, PCI_D0, false);
+		pci_set_power_state(bp->pdev, PCI_D0);
+
+		val = BNX2_RD(bp, BNX2_EMAC_MODE);
+		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+		val &= ~BNX2_EMAC_MODE_MPKT;
+		BNX2_WR(bp, BNX2_EMAC_MODE, val);
+
+		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+		break;
+	}
+	case PCI_D3hot: {
+		bnx2_setup_wol(bp);
+		pci_wake_from_d3(bp->pdev, bp->wol);
+		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
+
+			if (bp->wol)
+				pci_set_power_state(bp->pdev, PCI_D3hot);
+			break;
+
+		}
+		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+			u32 val;
+
+			/* Tell firmware not to power down the PHY yet,
+			 * otherwise the other port may not respond to
+			 * MMIO reads.
+			 */
+			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+			val &= ~BNX2_CONDITION_PM_STATE_MASK;
+			val |= BNX2_CONDITION_PM_STATE_UNPREP;
+			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+		}
+		pci_set_power_state(bp->pdev, PCI_D3hot);
+
+		/* No more memory access after this point until the
+		 * device is brought back to D0.
+		 */
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+bnx2_acquire_nvram_lock(struct bnx2 *bp)
+{
+	u32 val;
+	int j;
+
+	/* Request access to the flash interface. */
+	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
+		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
+			break;
+
+		udelay(5);
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_release_nvram_lock(struct bnx2 *bp)
+{
+	int j;
+	u32 val;
+
+	/* Relinquish nvram interface. */
+	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
+		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
+			break;
+
+		udelay(5);
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+
+static int
+bnx2_enable_nvram_write(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = BNX2_RD(bp, BNX2_MISC_CFG);
+	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
+
+	if (bp->flash_info->flags & BNX2_NV_WREN) {
+		int j;
+
+		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+		BNX2_WR(bp, BNX2_NVM_COMMAND,
+			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
+
+		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+			udelay(5);
+
+			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
+			if (val & BNX2_NVM_COMMAND_DONE)
+				break;
+		}
+
+		if (j >= NVRAM_TIMEOUT_COUNT)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+static void
+bnx2_disable_nvram_write(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = BNX2_RD(bp, BNX2_MISC_CFG);
+	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
+}
+
+
+static void
+bnx2_enable_nvram_access(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	/* Enable both bits, even on read. */
+	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
+}
+
+static void
+bnx2_disable_nvram_access(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	/* Disable both bits, even after read. */
+	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
+			BNX2_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static int
+bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
+{
+	u32 cmd;
+	int j;
+
+	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
+		/* Buffered flash, no erase needed */
+		return 0;
+
+	/* Build an erase command */
+	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
+	      BNX2_NVM_COMMAND_DOIT;
+
+	/* Need to clear DONE bit separately. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	/* Address of the NVRAM page to erase. */
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue an erase command. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		u32 val;
+
+		udelay(5);
+
+		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
+		if (val & BNX2_NVM_COMMAND_DONE)
+			break;
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
+{
+	u32 cmd;
+	int j;
+
+	/* Build the command word. */
+	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
+
+	/* Calculate the page-based offset for buffered flash, not needed
+	 * for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+		offset = ((offset / bp->flash_info->page_size) <<
+			   bp->flash_info->page_bits) +
+			  (offset % bp->flash_info->page_size);
+	}
+
+	/* Need to clear DONE bit separately. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	/* Address of the NVRAM to read from. */
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue a read command. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		u32 val;
+
+		udelay(5);
+
+		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
+		if (val & BNX2_NVM_COMMAND_DONE) {
+			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
+			memcpy(ret_val, &v, 4);
+			break;
+		}
+	}
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+
+static int
+bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
+{
+	u32 cmd;
+	__be32 val32;
+	int j;
+
+	/* Build the command word. */
+	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
+
+	/* Calculate the page-based offset for buffered flash, not needed
+	 * for 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+		offset = ((offset / bp->flash_info->page_size) <<
+			  bp->flash_info->page_bits) +
+			 (offset % bp->flash_info->page_size);
+	}
+
+	/* Need to clear DONE bit separately. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	memcpy(&val32, val, 4);
+
+	/* Write the data. */
+	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
+
+	/* Address of the NVRAM to write to. */
+	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue the write command. */
+	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		udelay(5);
+
+		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
+			break;
+	}
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_init_nvram(struct bnx2 *bp)
+{
+	u32 val;
+	int j, entry_count, rc = 0;
+	const struct flash_spec *flash;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		bp->flash_info = &flash_5709;
+		goto get_flash_size;
+	}
+
+	/* Determine the selected interface. */
+	val = BNX2_RD(bp, BNX2_NVM_CFG1);
+
+	entry_count = ARRAY_SIZE(flash_table);
+
+	if (val & 0x40000000) {
+
+		/* Flash interface has been reconfigured */
+		for (j = 0, flash = &flash_table[0]; j < entry_count;
+		     j++, flash++) {
+			if ((val & FLASH_BACKUP_STRAP_MASK) ==
+			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
+				bp->flash_info = flash;
+				break;
+			}
+		}
+	}
+	else {
+		u32 mask;
+		/* Not yet reconfigured */
+
+		if (val & (1 << 23))
+			mask = FLASH_BACKUP_STRAP_MASK;
+		else
+			mask = FLASH_STRAP_MASK;
+
+		for (j = 0, flash = &flash_table[0]; j < entry_count;
+			j++, flash++) {
+
+			if ((val & mask) == (flash->strapping & mask)) {
+				bp->flash_info = flash;
+
+				/* Request access to the flash interface. */
+				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+					return rc;
+
+				/* Enable access to flash interface */
+				bnx2_enable_nvram_access(bp);
+
+				/* Reconfigure the flash interface */
+				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
+				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
+				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
+				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
+
+				/* Disable access to flash interface */
+				bnx2_disable_nvram_access(bp);
+				bnx2_release_nvram_lock(bp);
+
+				break;
+			}
+		}
+	} /* if (val & 0x40000000) */
+
+	if (j == entry_count) {
+		bp->flash_info = NULL;
+		pr_alert("Unknown flash/EEPROM type\n");
+		return -ENODEV;
+	}
+
+get_flash_size:
+	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
+	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
+	if (val)
+		bp->flash_size = val;
+	else
+		bp->flash_size = bp->flash_info->total_size;
+
+	return rc;
+}
+
+static int
+bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
+		int buf_size)
+{
+	int rc = 0;
+	u32 cmd_flags, offset32, len32, extra;
+
+	if (buf_size == 0)
+		return 0;
+
+	/* Request access to the flash interface. */
+	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+		return rc;
+
+	/* Enable access to flash interface */
+	bnx2_enable_nvram_access(bp);
+
+	len32 = buf_size;
+	offset32 = offset;
+	extra = 0;
+
+	cmd_flags = 0;
+
+	if (offset32 & 3) {
+		u8 buf[4];
+		u32 pre_len;
+
+		offset32 &= ~3;
+		pre_len = 4 - (offset & 3);
+
+		if (pre_len >= len32) {
+			pre_len = len32;
+			cmd_flags = BNX2_NVM_COMMAND_FIRST |
+				    BNX2_NVM_COMMAND_LAST;
+		}
+		else {
+			cmd_flags = BNX2_NVM_COMMAND_FIRST;
+		}
+
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		if (rc)
+			return rc;
+
+		memcpy(ret_buf, buf + (offset & 3), pre_len);
+
+		offset32 += 4;
+		ret_buf += pre_len;
+		len32 -= pre_len;
+	}
+	if (len32 & 3) {
+		extra = 4 - (len32 & 3);
+		len32 = (len32 + 4) & ~3;
+	}
+
+	if (len32 == 4) {
+		u8 buf[4];
+
+		if (cmd_flags)
+			cmd_flags = BNX2_NVM_COMMAND_LAST;
+		else
+			cmd_flags = BNX2_NVM_COMMAND_FIRST |
+				    BNX2_NVM_COMMAND_LAST;
+
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		memcpy(ret_buf, buf, 4 - extra);
+	}
+	else if (len32 > 0) {
+		u8 buf[4];
+
+		/* Read the first word. */
+		if (cmd_flags)
+			cmd_flags = 0;
+		else
+			cmd_flags = BNX2_NVM_COMMAND_FIRST;
+
+		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
+
+		/* Advance to the next dword. */
+		offset32 += 4;
+		ret_buf += 4;
+		len32 -= 4;
+
+		while (len32 > 4 && rc == 0) {
+			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
+
+			/* Advance to the next dword. */
+			offset32 += 4;
+			ret_buf += 4;
+			len32 -= 4;
+		}
+
+		if (rc)
+			return rc;
+
+		cmd_flags = BNX2_NVM_COMMAND_LAST;
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		memcpy(ret_buf, buf, 4 - extra);
+	}
+
+	/* Disable access to flash interface */
+	bnx2_disable_nvram_access(bp);
+
+	bnx2_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int
+bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
+		int buf_size)
+{
+	u32 written, offset32, len32;
+	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
+	int rc = 0;
+	int align_start, align_end;
+
+	buf = data_buf;
+	offset32 = offset;
+	len32 = buf_size;
+	align_start = align_end = 0;
+
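+	/* Round the write region out to dword alignment, reading back the
+	 * existing bytes at the unaligned edges so they are preserved.
+	 */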
+	if ((align_start = (offset32 & 3))) {
+		offset32 &= ~3;
+		len32 += align_start;
+		if (len32 < 4)
+			len32 = 4;
+		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
+			return rc;
+	}
+
+	if (len32 & 3) {
+		align_end = 4 - (len32 & 3);
+		len32 += align_end;
+		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
+			return rc;
+	}
+
+	if (align_start || align_end) {
+		align_buf = kmalloc(len32, GFP_KERNEL);
+		if (align_buf == NULL)
+			return -ENOMEM;
+		if (align_start) {
+			memcpy(align_buf, start, 4);
+		}
+		if (align_end) {
+			memcpy(align_buf + len32 - 4, end, 4);
+		}
+		memcpy(align_buf + align_start, data_buf, buf_size);
+		buf = align_buf;
+	}
+
+	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+		flash_buffer = kmalloc(264, GFP_KERNEL);
+		if (flash_buffer == NULL) {
+			rc = -ENOMEM;
+			goto nvram_write_end;
+		}
+	}
+
+	written = 0;
+	while ((written < len32) && (rc == 0)) {
+		u32 page_start, page_end, data_start, data_end;
+		u32 addr, cmd_flags;
+		int i;
+
+		/* Find the page_start addr */
+		page_start = offset32 + written;
+		page_start -= (page_start % bp->flash_info->page_size);
+		/* Find the page_end addr */
+		page_end = page_start + bp->flash_info->page_size;
+		/* Find the data_start addr */
+		data_start = (written == 0) ? offset32 : page_start;
+		/* Find the data_end addr */
+		data_end = (page_end > offset32 + len32) ?
+			(offset32 + len32) : page_end;
+
+		/* Request access to the flash interface. */
+		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+			goto nvram_write_end;
+
+		/* Enable access to flash interface */
+		bnx2_enable_nvram_access(bp);
+
+		cmd_flags = BNX2_NVM_COMMAND_FIRST;
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			int j;
+
+			/* Read the whole page into the buffer
+			 * (non-buffered flash only) */
+			for (j = 0; j < bp->flash_info->page_size; j += 4) {
+				if (j == (bp->flash_info->page_size - 4)) {
+					cmd_flags |= BNX2_NVM_COMMAND_LAST;
+				}
+				rc = bnx2_nvram_read_dword(bp,
+					page_start + j,
+					&flash_buffer[j],
+					cmd_flags);
+
+				if (rc)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Enable writes to flash interface (unlock write-protect) */
+		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
+			goto nvram_write_end;
+
+		/* Loop to write back the buffer data from page_start to
+		 * data_start */
+		i = 0;
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			/* Erase the page */
+			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
+				goto nvram_write_end;
+
+			/* Re-enable the write again for the actual write */
+			bnx2_enable_nvram_write(bp);
+
+			for (addr = page_start; addr < data_start;
+				addr += 4, i += 4) {
+
+				rc = bnx2_nvram_write_dword(bp, addr,
+					&flash_buffer[i], cmd_flags);
+
+				if (rc != 0)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Loop to write the new data from data_start to data_end */
+		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
+			if ((addr == page_end - 4) ||
+				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
+				 (addr == data_end - 4))) {
+
+				cmd_flags |= BNX2_NVM_COMMAND_LAST;
+			}
+			rc = bnx2_nvram_write_dword(bp, addr, buf,
+				cmd_flags);
+
+			if (rc != 0)
+				goto nvram_write_end;
+
+			cmd_flags = 0;
+			buf += 4;
+		}
+
+		/* Loop to write back the buffer data from data_end
+		 * to page_end */
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			for (addr = data_end; addr < page_end;
+				addr += 4, i += 4) {
+
+				if (addr == page_end-4) {
+					cmd_flags = BNX2_NVM_COMMAND_LAST;
+				}
+				rc = bnx2_nvram_write_dword(bp, addr,
+					&flash_buffer[i], cmd_flags);
+
+				if (rc != 0)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Disable writes to flash interface (lock write-protect) */
+		bnx2_disable_nvram_write(bp);
+
+		/* Disable access to flash interface */
+		bnx2_disable_nvram_access(bp);
+		bnx2_release_nvram_lock(bp);
+
+		/* Increment written */
+		written += data_end - data_start;
+	}
+
+nvram_write_end:
+	kfree(flash_buffer);
+	kfree(align_buf);
+	return rc;
+}
+
+static void
+bnx2_init_fw_cap(struct bnx2 *bp)
+{
+	u32 val, sig = 0;
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+
+	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
+	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
+		return;
+
+	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+	}
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+		u32 link;
+
+		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+
+		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+		if (link & BNX2_LINK_STATUS_SERDES_LINK)
+			bp->phy_port = PORT_FIBRE;
+		else
+			bp->phy_port = PORT_TP;
+
+		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
+	}
+
+	if (netif_running(bp->dev) && sig)
+		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
+}
+
+static void
+bnx2_setup_msix_tbl(struct bnx2 *bp)
+{
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+}
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+	u32 val;
+	int i, rc = 0;
+	u8 old_port;
+
+	/* Wait for the current PCI transaction to complete before
+	 * issuing a reset. */
+	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
+	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
+		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+		udelay(5);
+	} else {  /* 5709 */
+		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+		for (i = 0; i < 100; i++) {
+			msleep(1);
+			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+				break;
+		}
+	}
+
+	/* Wait for the firmware to tell us it is ok to issue a reset. */
+	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
+
+	/* Deposit a driver reset signature so the firmware knows that
+	 * this is a soft reset. */
+	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
+		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
+
+	/* Do a dummy read to force the chip to complete all current
+	 * transactions before we issue a reset. */
+	val = BNX2_RD(bp, BNX2_MISC_ID);
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+		BNX2_RD(bp, BNX2_MISC_COMMAND);
+		udelay(5);
+
+		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+	} else {
+		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+		/* Chip reset. */
+		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+		/* Reading back any register after chip reset will hang the
+		 * bus on 5706 A0 and A1.  The msleep below provides plenty
+		 * of margin for write posting.
+		 */
+		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
+			msleep(20);
+
+		/* Reset takes approximately 30 usec */
+		for (i = 0; i < 10; i++) {
+			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+				break;
+			udelay(10);
+		}
+
+		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+			pr_err("Chip reset did not complete\n");
+			return -EBUSY;
+		}
+	}
+
+	/* Make sure byte swapping is properly configured. */
+	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
+	if (val != 0x01020304) {
+		pr_err("Chip not in correct endian mode\n");
+		return -ENODEV;
+	}
+
+	/* Wait for the firmware to finish its initialization. */
+	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
+	if (rc)
+		return rc;
+
+	spin_lock_bh(&bp->phy_lock);
+	old_port = bp->phy_port;
+	bnx2_init_fw_cap(bp);
+	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
+	    old_port != bp->phy_port)
+		bnx2_set_default_remote_link(bp);
+	spin_unlock_bh(&bp->phy_lock);
+
+	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+		/* Adjust the voltage regulator two steps lower.  The default
+		 * of this register is 0x0000000e. */
+		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
+
+		/* Remove bad rbuf memory from the free pool. */
+		rc = bnx2_alloc_bad_rbuf(bp);
+	}
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		bnx2_setup_msix_tbl(bp);
+		/* Prevent MSIX table reads and writes from timing out */
+		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
+			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+	}
+
+	return rc;
+}
+
+static int
+bnx2_init_chip(struct bnx2 *bp)
+{
+	u32 val, mtu;
+	int rc, i;
+
+	/* Make sure the interrupt is not active. */
+	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
+	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
+#ifdef __BIG_ENDIAN
+	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
+#endif
+	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
+	      DMA_READ_CHANS << 12 |
+	      DMA_WRITE_CHANS << 16;
+
+	val |= (0x2 << 20) | (1 << 11);
+
+	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
+		val |= (1 << 23);
+
+	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
+	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
+	    !(bp->flags & BNX2_FLAG_PCIX))
+		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
+
+	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
+
+	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
+		val |= BNX2_TDMA_CONFIG_ONE_DMA;
+		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
+	}
+
+	if (bp->flags & BNX2_FLAG_PCIX) {
+		u16 val16;
+
+		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+				     &val16);
+		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+				      val16 & ~PCI_X_CMD_ERO);
+	}
+
+	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
+		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
+		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
+
+	/* Initialize context mapping and zero out the quick contexts.  The
+	 * context block must have already been enabled. */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		rc = bnx2_init_5709_context(bp);
+		if (rc)
+			return rc;
+	} else
+		bnx2_init_context(bp);
+
+	if ((rc = bnx2_init_cpus(bp)) != 0)
+		return rc;
+
+	bnx2_init_nvram(bp);
+
+	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
+			val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
+
+	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
+
+	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
+	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
+	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
+
+	val = (BNX2_PAGE_BITS - 8) << 24;
+	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
+
+	/* Configure page size. */
+	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
+	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
+	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
+	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
+
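+	/* Seed the EMAC transmit backoff algorithm with a value derived
+	 * from the MAC address. */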
+	val = bp->mac_addr[0] +
+	      (bp->mac_addr[1] << 8) +
+	      (bp->mac_addr[2] << 16) +
+	      bp->mac_addr[3] +
+	      (bp->mac_addr[4] << 8) +
+	      (bp->mac_addr[5] << 16);
+	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
+
+	/* Program the MTU.  Also include 4 bytes for CRC32. */
+	mtu = bp->dev->mtu;
+	val = mtu + ETH_HLEN + ETH_FCS_LEN;
+	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
+		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
+	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
+
+	if (mtu < 1500)
+		mtu = 1500;
+
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
+
+	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+		bp->bnx2_napi[i].last_status_idx = 0;
+
+	bp->idle_chk_status_idx = 0xffff;
+
+	/* Set up how to generate a link change interrupt. */
+	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
+		(u64) bp->status_blk_mapping & 0xffffffff);
+	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
+
+	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
+		(u64) bp->stats_blk_mapping & 0xffffffff);
+	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
+		(u64) bp->stats_blk_mapping >> 32);
+
+	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
+
+	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
+		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
+
+	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
+		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
+
+	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
+
+	BNX2_WR(bp, BNX2_HC_COM_TICKS,
+		(bp->com_ticks_int << 16) | bp->com_ticks);
+
+	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
+		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
+
+	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
+		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
+	else
+		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
+	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
+
+	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
+		val = BNX2_HC_CONFIG_COLLECT_STATS;
+	else {
+		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
+		      BNX2_HC_CONFIG_COLLECT_STATS;
+	}
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+			BNX2_HC_MSIX_BIT_VECTOR_VAL);
+
+		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+	}
+
+	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
+
+	BNX2_WR(bp, BNX2_HC_CONFIG, val);
+
+	if (bp->rx_ticks < 25)
+		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
+	else
+		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
+
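+	/* Program the coalescing parameters for the status blocks used by
+	 * the additional MSI-X vectors. */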
+	for (i = 1; i < bp->irq_nvecs; i++) {
+		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		BNX2_WR(bp, base,
+			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
+			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
+			(bp->tx_quick_cons_trip_int << 16) |
+			 bp->tx_quick_cons_trip);
+
+		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
+			(bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+			(bp->rx_quick_cons_trip_int << 16) |
+			bp->rx_quick_cons_trip);
+
+		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+			(bp->rx_ticks_int << 16) | bp->rx_ticks);
+	}
+
+	/* Clear internal stats counters. */
+	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
+
+	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
+
+	/* Initialize the receive filter. */
+	bnx2_set_rx_mode(bp->dev);
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+	}
+	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
+			  1, 0);
+
+	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
+	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
+
+	udelay(20);
+
+	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
+
+	return rc;
+}
+
+static void
+bnx2_clear_ring_states(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+	struct bnx2_rx_ring_info *rxr;
+	int i;
+
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+		bnapi = &bp->bnx2_napi[i];
+		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
+
+		txr->tx_cons = 0;
+		txr->hw_tx_cons = 0;
+		rxr->rx_prod_bseq = 0;
+		rxr->rx_prod = 0;
+		rxr->rx_cons = 0;
+		rxr->rx_pg_prod = 0;
+		rxr->rx_pg_cons = 0;
+	}
+}
+
+static void
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
+{
+	u32 val, offset0, offset1, offset2, offset3;
+	u32 cid_addr = GET_CID_ADDR(cid);
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	bnx2_ctx_wr(bp, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	bnx2_ctx_wr(bp, cid_addr, offset1, val);
+
+	val = (u64) txr->tx_desc_mapping >> 32;
+	bnx2_ctx_wr(bp, cid_addr, offset2, val);
+
+	val = (u64) txr->tx_desc_mapping & 0xffffffff;
+	bnx2_ctx_wr(bp, cid_addr, offset3, val);
+}
+
+static void
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
+{
+	struct bnx2_tx_bd *txbd;
+	u32 cid = TX_CID;
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+
+	bnapi = &bp->bnx2_napi[ring_num];
+	txr = &bnapi->tx_ring;
+
+	if (ring_num == 0)
+		cid = TX_CID;
+	else
+		cid = TX_TSS_CID + ring_num - 1;
+
+	bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
+	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
+
+	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
+
+	txr->tx_prod = 0;
+	txr->tx_prod_bseq = 0;
+
+	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+
+	bnx2_init_tx_context(bp, cid, txr);
+}
+
+static void
+bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
+		     u32 buf_size, int num_rings)
+{
+	int i;
+	struct bnx2_rx_bd *rxbd;
+
+	for (i = 0; i < num_rings; i++) {
+		int j;
+
+		rxbd = &rx_ring[i][0];
+		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
+			rxbd->rx_bd_len = buf_size;
+			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		}
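+		/* The last BD in each page is used as a chain pointer to the
+		 * next page; the final page wraps back to the first. */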
+		if (i == (num_rings - 1))
+			j = 0;
+		else
+			j = i + 1;
+		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
+	}
+}
+
+static void
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
+{
+	int i;
+	u16 prod, ring_prod;
+	u32 cid, rx_cid_addr, val;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (ring_num == 0)
+		cid = RX_CID;
+	else
+		cid = RX_RSS_CID + ring_num - 1;
+
+	rx_cid_addr = GET_CID_ADDR(cid);
+
+	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
+			     bp->rx_buf_use_size, bp->rx_max_ring);
+
+	bnx2_init_rx_context(bp, cid);
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
+		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
+	}
+
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
+	if (bp->rx_pg_ring_size) {
+		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+				     rxr->rx_pg_desc_mapping,
+				     PAGE_SIZE, bp->rx_max_pg_ring);
+		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
+		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
+
+		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
+
+		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
+
+		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
+	}
+
+	val = (u64) rxr->rx_desc_mapping[0] >> 32;
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+
+	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+
+	ring_prod = prod = rxr->rx_pg_prod;
+	for (i = 0; i < bp->rx_pg_ring_size; i++) {
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+				    ring_num, i, bp->rx_pg_ring_size);
+			break;
+		}
+		prod = BNX2_NEXT_RX_BD(prod);
+		ring_prod = BNX2_RX_PG_RING_IDX(prod);
+	}
+	rxr->rx_pg_prod = prod;
+
+	ring_prod = prod = rxr->rx_prod;
+	for (i = 0; i < bp->rx_ring_size; i++) {
+		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_num, i, bp->rx_ring_size);
+			break;
+		}
+		prod = BNX2_NEXT_RX_BD(prod);
+		ring_prod = BNX2_RX_RING_IDX(prod);
+	}
+	rxr->rx_prod = prod;
+
+	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
+
+	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
+
+	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+}
+
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+	int i;
+	u32 val;
+
+	bnx2_clear_ring_states(bp);
+
+	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+	for (i = 0; i < bp->num_tx_rings; i++)
+		bnx2_init_tx_ring(bp, i);
+
+	if (bp->num_tx_rings > 1)
+		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+			(TX_TSS_CID << 7));
+
+	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
+	for (i = 0; i < bp->num_rx_rings; i++)
+		bnx2_init_rx_ring(bp, i);
+
+	if (bp->num_rx_rings > 1) {
+		u32 tbl_32 = 0;
+
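+		/* Pack eight 4-bit ring indices into each 32-bit word and
+		 * write each completed word into the RSS indirection table. */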
+		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+			int shift = (i % 8) << 2;
+
+			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
+			if ((i % 8) == 7) {
+				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
+				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
+					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+					BNX2_RLUP_RSS_COMMAND_WRITE |
+					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+				tbl_32 = 0;
+			}
+		}
+
+		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+
+	}
+}
+
+static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
+{
+	u32 max, num_rings = 1;
+
+	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
+		ring_size -= BNX2_MAX_RX_DESC_CNT;
+		num_rings++;
+	}
+	/* round to next power of 2 */
+	max = max_size;
+	while ((max & num_rings) == 0)
+		max >>= 1;
+
+	if (num_rings != max)
+		max <<= 1;
+
+	return max;
+}
+
+static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+	u32 rx_size, rx_space, jumbo_size;
+
+	/* 8 for CRC and VLAN */
+	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
+
+	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
+	bp->rx_pg_ring_size = 0;
+	bp->rx_max_pg_ring = 0;
+	bp->rx_max_pg_ring_idx = 0;
+	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
+		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+
+		jumbo_size = size * pages;
+		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
+			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
+
+		bp->rx_pg_ring_size = jumbo_size;
+		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
+							BNX2_MAX_RX_PG_RINGS);
+		bp->rx_max_pg_ring_idx =
+			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
+		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
+		bp->rx_copy_thresh = 0;
+	}
+
+	bp->rx_buf_use_size = rx_size;
+	/* hw alignment + build_skb() overhead */
+	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
+	bp->rx_ring_size = size;
+	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
+	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
+}
+
+static void
+bnx2_free_tx_skbs(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+		int j;
+
+		if (txr->tx_buf_ring == NULL)
+			continue;
+
+		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
+			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
+
+			if (skb == NULL) {
+				j = BNX2_NEXT_TX_BD(j);
+				continue;
+			}
+
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
+
+			tx_buf->skb = NULL;
+
+			last = tx_buf->nr_frags;
+			j = BNX2_NEXT_TX_BD(j);
+			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
+				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
+				dma_unmap_page(&bp->pdev->dev,
+					dma_unmap_addr(tx_buf, mapping),
+					skb_frag_size(&skb_shinfo(skb)->frags[k]),
+					PCI_DMA_TODEVICE);
+			}
+			dev_kfree_skb(skb);
+		}
+		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+	}
+}
+
+static void
+bnx2_free_rx_skbs(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		if (rxr->rx_buf_ring == NULL)
+			return;
+
+		for (j = 0; j < bp->rx_max_ring_idx; j++) {
+			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+			u8 *data = rx_buf->data;
+
+			if (data == NULL)
+				continue;
+
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
+
+			rx_buf->data = NULL;
+
+			kfree(data);
+		}
+		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+			bnx2_free_rx_page(bp, rxr, j);
+	}
+}
+
+static void
+bnx2_free_skbs(struct bnx2 *bp)
+{
+	bnx2_free_tx_skbs(bp);
+	bnx2_free_rx_skbs(bp);
+}
+
+static int
+bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
+{
+	int rc;
+
+	rc = bnx2_reset_chip(bp, reset_code);
+	bnx2_free_skbs(bp);
+	if (rc)
+		return rc;
+
+	if ((rc = bnx2_init_chip(bp)) != 0)
+		return rc;
+
+	bnx2_init_all_rings(bp);
+	return 0;
+}
+
+static int
+bnx2_init_nic(struct bnx2 *bp, int reset_phy)
+{
+	int rc;
+
+	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
+		return rc;
+
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_init_phy(bp, reset_phy);
+	bnx2_set_link(bp);
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		bnx2_remote_phy_event(bp);
+	spin_unlock_bh(&bp->phy_lock);
+	return 0;
+}
+
+static int
+bnx2_shutdown_chip(struct bnx2 *bp)
+{
+	u32 reset_code;
+
+	if (bp->flags & BNX2_FLAG_NO_WOL)
+		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
+	else if (bp->wol)
+		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+	else
+		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+
+	return bnx2_reset_chip(bp, reset_code);
+}
+
+static int
+bnx2_test_registers(struct bnx2 *bp)
+{
+	int ret;
+	int i, is_5709;
+	static const struct {
+		u16   offset;
+		u16   flags;
+#define BNX2_FL_NOT_5709	1
+		u32   rw_mask;
+		u32   ro_mask;
+	} reg_tbl[] = {
+		{ 0x006c, 0, 0x00000000, 0x0000003f },
+		{ 0x0090, 0, 0xffffffff, 0x00000000 },
+		{ 0x0094, 0, 0x00000000, 0x00000000 },
+
+		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
+		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
+		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
+		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
+		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+
+		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+
+		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
+		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
+
+		{ 0x1000, 0, 0x00000000, 0x00000001 },
+		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
+
+		{ 0x1408, 0, 0x01c00800, 0x00000000 },
+		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
+		{ 0x14a8, 0, 0x00000000, 0x000001ff },
+		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
+		{ 0x14b0, 0, 0x00000002, 0x00000001 },
+		{ 0x14b8, 0, 0x00000000, 0x00000000 },
+		{ 0x14c0, 0, 0x00000000, 0x00000009 },
+		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
+		{ 0x14cc, 0, 0x00000000, 0x00000001 },
+		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
+
+		{ 0x1800, 0, 0x00000000, 0x00000001 },
+		{ 0x1804, 0, 0x00000000, 0x00000003 },
+
+		{ 0x2800, 0, 0x00000000, 0x00000001 },
+		{ 0x2804, 0, 0x00000000, 0x00003f01 },
+		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
+		{ 0x2810, 0, 0xffff0000, 0x00000000 },
+		{ 0x2814, 0, 0xffff0000, 0x00000000 },
+		{ 0x2818, 0, 0xffff0000, 0x00000000 },
+		{ 0x281c, 0, 0xffff0000, 0x00000000 },
+		{ 0x2834, 0, 0xffffffff, 0x00000000 },
+		{ 0x2840, 0, 0x00000000, 0xffffffff },
+		{ 0x2844, 0, 0x00000000, 0xffffffff },
+		{ 0x2848, 0, 0xffffffff, 0x00000000 },
+		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
+
+		{ 0x2c00, 0, 0x00000000, 0x00000011 },
+		{ 0x2c04, 0, 0x00000000, 0x00030007 },
+
+		{ 0x3c00, 0, 0x00000000, 0x00000001 },
+		{ 0x3c04, 0, 0x00000000, 0x00070000 },
+		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
+		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
+		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
+		{ 0x3c14, 0, 0x00000000, 0xffffffff },
+		{ 0x3c18, 0, 0x00000000, 0xffffffff },
+		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
+		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
+
+		{ 0x5004, 0, 0x00000000, 0x0000007f },
+		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
+
+		{ 0x5c00, 0, 0x00000000, 0x00000001 },
+		{ 0x5c04, 0, 0x00000000, 0x0003000f },
+		{ 0x5c08, 0, 0x00000003, 0x00000000 },
+		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
+		{ 0x5c10, 0, 0x00000000, 0xffffffff },
+		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
+		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
+		{ 0x5c88, 0, 0x00000000, 0x00077373 },
+		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
+
+		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
+		{ 0x680c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6810, 0, 0xffffffff, 0x00000000 },
+		{ 0x6814, 0, 0xffffffff, 0x00000000 },
+		{ 0x6818, 0, 0xffffffff, 0x00000000 },
+		{ 0x681c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
+		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
+		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
+		{ 0x684c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
+		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
+
+		{ 0xffff, 0, 0x00000000, 0x00000000 },
+	};
+
+	ret = 0;
+	is_5709 = 0;
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		is_5709 = 1;
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		u32 offset, rw_mask, ro_mask, save_val, val;
+		u16 flags = reg_tbl[i].flags;
+
+		if (is_5709 && (flags & BNX2_FL_NOT_5709))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		rw_mask = reg_tbl[i].rw_mask;
+		ro_mask = reg_tbl[i].ro_mask;
+
+		save_val = readl(bp->regview + offset);
+
+		writel(0, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != 0) {
+			goto reg_test_err;
+		}
+
+		if ((val & ro_mask) != (save_val & ro_mask)) {
+			goto reg_test_err;
+		}
+
+		writel(0xffffffff, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != rw_mask) {
+			goto reg_test_err;
+		}
+
+		if ((val & ro_mask) != (save_val & ro_mask)) {
+			goto reg_test_err;
+		}
+
+		writel(save_val, bp->regview + offset);
+		continue;
+
+reg_test_err:
+		writel(save_val, bp->regview + offset);
+		ret = -ENODEV;
+		break;
+	}
+	return ret;
+}
+
+static int
+bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
+{
+	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
+		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
+	int i;
+
+	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
+		u32 offset;
+
+		for (offset = 0; offset < size; offset += 4) {
+
+			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
+
+			if (bnx2_reg_rd_ind(bp, start + offset) !=
+				test_pattern[i]) {
+				return -ENODEV;
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+bnx2_test_memory(struct bnx2 *bp)
+{
+	int ret = 0;
+	int i;
+	static struct mem_entry {
+		u32   offset;
+		u32   len;
+	} mem_tbl_5706[] = {
+		{ 0x60000,  0x4000 },
+		{ 0xa0000,  0x3000 },
+		{ 0xe0000,  0x4000 },
+		{ 0x120000, 0x4000 },
+		{ 0x1a0000, 0x4000 },
+		{ 0x160000, 0x4000 },
+		{ 0xffffffff, 0    },
+	},
+	mem_tbl_5709[] = {
+		{ 0x60000,  0x4000 },
+		{ 0xa0000,  0x3000 },
+		{ 0xe0000,  0x4000 },
+		{ 0x120000, 0x4000 },
+		{ 0x1a0000, 0x4000 },
+		{ 0xffffffff, 0    },
+	};
+	struct mem_entry *mem_tbl;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		mem_tbl = mem_tbl_5709;
+	else
+		mem_tbl = mem_tbl_5706;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
+			mem_tbl[i].len)) != 0) {
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+#define BNX2_MAC_LOOPBACK	0
+#define BNX2_PHY_LOOPBACK	1
+
+static int
+bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
+{
+	unsigned int pkt_size, num_pkts, i;
+	struct sk_buff *skb;
+	u8 *data;
+	unsigned char *packet;
+	u16 rx_start_idx, rx_idx;
+	dma_addr_t map;
+	struct bnx2_tx_bd *txbd;
+	struct bnx2_sw_bd *rx_buf;
+	struct l2_fhdr *rx_hdr;
+	int ret = -ENODEV;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	tx_napi = bnapi;
+
+	txr = &tx_napi->tx_ring;
+	rxr = &bnapi->rx_ring;
+	if (loopback_mode == BNX2_MAC_LOOPBACK) {
+		bp->loopback = MAC_LOOPBACK;
+		bnx2_set_mac_loopback(bp);
+	}
+	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return 0;
+
+		bp->loopback = PHY_LOOPBACK;
+		bnx2_set_phy_loopback(bp);
+	}
+	else
+		return -EINVAL;
+
+	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
+	if (!skb)
+		return -ENOMEM;
+	packet = skb_put(skb, pkt_size);
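+	/* Build the test frame: destination MAC set to our own address,
+	 * zeroed source MAC and EtherType, then an incrementing byte pattern
+	 * as the payload. */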
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	memset(packet + ETH_ALEN, 0x0, 8);
+	for (i = 14; i < pkt_size; i++)
+		packet[i] = (unsigned char) (i & 0xff);
+
+	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+			     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	BNX2_WR(bp, BNX2_HC_COMMAND,
+		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+	BNX2_RD(bp, BNX2_HC_COMMAND);
+
+	udelay(5);
+	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
+
+	num_pkts = 0;
+
+	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
+
+	txbd->tx_bd_haddr_hi = (u64) map >> 32;
+	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
+	txbd->tx_bd_mss_nbytes = pkt_size;
+	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
+
+	num_pkts++;
+	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
+	txr->tx_prod_bseq += pkt_size;
+
+	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+	udelay(100);
+
+	BNX2_WR(bp, BNX2_HC_COMMAND,
+		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+	BNX2_RD(bp, BNX2_HC_COMMAND);
+
+	udelay(5);
+
+	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+
+	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
+		goto loopback_test_done;
+
+	rx_idx = bnx2_get_hw_rx_cons(bnapi);
+	if (rx_idx != rx_start_idx + num_pkts) {
+		goto loopback_test_done;
+	}
+
+	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
+	data = rx_buf->data;
+
+	rx_hdr = get_l2_fhdr(data);
+	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
+
+	dma_sync_single_for_cpu(&bp->pdev->dev,
+		dma_unmap_addr(rx_buf, mapping),
+		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+	if (rx_hdr->l2_fhdr_status &
+		(L2_FHDR_ERRORS_BAD_CRC |
+		L2_FHDR_ERRORS_PHY_DECODE |
+		L2_FHDR_ERRORS_ALIGNMENT |
+		L2_FHDR_ERRORS_TOO_SHORT |
+		L2_FHDR_ERRORS_GIANT_FRAME)) {
+
+		goto loopback_test_done;
+	}
+
+	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
+		goto loopback_test_done;
+	}
+
+	for (i = 14; i < pkt_size; i++) {
+		if (*(data + i) != (unsigned char) (i & 0xff)) {
+			goto loopback_test_done;
+		}
+	}
+
+	ret = 0;
+
+loopback_test_done:
+	bp->loopback = 0;
+	return ret;
+}
+
+#define BNX2_MAC_LOOPBACK_FAILED	1
+#define BNX2_PHY_LOOPBACK_FAILED	2
+#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
+					 BNX2_PHY_LOOPBACK_FAILED)
+
+static int
+bnx2_test_loopback(struct bnx2 *bp)
+{
+	int rc = 0;
+
+	if (!netif_running(bp->dev))
+		return BNX2_LOOPBACK_FAILED;
+
+	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_init_phy(bp, 1);
+	spin_unlock_bh(&bp->phy_lock);
+	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
+		rc |= BNX2_MAC_LOOPBACK_FAILED;
+	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
+		rc |= BNX2_PHY_LOOPBACK_FAILED;
+	return rc;
+}
+
+#define NVRAM_SIZE 0x200
+#define CRC32_RESIDUAL 0xdebb20e3
+
+static int
+bnx2_test_nvram(struct bnx2 *bp)
+{
+	__be32 buf[NVRAM_SIZE / 4];
+	u8 *data = (u8 *) buf;
+	int rc = 0;
+	u32 magic, csum;
+
+	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
+		goto test_nvram_done;
+
+	magic = be32_to_cpu(buf[0]);
+	if (magic != 0x669955aa) {
+		rc = -ENODEV;
+		goto test_nvram_done;
+	}
+
+	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
+		goto test_nvram_done;
+
+	csum = ether_crc_le(0x100, data);
+	if (csum != CRC32_RESIDUAL) {
+		rc = -ENODEV;
+		goto test_nvram_done;
+	}
+
+	csum = ether_crc_le(0x100, data + 0x100);
+	if (csum != CRC32_RESIDUAL) {
+		rc = -ENODEV;
+	}
+
+test_nvram_done:
+	return rc;
+}
+
+static int
+bnx2_test_link(struct bnx2 *bp)
+{
+	u32 bmsr;
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		if (bp->link_up)
+			return 0;
+		return -ENODEV;
+	}
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_enable_bmsr1(bp);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_disable_bmsr1(bp);
+	spin_unlock_bh(&bp->phy_lock);
+
+	if (bmsr & BMSR_LSTATUS) {
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int
+bnx2_test_intr(struct bnx2 *bp)
+{
+	int i;
+	u16 status_idx;
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
+	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
+
+	/* This register is not touched during run-time. */
+	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+	BNX2_RD(bp, BNX2_HC_COMMAND);
+
+	for (i = 0; i < 10; i++) {
+		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
+			status_idx) {
+
+			break;
+		}
+
+		msleep_interruptible(10);
+	}
+	if (i < 10)
+		return 0;
+
+	return -ENODEV;
+}
+
+/* Determine the link state for parallel detection. */
+static int
+bnx2_5706_serdes_has_link(struct bnx2 *bp)
+{
+	u32 mode_ctl, an_dbg, exp;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
+
+	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+
+	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
+		return 0;
+
+	return 1;
+}
+
+static void
+bnx2_5706_serdes_timer(struct bnx2 *bp)
+{
+	int check_link = 1;
+
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending) {
+		bp->serdes_an_pending--;
+		check_link = 0;
+	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+		if (bmcr & BMCR_ANENABLE) {
+			if (bnx2_5706_serdes_has_link(bp)) {
+				bmcr &= ~BMCR_ANENABLE;
+				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
+			}
+		}
+	}
+	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
+		u32 phy2;
+
+		bnx2_write_phy(bp, 0x17, 0x0f01);
+		bnx2_read_phy(bp, 0x15, &phy2);
+		if (phy2 & 0x20) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+		}
+	} else
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	if (check_link) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+
+		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
+			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
+				bnx2_5706s_force_link_dn(bp, 1);
+				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
+			} else
+				bnx2_set_link(bp);
+		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
+			bnx2_set_link(bp);
+	}
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_5708_serdes_timer(struct bnx2 *bp)
+{
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return;
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
+		bp->serdes_an_pending = 0;
+		return;
+	}
+
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending)
+		bp->serdes_an_pending--;
+	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (bmcr & BMCR_ANENABLE) {
+			bnx2_enable_forced_2g5(bp);
+			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
+		} else {
+			bnx2_disable_forced_2g5(bp);
+			bp->serdes_an_pending = 2;
+			bp->current_interval = BNX2_TIMER_INTERVAL;
+		}
+
+	} else
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_timer(unsigned long data)
+{
+	struct bnx2 *bp = (struct bnx2 *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnx2_restart_timer;
+
+	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+	     BNX2_FLAG_USING_MSI)
+		bnx2_chk_missed_msi(bp);
+
+	bnx2_send_heart_beat(bp);
+
+	bp->stats_blk->stat_FwRxDrop =
+		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
+
+	/* work around occasionally corrupted counters */
+	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
+		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
+			BNX2_HC_COMMAND_STATS_NOW);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
+			bnx2_5706_serdes_timer(bp);
+		else
+			bnx2_5708_serdes_timer(bp);
+	}
+
+bnx2_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static int
+bnx2_request_irq(struct bnx2 *bp)
+{
+	unsigned long flags;
+	struct bnx2_irq *irq;
+	int rc = 0, i;
+
+	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		irq = &bp->irq_tbl[i];
+		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+				 &bp->bnx2_napi[i]);
+		if (rc)
+			break;
+		irq->requested = 1;
+	}
+	return rc;
+}
+
+static void
+__bnx2_free_irq(struct bnx2 *bp)
+{
+	struct bnx2_irq *irq;
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		irq = &bp->irq_tbl[i];
+		if (irq->requested)
+			free_irq(irq->vector, &bp->bnx2_napi[i]);
+		irq->requested = 0;
+	}
+}
+
+static void
+bnx2_free_irq(struct bnx2 *bp)
+{
+
+	__bnx2_free_irq(bp);
+	if (bp->flags & BNX2_FLAG_USING_MSI)
+		pci_disable_msi(bp->pdev);
+	else if (bp->flags & BNX2_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+
+	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
+}
+
+static void
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
+{
+	int i, total_vecs;
+	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
+	struct net_device *dev = bp->dev;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	bnx2_setup_msix_tbl(bp);
+	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
+	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
+	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+
+	/*  Need to flush the previous three writes to ensure MSI-X
+	 *  is set up properly */
+	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
+	total_vecs = msix_vecs;
+#ifdef BCM_CNIC
+	total_vecs++;
+#endif
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+					   BNX2_MIN_MSIX_VEC, total_vecs);
+	if (total_vecs < 0)
+		return;
+
+	msix_vecs = total_vecs;
+#ifdef BCM_CNIC
+	msix_vecs--;
+#endif
+	bp->irq_nvecs = msix_vecs;
+	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
+	for (i = 0; i < total_vecs; i++) {
+		bp->irq_tbl[i].vector = msix_ent[i].vector;
+		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
+		bp->irq_tbl[i].handler = bnx2_msi_1shot;
+	}
+}
+
+static int
+bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
+{
+	int cpus = netif_get_num_default_rss_queues();
+	int msix_vecs;
+
+	if (!bp->num_req_rx_rings)
+		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
+	else if (!bp->num_req_tx_rings)
+		msix_vecs = max(cpus, bp->num_req_rx_rings);
+	else
+		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
+
+	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
+
+	bp->irq_tbl[0].handler = bnx2_interrupt;
+	strcpy(bp->irq_tbl[0].name, bp->dev->name);
+	bp->irq_nvecs = 1;
+	bp->irq_tbl[0].vector = bp->pdev->irq;
+
+	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
+		bnx2_enable_msix(bp, msix_vecs);
+
+	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
+	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
+		if (pci_enable_msi(bp->pdev) == 0) {
+			bp->flags |= BNX2_FLAG_USING_MSI;
+			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
+				bp->irq_tbl[0].handler = bnx2_msi_1shot;
+			} else
+				bp->irq_tbl[0].handler = bnx2_msi;
+
+			bp->irq_tbl[0].vector = bp->pdev->irq;
+		}
+	}
+
+	if (!bp->num_req_tx_rings)
+		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+	else
+		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
+
+	if (!bp->num_req_rx_rings)
+		bp->num_rx_rings = bp->irq_nvecs;
+	else
+		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
+
+	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
+
+	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_open(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	rc = bnx2_request_firmware(bp);
+	if (rc < 0)
+		goto out;
+
+	netif_carrier_off(dev);
+
+	bnx2_disable_int(bp);
+
+	rc = bnx2_setup_int_mode(bp, disable_msi);
+	if (rc)
+		goto open_err;
+	bnx2_init_napi(bp);
+	bnx2_napi_enable(bp);
+	rc = bnx2_alloc_mem(bp);
+	if (rc)
+		goto open_err;
+
+	rc = bnx2_request_irq(bp);
+	if (rc)
+		goto open_err;
+
+	rc = bnx2_init_nic(bp, 1);
+	if (rc)
+		goto open_err;
+
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	atomic_set(&bp->intr_sem, 0);
+
+	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
+	bnx2_enable_int(bp);
+
+	if (bp->flags & BNX2_FLAG_USING_MSI) {
+		/* Test MSI to make sure it is working.
+		 * If the MSI test fails, fall back to INTx mode.
+		 */
+		if (bnx2_test_intr(bp) != 0) {
+			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
+
+			bnx2_disable_int(bp);
+			bnx2_free_irq(bp);
+
+			bnx2_setup_int_mode(bp, 1);
+
+			rc = bnx2_init_nic(bp, 0);
+
+			if (!rc)
+				rc = bnx2_request_irq(bp);
+
+			if (rc) {
+				del_timer_sync(&bp->timer);
+				goto open_err;
+			}
+			bnx2_enable_int(bp);
+		}
+	}
+	if (bp->flags & BNX2_FLAG_USING_MSI)
+		netdev_info(dev, "using MSI\n");
+	else if (bp->flags & BNX2_FLAG_USING_MSIX)
+		netdev_info(dev, "using MSIX\n");
+
+	netif_tx_start_all_queues(dev);
+out:
+	return rc;
+
+open_err:
+	bnx2_napi_disable(bp);
+	bnx2_free_skbs(bp);
+	bnx2_free_irq(bp);
+	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
+	bnx2_release_firmware(bp);
+	goto out;
+}
+
+static void
+bnx2_reset_task(struct work_struct *work)
+{
+	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
+	int rc;
+	u16 pcicmd;
+
+	rtnl_lock();
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
+		return;
+	}
+
+	bnx2_netif_stop(bp, true);
+
+	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
+	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+		/* in case PCI block has reset */
+		pci_restore_state(bp->pdev);
+		pci_save_state(bp->pdev);
+	}
+	rc = bnx2_init_nic(bp, 1);
+	if (rc) {
+		netdev_err(bp->dev, "failed to reset NIC, closing\n");
+		bnx2_napi_enable(bp);
+		dev_close(bp->dev);
+		rtnl_unlock();
+		return;
+	}
+
+	atomic_set(&bp->intr_sem, 1);
+	bnx2_netif_start(bp, true);
+	rtnl_unlock();
+}
+
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+	int i;
+	u32 reg, bdidx, cid, valid;
+	struct net_device *dev = bp->dev;
+	static const struct ftq_reg {
+		char *name;
+		u32 off;
+	} ftq_arr[] = {
+		BNX2_FTQ_ENTRY(RV2P_P),
+		BNX2_FTQ_ENTRY(RV2P_T),
+		BNX2_FTQ_ENTRY(RV2P_M),
+		BNX2_FTQ_ENTRY(TBDR_),
+		BNX2_FTQ_ENTRY(TDMA_),
+		BNX2_FTQ_ENTRY(TXP_),
+		BNX2_FTQ_ENTRY(TXP_),
+		BNX2_FTQ_ENTRY(TPAT_),
+		BNX2_FTQ_ENTRY(RXP_C),
+		BNX2_FTQ_ENTRY(RXP_),
+		BNX2_FTQ_ENTRY(COM_COMXQ_),
+		BNX2_FTQ_ENTRY(COM_COMTQ_),
+		BNX2_FTQ_ENTRY(COM_COMQ_),
+		BNX2_FTQ_ENTRY(CP_CPQ_),
+	};
+
+	netdev_err(dev, "<--- start FTQ dump --->\n");
+	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+	netdev_err(dev, "CPU states:\n");
+	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+			   reg, bnx2_reg_rd_ind(bp, reg),
+			   bnx2_reg_rd_ind(bp, reg + 4),
+			   bnx2_reg_rd_ind(bp, reg + 8),
+			   bnx2_reg_rd_ind(bp, reg + 0x1c),
+			   bnx2_reg_rd_ind(bp, reg + 0x1c),
+			   bnx2_reg_rd_ind(bp, reg + 0x20));
+
+	netdev_err(dev, "<--- end FTQ dump --->\n");
+	netdev_err(dev, "<--- start TBDC dump --->\n");
+	netdev_err(dev, "TBDC free cnt: %ld\n",
+		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
+	for (i = 0; i < 0x20; i++) {
+		int j = 0;
+
+		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
+		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
+			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
+			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+			j++;
+
+		cid = BNX2_RD(bp, BNX2_TBDC_CID);
+		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
+		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
+		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
+			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+			   bdidx >> 24, (valid >> 8) & 0x0ff);
+	}
+	netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+	u32 val1, val2;
+
+	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
+	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
+		   atomic_read(&bp->intr_sem), val1);
+	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
+	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
+	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
+	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
+		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
+		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
+	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
+		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+	if (bp->flags & BNX2_FLAG_USING_MSIX)
+		netdev_err(dev, "DEBUG: PBA[%08x]\n",
+			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+}
+
+static void
+bnx2_tx_timeout(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bnx2_dump_ftq(bp);
+	bnx2_dump_state(bp);
+	bnx2_dump_mcp_state(bp);
+
+	/* This allows the netif to be shut down gracefully before resetting */
+	schedule_work(&bp->reset_task);
+}
+
+/* Called with netif_tx_lock.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
+ */
+static netdev_tx_t
+bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	dma_addr_t mapping;
+	struct bnx2_tx_bd *txbd;
+	struct bnx2_sw_tx_bd *tx_buf;
+	u32 len, vlan_tag_flags, last_frag, mss;
+	u16 prod, ring_prod;
+	int i;
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	/* Determine which tx ring this skb will be placed on */
+	i = skb_get_queue_mapping(skb);
+	bnapi = &bp->bnx2_napi[i];
+	txr = &bnapi->tx_ring;
+	txq = netdev_get_tx_queue(dev, i);
+
+	if (unlikely(bnx2_tx_avail(bp, txr) <
+	    (skb_shinfo(skb)->nr_frags + 1))) {
+		netif_tx_stop_queue(txq);
+		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
+
+		return NETDEV_TX_BUSY;
+	}
+	len = skb_headlen(skb);
+	prod = txr->tx_prod;
+	ring_prod = BNX2_TX_RING_IDX(prod);
+
+	vlan_tag_flags = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		vlan_tag_flags |=
+			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
+	}
+
+	if ((mss = skb_shinfo(skb)->gso_size)) {
+		u32 tcp_opt_len;
+		struct iphdr *iph;
+
+		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
+
+		tcp_opt_len = tcp_optlen(skb);
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			u32 tcp_off = skb_transport_offset(skb) -
+				      sizeof(struct ipv6hdr) - ETH_HLEN;
+
+			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
+					  TX_BD_FLAGS_SW_FLAGS;
+			if (likely(tcp_off == 0))
+				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
+			else {
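+				/* Encode the offset in 8-byte units, split
+				 * across the OFF0/OFF4 flag bits and the OFF2
+				 * bits carried in the mss field. */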
+				tcp_off >>= 3;
+				vlan_tag_flags |= ((tcp_off & 0x3) <<
+						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
+						  ((tcp_off & 0x10) <<
+						   TX_BD_FLAGS_TCP6_OFF4_SHL);
+				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
+			}
+		} else {
+			iph = ip_hdr(skb);
+			if (tcp_opt_len || (iph->ihl > 5)) {
+				vlan_tag_flags |= ((iph->ihl - 5) +
+						   (tcp_opt_len >> 2)) << 8;
+			}
+		}
+	} else
+		mss = 0;
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = skb;
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+	txbd = &txr->tx_desc_ring[ring_prod];
+
+	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+	txbd->tx_bd_mss_nbytes = len | (mss << 16);
+	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
+
+	last_frag = skb_shinfo(skb)->nr_frags;
+	tx_buf->nr_frags = last_frag;
+	tx_buf->is_gso = skb_is_gso(skb);
+
+	for (i = 0; i < last_frag; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		prod = BNX2_NEXT_TX_BD(prod);
+		ring_prod = BNX2_TX_RING_IDX(prod);
+		txbd = &txr->tx_desc_ring[ring_prod];
+
+		len = skb_frag_size(frag);
+		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
+			goto dma_error;
+		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+				   mapping);
+
+		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+		txbd->tx_bd_mss_nbytes = len | (mss << 16);
+		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
+
+	}
+	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+
+	/* Sync BD data before updating TX mailbox */
+	wmb();
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	prod = BNX2_NEXT_TX_BD(prod);
+	txr->tx_prod_bseq += skb->len;
+
+	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
+	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+	mmiowb();
+
+	txr->tx_prod = prod;
+
+	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in bnx2_tx_avail() below, because in
+		 * bnx2_tx_int(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
+			netif_tx_wake_queue(txq);
+	}
+
+	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = BNX2_TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = BNX2_NEXT_TX_BD(prod);
+		ring_prod = BNX2_TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_close(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bnx2_disable_int_sync(bp);
+	bnx2_napi_disable(bp);
+	netif_tx_disable(dev);
+	del_timer_sync(&bp->timer);
+	bnx2_shutdown_chip(bp);
+	bnx2_free_irq(bp);
+	bnx2_free_skbs(bp);
+	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
+	bp->link_up = 0;
+	netif_carrier_off(bp->dev);
+	return 0;
+}
+
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+	int i;
+
+	/* The 1st 10 counters are 64-bit counters */
+	for (i = 0; i < 20; i += 2) {
+		u32 hi;
+		u64 lo;
+
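+		/* Sum the high and low 32-bit halves separately and carry
+		 * into the high half when the low half overflows. */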
+		hi = temp_stats[i] + hw_stats[i];
+		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+		if (lo > 0xffffffff)
+			hi++;
+		temp_stats[i] = hi;
+		temp_stats[i + 1] = lo & 0xffffffff;
+	}
+
+	for ( ; i < sizeof(struct statistics_block) / 4; i++)
+		temp_stats[i] += hw_stats[i];
+}
+
+#define GET_64BIT_NET_STATS64(ctr)		\
+	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
+
+#define GET_64BIT_NET_STATS(ctr)				\
+	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
+	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
+
+#define GET_32BIT_NET_STATS(ctr)				\
+	(unsigned long) (bp->stats_blk->ctr +			\
+			 bp->temp_stats_blk->ctr)
+
+static struct rtnl_link_stats64 *
+bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->stats_blk == NULL)
+		return net_stats;
+
+	net_stats->rx_packets =
+		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
+
+	net_stats->tx_packets =
+		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
+
+	net_stats->rx_bytes =
+		GET_64BIT_NET_STATS(stat_IfHCInOctets);
+
+	net_stats->tx_bytes =
+		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
+
+	net_stats->multicast =
+		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
+
+	net_stats->collisions =
+		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
+
+	net_stats->rx_length_errors =
+		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
+
+	net_stats->rx_over_errors =
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
+
+	net_stats->rx_frame_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
+
+	net_stats->rx_crc_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
+
+	net_stats->rx_errors = net_stats->rx_length_errors +
+		net_stats->rx_over_errors + net_stats->rx_frame_errors +
+		net_stats->rx_crc_errors;
+
+	net_stats->tx_aborted_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
+
+	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
+		net_stats->tx_carrier_errors = 0;
+	else {
+		net_stats->tx_carrier_errors =
+			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
+	}
+
+	net_stats->tx_errors =
+		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
+		net_stats->tx_aborted_errors +
+		net_stats->tx_carrier_errors;
+
+	net_stats->rx_missed_errors =
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+		GET_32BIT_NET_STATS(stat_FwRxDrop);
+
+	return net_stats;
+}
+
+/* All ethtool functions called with rtnl_lock */
+
+static int
+bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int support_serdes = 0, support_copper = 0;
+
+	cmd->supported = SUPPORTED_Autoneg;
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		support_serdes = 1;
+		support_copper = 1;
+	} else if (bp->phy_port == PORT_FIBRE)
+		support_serdes = 1;
+	else
+		support_copper = 1;
+
+	if (support_serdes) {
+		cmd->supported |= SUPPORTED_1000baseT_Full |
+			SUPPORTED_FIBRE;
+		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+			cmd->supported |= SUPPORTED_2500baseX_Full;
+
+	}
+	if (support_copper) {
+		cmd->supported |= SUPPORTED_10baseT_Half |
+			SUPPORTED_10baseT_Full |
+			SUPPORTED_100baseT_Half |
+			SUPPORTED_100baseT_Full |
+			SUPPORTED_1000baseT_Full |
+			SUPPORTED_TP;
+
+	}
+
+	spin_lock_bh(&bp->phy_lock);
+	cmd->port = bp->phy_port;
+	cmd->advertising = bp->advertising;
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	if (netif_carrier_ok(dev)) {
+		ethtool_cmd_speed_set(cmd, bp->line_speed);
+		cmd->duplex = bp->duplex;
+		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+				cmd->eth_tp_mdix = ETH_TP_MDI_X;
+			else
+				cmd->eth_tp_mdix = ETH_TP_MDI;
+		}
+	}
+	else {
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+	spin_unlock_bh(&bp->phy_lock);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->phy_address = bp->phy_addr;
+
+	return 0;
+}
+
+static int
+bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u8 autoneg = bp->autoneg;
+	u8 req_duplex = bp->req_duplex;
+	u16 req_line_speed = bp->req_line_speed;
+	u32 advertising = bp->advertising;
+	int err = -EINVAL;
+
+	spin_lock_bh(&bp->phy_lock);
+
+	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+		goto err_out_unlock;
+
+	if (cmd->port != bp->phy_port &&
+	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
+		goto err_out_unlock;
+
+	/* If device is down, we can store the settings only if the user
+	 * is setting the currently active port.
+	 */
+	if (!netif_running(dev) && cmd->port != bp->phy_port)
+		goto err_out_unlock;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		autoneg |= AUTONEG_SPEED;
+
+		advertising = cmd->advertising;
+		if (cmd->port == PORT_TP) {
+			advertising &= ETHTOOL_ALL_COPPER_SPEED;
+			if (!advertising)
+				advertising = ETHTOOL_ALL_COPPER_SPEED;
+		} else {
+			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+			if (!advertising)
+				advertising = ETHTOOL_ALL_FIBRE_SPEED;
+		}
+		advertising |= ADVERTISED_Autoneg;
+	}
+	else {
+		u32 speed = ethtool_cmd_speed(cmd);
+		if (cmd->port == PORT_FIBRE) {
+			if ((speed != SPEED_1000 &&
+			     speed != SPEED_2500) ||
+			    (cmd->duplex != DUPLEX_FULL))
+				goto err_out_unlock;
+
+			if (speed == SPEED_2500 &&
+			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+				goto err_out_unlock;
+		} else if (speed == SPEED_1000 || speed == SPEED_2500)
+			goto err_out_unlock;
+
+		autoneg &= ~AUTONEG_SPEED;
+		req_line_speed = speed;
+		req_duplex = cmd->duplex;
+		advertising = 0;
+	}
+
+	bp->autoneg = autoneg;
+	bp->advertising = advertising;
+	bp->req_line_speed = req_line_speed;
+	bp->req_duplex = req_duplex;
+
+	err = 0;
+	/* If device is down, the new settings will be picked up when it is
+	 * brought up.
+	 */
+	if (netif_running(dev))
+		err = bnx2_setup_phy(bp, cmd->port);
+
+err_out_unlock:
+	spin_unlock_bh(&bp->phy_lock);
+
+	return err;
+}
+
+static void
+bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+}
+
+#define BNX2_REGDUMP_LEN		(32 * 1024)
+
+static int
+bnx2_get_regs_len(struct net_device *dev)
+{
+	return BNX2_REGDUMP_LEN;
+}
+
+static void
+bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+	u32 *p = _p, i, offset;
+	u8 *orig_p = _p;
+	struct bnx2 *bp = netdev_priv(dev);
+	static const u32 reg_boundaries[] = {
+		0x0000, 0x0098, 0x0400, 0x045c,
+		0x0800, 0x0880, 0x0c00, 0x0c10,
+		0x0c30, 0x0d08, 0x1000, 0x101c,
+		0x1040, 0x1048, 0x1080, 0x10a4,
+		0x1400, 0x1490, 0x1498, 0x14f0,
+		0x1500, 0x155c, 0x1580, 0x15dc,
+		0x1600, 0x1658, 0x1680, 0x16d8,
+		0x1800, 0x1820, 0x1840, 0x1854,
+		0x1880, 0x1894, 0x1900, 0x1984,
+		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+		0x1c80, 0x1c94, 0x1d00, 0x1d84,
+		0x2000, 0x2030, 0x23c0, 0x2400,
+		0x2800, 0x2820, 0x2830, 0x2850,
+		0x2b40, 0x2c10, 0x2fc0, 0x3058,
+		0x3c00, 0x3c94, 0x4000, 0x4010,
+		0x4080, 0x4090, 0x43c0, 0x4458,
+		0x4c00, 0x4c18, 0x4c40, 0x4c54,
+		0x4fc0, 0x5010, 0x53c0, 0x5444,
+		0x5c00, 0x5c18, 0x5c80, 0x5c90,
+		0x5fc0, 0x6000, 0x6400, 0x6428,
+		0x6800, 0x6848, 0x684c, 0x6860,
+		0x6888, 0x6910, 0x8000
+	};
+
+	regs->version = 0;
+
+	memset(p, 0, BNX2_REGDUMP_LEN);
+
+	if (!netif_running(bp->dev))
+		return;
+
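+	/* Read the register space only within the valid ranges listed in
+	 * reg_boundaries[]; addresses that fall in the gaps between ranges
+	 * are left zeroed in the dump buffer.
+	 */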
+	i = 0;
+	offset = reg_boundaries[0];
+	p += offset;
+	while (offset < BNX2_REGDUMP_LEN) {
+		*p++ = BNX2_RD(bp, offset);
+		offset += 4;
+		if (offset == reg_boundaries[i + 1]) {
+			offset = reg_boundaries[i + 2];
+			p = (u32 *) (orig_p + offset);
+			i += 2;
+		}
+	}
+}
+
+static void
+bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->flags & BNX2_FLAG_NO_WOL) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	}
+	else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & BNX2_FLAG_NO_WOL)
+			return -EINVAL;
+
+		bp->wol = 1;
+	}
+	else {
+		bp->wol = 0;
+	}
+
+	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
+	return 0;
+}
+
+static int
+bnx2_nway_reset(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 bmcr;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	if (!(bp->autoneg & AUTONEG_SPEED)) {
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&bp->phy_lock);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		int rc;
+
+		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
+		return rc;
+	}
+
+	/* Force a link down that is visible on the other side */
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+		spin_unlock_bh(&bp->phy_lock);
+
+		msleep(20);
+
+		spin_lock_bh(&bp->phy_lock);
+
+		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
+	}
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	bmcr &= ~BMCR_LOOPBACK;
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
+
+	spin_unlock_bh(&bp->phy_lock);
+
+	return 0;
+}
+
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	return bp->link_up;
+}
+
+static int
+bnx2_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->flash_info == NULL)
+		return 0;
+
+	return (int) bp->flash_size;
+}
+
+static int
+bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		u8 *eebuf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int
+bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		u8 *eebuf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int
+bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
+	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
+	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
+
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
+	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
+	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
+
+	coal->stats_block_coalesce_usecs = bp->stats_ticks;
+
+	return 0;
+}
+
+static int
+bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
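+	/* Clamp each value to the limits accepted by the host coalescing
+	 * block: 0x3ff for tick counts, 0xff for frame-count trip points.
+	 */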
+	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
+
+	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
+	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
+
+	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
+	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
+
+	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
+	if (bp->rx_quick_cons_trip_int > 0xff)
+		bp->rx_quick_cons_trip_int = 0xff;
+
+	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
+
+	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
+	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
+
+	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
+	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
+
+	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
+	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
+		0xff;
+
+	bp->stats_ticks = coal->stats_block_coalesce_usecs;
+	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
+		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
+			bp->stats_ticks = USEC_PER_SEC;
+	}
+	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+	if (netif_running(bp->dev)) {
+		bnx2_netif_stop(bp, true);
+		bnx2_init_nic(bp, 0);
+		bnx2_netif_start(bp, true);
+	}
+
+	return 0;
+}
+
+static void
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
+	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
+
+	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
+{
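+	/* If the device is up, tear down the existing rings, apply the new
+	 * sizes, then reallocate and restart; otherwise just record the new
+	 * sizes for the next open.
+	 */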
+	if (netif_running(bp->dev)) {
+		/* Reset will erase chipset stats; save them */
+		bnx2_save_stats(bp);
+
+		bnx2_netif_stop(bp, true);
+		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
+		if (reset_irq) {
+			bnx2_free_irq(bp);
+			bnx2_del_napi(bp);
+		} else {
+			__bnx2_free_irq(bp);
+		}
+		bnx2_free_skbs(bp);
+		bnx2_free_mem(bp);
+	}
+
+	bnx2_set_rx_ring_size(bp, rx);
+	bp->tx_ring_size = tx;
+
+	if (netif_running(bp->dev)) {
+		int rc = 0;
+
+		if (reset_irq) {
+			rc = bnx2_setup_int_mode(bp, disable_msi);
+			bnx2_init_napi(bp);
+		}
+
+		if (!rc)
+			rc = bnx2_alloc_mem(bp);
+
+		if (!rc)
+			rc = bnx2_request_irq(bp);
+
+		if (!rc)
+			rc = bnx2_init_nic(bp, 0);
+
+		if (rc) {
+			bnx2_napi_enable(bp);
+			dev_close(bp->dev);
+			return rc;
+		}
+#ifdef BCM_CNIC
+		mutex_lock(&bp->cnic_lock);
+		/* Let cnic know about the new status block. */
+		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+			bnx2_setup_cnic_irq_info(bp);
+		mutex_unlock(&bp->cnic_lock);
+#endif
+		bnx2_netif_start(bp, true);
+	}
+	return 0;
+}
+
+static int
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
+		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
+		(ering->tx_pending <= MAX_SKB_FRAGS)) {
+
+		return -EINVAL;
+	}
+	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
+				   false);
+	return rc;
+}
+
+static void
+bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
+	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
+	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
+}
+
+static int
+bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bp->req_flow_ctrl = 0;
+	if (epause->rx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_RX;
+	if (epause->tx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_TX;
+
+	if (epause->autoneg) {
+		bp->autoneg |= AUTONEG_FLOW_CTRL;
+	}
+	else {
+		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
+	}
+
+	if (netif_running(dev)) {
+		spin_lock_bh(&bp->phy_lock);
+		bnx2_setup_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
+	}
+
+	return 0;
+}
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2_stats_str_arr[] = {
+	{ "rx_bytes" },
+	{ "rx_error_bytes" },
+	{ "tx_bytes" },
+	{ "tx_error_bytes" },
+	{ "rx_ucast_packets" },
+	{ "rx_mcast_packets" },
+	{ "rx_bcast_packets" },
+	{ "tx_ucast_packets" },
+	{ "tx_mcast_packets" },
+	{ "tx_bcast_packets" },
+	{ "tx_mac_errors" },
+	{ "tx_carrier_errors" },
+	{ "rx_crc_errors" },
+	{ "rx_align_errors" },
+	{ "tx_single_collisions" },
+	{ "tx_multi_collisions" },
+	{ "tx_deferred" },
+	{ "tx_excess_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_total_collisions" },
+	{ "rx_fragments" },
+	{ "rx_jabbers" },
+	{ "rx_undersize_packets" },
+	{ "rx_oversize_packets" },
+	{ "rx_64_byte_packets" },
+	{ "rx_65_to_127_byte_packets" },
+	{ "rx_128_to_255_byte_packets" },
+	{ "rx_256_to_511_byte_packets" },
+	{ "rx_512_to_1023_byte_packets" },
+	{ "rx_1024_to_1522_byte_packets" },
+	{ "rx_1523_to_9022_byte_packets" },
+	{ "tx_64_byte_packets" },
+	{ "tx_65_to_127_byte_packets" },
+	{ "tx_128_to_255_byte_packets" },
+	{ "tx_256_to_511_byte_packets" },
+	{ "tx_512_to_1023_byte_packets" },
+	{ "tx_1024_to_1522_byte_packets" },
+	{ "tx_1523_to_9022_byte_packets" },
+	{ "rx_xon_frames" },
+	{ "rx_xoff_frames" },
+	{ "tx_xon_frames" },
+	{ "tx_xoff_frames" },
+	{ "rx_mac_ctrl_frames" },
+	{ "rx_filtered_packets" },
+	{ "rx_ftq_discards" },
+	{ "rx_discards" },
+	{ "rx_fw_discards" },
+};
+
+#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
+
+#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
+
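+/* Offset (in 32-bit words) of each counter in struct statistics_block,
+ * in the same order as the names in bnx2_stats_str_arr above.
+ */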
+static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
+    STATS_OFFSET32(stat_IfHCInOctets_hi),
+    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
+    STATS_OFFSET32(stat_IfHCOutOctets_hi),
+    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
+    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
+    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
+    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
+    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
+    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
+    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
+    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
+    STATS_OFFSET32(stat_EtherStatsCollisions),
+    STATS_OFFSET32(stat_EtherStatsFragments),
+    STATS_OFFSET32(stat_EtherStatsJabbers),
+    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
+    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
+    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_XonPauseFramesReceived),
+    STATS_OFFSET32(stat_XoffPauseFramesReceived),
+    STATS_OFFSET32(stat_OutXonSent),
+    STATS_OFFSET32(stat_OutXoffSent),
+    STATS_OFFSET32(stat_MacControlFramesReceived),
+    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+    STATS_OFFSET32(stat_IfInFTQDiscards),
+    STATS_OFFSET32(stat_IfInMBUFDiscards),
+    STATS_OFFSET32(stat_FwRxDrop),
+};
+
+/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
+ * skipped because of errata.
+ */
+static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
+	8,0,8,8,8,8,8,8,8,8,
+	4,0,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,
+};
+
+static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
+	8,0,8,8,8,8,8,8,8,8,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,
+};
+
+#define BNX2_NUM_TESTS 6
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
+	{ "register_test (offline)" },
+	{ "memory_test (offline)" },
+	{ "loopback_test (offline)" },
+	{ "nvram_test (online)" },
+	{ "interrupt_test (online)" },
+	{ "link_test (online)" },
+};
+
+static int
+bnx2_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return BNX2_NUM_TESTS;
+	case ETH_SS_STATS:
+		return BNX2_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int i;
+
+		bnx2_netif_stop(bp, true);
+		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
+		bnx2_free_skbs(bp);
+
+		if (bnx2_test_registers(bp) != 0) {
+			buf[0] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if (bnx2_test_memory(bp) != 0) {
+			buf[1] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		if (!netif_running(bp->dev))
+			bnx2_shutdown_chip(bp);
+		else {
+			bnx2_init_nic(bp, 1);
+			bnx2_netif_start(bp, true);
+		}
+
+		/* wait for link up */
+		for (i = 0; i < 7; i++) {
+			if (bp->link_up)
+				break;
+			msleep_interruptible(1000);
+		}
+	}
+
+	if (bnx2_test_nvram(bp) != 0) {
+		buf[3] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bnx2_test_intr(bp) != 0) {
+		buf[4] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (bnx2_test_link(bp) != 0) {
+		buf[5] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+
+	}
+}
+
+static void
+bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, bnx2_stats_str_arr,
+			sizeof(bnx2_stats_str_arr));
+		break;
+	case ETH_SS_TEST:
+		memcpy(buf, bnx2_tests_str_arr,
+			sizeof(bnx2_tests_str_arr));
+		break;
+	}
+}
+
+static void
+bnx2_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int i;
+	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+	u8 *stats_len_arr = NULL;
+
+	if (hw_stats == NULL) {
+		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
+		return;
+	}
+
+	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
+		stats_len_arr = bnx2_5706_stats_len_arr;
+	else
+		stats_len_arr = bnx2_5708_stats_len_arr;
+
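+	/* Each reported counter is the sum of the live hardware statistics
+	 * block and the copy saved across chip resets (temp_stats_blk).
+	 */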
+	for (i = 0; i < BNX2_NUM_STATS; i++) {
+		unsigned long offset;
+
+		if (stats_len_arr[i] == 0) {
+			/* skip this counter */
+			buf[i] = 0;
+			continue;
+		}
+
+		offset = bnx2_stats_offset_arr[i];
+		if (stats_len_arr[i] == 4) {
+			/* 4-byte counter */
+			buf[i] = (u64) *(hw_stats + offset) +
+				 *(temp_stats + offset);
+			continue;
+		}
+		/* 8-byte counter */
+		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+			 *(hw_stats + offset + 1) +
+			 (((u64) *(temp_stats + offset)) << 32) +
+			 *(temp_stats + offset + 1);
+	}
+}
+
+static int
+bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
+		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
+			BNX2_EMAC_LED_1000MB_OVERRIDE |
+			BNX2_EMAC_LED_100MB_OVERRIDE |
+			BNX2_EMAC_LED_10MB_OVERRIDE |
+			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
+			BNX2_EMAC_LED_TRAFFIC);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		BNX2_WR(bp, BNX2_EMAC_LED, 0);
+		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* TSO with VLAN tag won't work with current firmware */
+	if (features & NETIF_F_HW_VLAN_CTAG_TX)
+		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
+	else
+		dev->vlan_features &= ~NETIF_F_ALL_TSO;
+
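+	/* Toggling VLAN tag stripping requires reprogramming the RX mode and
+	 * notifying the firmware, so briefly stop and restart the RX path.
+	 */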
+	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
+	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
+	    netif_running(dev)) {
+		bnx2_netif_stop(bp, false);
+		dev->features = features;
+		bnx2_set_rx_mode(dev);
+		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
+		bnx2_netif_start(bp, false);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void bnx2_get_channels(struct net_device *dev,
+			      struct ethtool_channels *channels)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 max_rx_rings = 1;
+	u32 max_tx_rings = 1;
+
+	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
+		max_rx_rings = RX_MAX_RINGS;
+		max_tx_rings = TX_MAX_RINGS;
+	}
+
+	channels->max_rx = max_rx_rings;
+	channels->max_tx = max_tx_rings;
+	channels->max_other = 0;
+	channels->max_combined = 0;
+	channels->rx_count = bp->num_rx_rings;
+	channels->tx_count = bp->num_tx_rings;
+	channels->other_count = 0;
+	channels->combined_count = 0;
+}
+
+static int bnx2_set_channels(struct net_device *dev,
+			      struct ethtool_channels *channels)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 max_rx_rings = 1;
+	u32 max_tx_rings = 1;
+	int rc = 0;
+
+	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
+		max_rx_rings = RX_MAX_RINGS;
+		max_tx_rings = TX_MAX_RINGS;
+	}
+	if (channels->rx_count > max_rx_rings ||
+	    channels->tx_count > max_tx_rings)
+		return -EINVAL;
+
+	bp->num_req_rx_rings = channels->rx_count;
+	bp->num_req_tx_rings = channels->tx_count;
+
+	if (netif_running(dev))
+		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
+					   bp->tx_ring_size, true);
+
+	return rc;
+}
+
+static const struct ethtool_ops bnx2_ethtool_ops = {
+	.get_settings		= bnx2_get_settings,
+	.set_settings		= bnx2_set_settings,
+	.get_drvinfo		= bnx2_get_drvinfo,
+	.get_regs_len		= bnx2_get_regs_len,
+	.get_regs		= bnx2_get_regs,
+	.get_wol		= bnx2_get_wol,
+	.set_wol		= bnx2_set_wol,
+	.nway_reset		= bnx2_nway_reset,
+	.get_link		= bnx2_get_link,
+	.get_eeprom_len		= bnx2_get_eeprom_len,
+	.get_eeprom		= bnx2_get_eeprom,
+	.set_eeprom		= bnx2_set_eeprom,
+	.get_coalesce		= bnx2_get_coalesce,
+	.set_coalesce		= bnx2_set_coalesce,
+	.get_ringparam		= bnx2_get_ringparam,
+	.set_ringparam		= bnx2_set_ringparam,
+	.get_pauseparam		= bnx2_get_pauseparam,
+	.set_pauseparam		= bnx2_set_pauseparam,
+	.self_test		= bnx2_self_test,
+	.get_strings		= bnx2_get_strings,
+	.set_phys_id		= bnx2_set_phys_id,
+	.get_ethtool_stats	= bnx2_get_ethtool_stats,
+	.get_sset_count		= bnx2_get_sset_count,
+	.get_channels		= bnx2_get_channels,
+	.set_channels		= bnx2_set_channels,
+};
+
+/* Called with rtnl_lock */
+static int
+bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct bnx2 *bp = netdev_priv(dev);
+	int err;
+
+	switch(cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = bp->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u32 mii_regval;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return -EOPNOTSUPP;
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&bp->phy_lock);
+		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
+		spin_unlock_bh(&bp->phy_lock);
+
+		data->val_out = mii_regval;
+
+		return err;
+	}
+
+	case SIOCSMIIREG:
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return -EOPNOTSUPP;
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&bp->phy_lock);
+		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
+		spin_unlock_bh(&bp->phy_lock);
+
+		return err;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev))
+		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+	return 0;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
+				     false);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+poll_bnx2(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int i;
+
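+	/* Netpoll path: service every IRQ vector directly with its interrupt
+	 * temporarily disabled.
+	 */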
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
+	}
+}
+#endif
+
+static void
+bnx2_get_5709_media(struct bnx2 *bp)
+{
+	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
+	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
+	u32 strap;
+
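+	/* Bond ID "C" means copper and "S" means SERDES; otherwise decode
+	 * the dual-media strapping, which maps differently per PCI function.
+	 */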
+	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+		return;
+	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
+		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+		return;
+	}
+
+	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
+		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
+	else
+		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
+
+	if (bp->func == 0) {
+		switch (strap) {
+		case 0x4:
+		case 0x5:
+		case 0x6:
+			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+			return;
+		}
+	} else {
+		switch (strap) {
+		case 0x1:
+		case 0x2:
+		case 0x4:
+			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+			return;
+		}
+	}
+}
+
+static void
+bnx2_get_pci_speed(struct bnx2 *bp)
+{
+	u32 reg;
+
+	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
+	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
+		u32 clkreg;
+
+		bp->flags |= BNX2_FLAG_PCIX;
+
+		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
+
+		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
+		switch (clkreg) {
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
+			bp->bus_speed_mhz = 133;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
+			bp->bus_speed_mhz = 100;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
+			bp->bus_speed_mhz = 66;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
+			bp->bus_speed_mhz = 50;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
+			bp->bus_speed_mhz = 33;
+			break;
+		}
+	}
+	else {
+		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
+			bp->bus_speed_mhz = 66;
+		else
+			bp->bus_speed_mhz = 33;
+	}
+
+	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
+		bp->flags |= BNX2_FLAG_PCI_32BIT;
+
+}
+
+static void
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+	int rc, i, j;
+	u8 *data;
+	unsigned int block_end, rosize, len;
+
+#define BNX2_VPD_NVRAM_OFFSET	0x300
+#define BNX2_VPD_LEN		128
+#define BNX2_MAX_VER_SLEN	30
+
+	data = kmalloc(256, GFP_KERNEL);
+	if (!data)
+		return;
+
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+			     BNX2_VPD_LEN);
+	if (rc)
+		goto vpd_done;
+
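+	/* The VPD image was read into the upper half of the buffer; byte-swap
+	 * each 32-bit word into the lower half before parsing it.
+	 */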
+	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+		data[i] = data[i + BNX2_VPD_LEN + 3];
+		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+		data[i + 3] = data[i + BNX2_VPD_LEN];
+	}
+
+	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto vpd_done;
+
+	rosize = pci_vpd_lrdt_size(&data[i]);
+	i += PCI_VPD_LRDT_TAG_SIZE;
+	block_end = i + rosize;
+
+	if (block_end > BNX2_VPD_LEN)
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len != 4 ||
+	    memcmp(&data[j], "1028", 4))
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_VENDOR0);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+		goto vpd_done;
+
+	memcpy(bp->fw_version, &data[j], len);
+	bp->fw_version[len] = ' ';
+
+vpd_done:
+	kfree(data);
+}
+
+static int
+bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	struct bnx2 *bp;
+	int rc, i, j;
+	u32 reg;
+	u64 dma_mask, persist_dma_mask;
+	int err;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	bp = netdev_priv(dev);
+
+	bp->flags = 0;
+	bp->phy_flags = 0;
+
+	bp->temp_stats_blk =
+		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+	if (bp->temp_stats_blk == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto err_out_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->pm_cap = pdev->pm_cap;
+	if (bp->pm_cap == 0) {
+		dev_err(&pdev->dev,
+			"Cannot find power management capability, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	spin_lock_init(&bp->phy_lock);
+	spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_lock);
+#endif
+	INIT_WORK(&bp->reset_task, bnx2_reset_task);
+
+	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
+							 TX_MAX_TSS_RINGS + 1));
+	if (!bp->regview) {
+		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	/* Configure byte swap and enable write to the reg_window registers.
+	 * Rely on the CPU to do target byte swapping on big endian systems;
+	 * the chip's target access swapping will not swap all accesses.
+	 */
+	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
+
+	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+		if (!pci_is_pcie(pdev)) {
+			dev_err(&pdev->dev, "Not PCIE, aborting\n");
+			rc = -EIO;
+			goto err_out_unmap;
+		}
+		bp->flags |= BNX2_FLAG_PCIE;
+		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
+			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
+
+		/* AER (Advanced Error Reporting) hooks */
+		err = pci_enable_pcie_error_reporting(pdev);
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
+
+	} else {
+		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+		if (bp->pcix_cap == 0) {
+			dev_err(&pdev->dev,
+				"Cannot find PCIX capability, aborting\n");
+			rc = -EIO;
+			goto err_out_unmap;
+		}
+		bp->flags |= BNX2_FLAG_BROKEN_STATS;
+	}
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
+	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
+		if (pdev->msix_cap)
+			bp->flags |= BNX2_FLAG_MSIX_CAP;
+	}
+
+	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
+	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
+		if (pdev->msi_cap)
+			bp->flags |= BNX2_FLAG_MSI_CAP;
+	}
+
+	/* 5708 cannot support DMA addresses > 40-bit.  */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+	else
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+	/* Configure DMA attributes. */
+	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+		dev->features |= NETIF_F_HIGHDMA;
+		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed, aborting\n");
+			goto err_out_unmap;
+		}
+	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		goto err_out_unmap;
+	}
+
+	if (!(bp->flags & BNX2_FLAG_PCIE))
+		bnx2_get_pci_speed(bp);
+
+	/* 5706A0 may falsely detect SERR and PERR. */
+	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+		reg = BNX2_RD(bp, PCI_COMMAND);
+		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+		BNX2_WR(bp, PCI_COMMAND, reg);
+	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
+		!(bp->flags & BNX2_FLAG_PCIX)) {
+
+		dev_err(&pdev->dev,
+			"5706 A1 can only be used in a PCIX bus, aborting\n");
+		goto err_out_unmap;
+	}
+
+	bnx2_init_nvram(bp);
+
+	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+
+	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+		bp->func = 1;
+
+	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
+	    BNX2_SHM_HDR_SIGNATURE_SIG) {
+		u32 off = bp->func << 2;
+
+		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
+	} else
+		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
+
+	/* Get the permanent MAC address.  First we need to make sure the
+	 * firmware is actually running.
+	 */
+	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
+
+	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
+	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
+		dev_err(&pdev->dev, "Firmware not running, aborting\n");
+		rc = -ENODEV;
+		goto err_out_unmap;
+	}
+
+	bnx2_read_vpd_fw_ver(bp);
+
+	j = strlen(bp->fw_version);
+	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
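+	/* Append the bootcode revision as "bc x.y.z", taking one byte of
+	 * BNX2_DEV_INFO_BC_REV per field and dropping leading zeroes.
+	 */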
+	for (i = 0; i < 3 && j < 24; i++) {
+		u8 num, k, skip0;
+
+		if (i == 0) {
+			bp->fw_version[j++] = 'b';
+			bp->fw_version[j++] = 'c';
+			bp->fw_version[j++] = ' ';
+		}
+		num = (u8) (reg >> (24 - (i * 8)));
+		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
+			if (num >= k || !skip0 || k == 1) {
+				bp->fw_version[j++] = (num / k) + '0';
+				skip0 = 0;
+			}
+		}
+		if (i != 2)
+			bp->fw_version[j++] = '.';
+	}
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
+		bp->wol = 1;
+
+	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
+		bp->flags |= BNX2_FLAG_ASF_ENABLE;
+
+		for (i = 0; i < 30; i++) {
+			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+				break;
+			msleep(10);
+		}
+	}
+	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+	reg &= BNX2_CONDITION_MFW_RUN_MASK;
+	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
+	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
+		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
+
+		if (j < 32)
+			bp->fw_version[j++] = ' ';
+		for (i = 0; i < 3 && j < 28; i++) {
+			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
+			reg = be32_to_cpu(reg);
+			memcpy(&bp->fw_version[j], &reg, 4);
+			j += 4;
+		}
+	}
+
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
+	bp->mac_addr[0] = (u8) (reg >> 8);
+	bp->mac_addr[1] = (u8) reg;
+
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
+	bp->mac_addr[2] = (u8) (reg >> 24);
+	bp->mac_addr[3] = (u8) (reg >> 16);
+	bp->mac_addr[4] = (u8) (reg >> 8);
+	bp->mac_addr[5] = (u8) reg;
+
+	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
+	bnx2_set_rx_ring_size(bp, 255);
+
+	bp->tx_quick_cons_trip_int = 2;
+	bp->tx_quick_cons_trip = 20;
+	bp->tx_ticks_int = 18;
+	bp->tx_ticks = 80;
+
+	bp->rx_quick_cons_trip_int = 2;
+	bp->rx_quick_cons_trip = 12;
+	bp->rx_ticks_int = 18;
+	bp->rx_ticks = 18;
+
+	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+	bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	bp->phy_addr = 1;
+
+	/* allocate stats_blk */
+	rc = bnx2_alloc_stats_blk(dev);
+	if (rc)
+		goto err_out_unmap;
+
+	/* Disable WOL support if we are running on a SERDES chip. */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		bnx2_get_5709_media(bp);
+	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
+		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+
+	bp->phy_port = PORT_TP;
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		bp->phy_port = PORT_FIBRE;
+		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
+			bp->flags |= BNX2_FLAG_NO_WOL;
+			bp->wol = 0;
+		}
+		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
+			/* Don't do parallel detect on this board because of
+			 * some board problems.  The link will not go down
+			 * if we do parallel detect.
+			 */
+			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+			    pdev->subsystem_device == 0x310c)
+				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
+		} else {
+			bp->phy_addr = 2;
+			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
+				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
+		}
+	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
+		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
+		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
+	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
+		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
+		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
+		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
+
+	bnx2_init_fw_cap(bp);
+
+	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
+	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
+	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
+		bp->flags |= BNX2_FLAG_NO_WOL;
+		bp->wol = 0;
+	}
+
+	if (bp->flags & BNX2_FLAG_NO_WOL)
+		device_set_wakeup_capable(&bp->pdev->dev, false);
+	else
+		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
+	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+		bp->tx_quick_cons_trip_int =
+			bp->tx_quick_cons_trip;
+		bp->tx_ticks_int = bp->tx_ticks;
+		bp->rx_quick_cons_trip_int =
+			bp->rx_quick_cons_trip;
+		bp->rx_ticks_int = bp->rx_ticks;
+		bp->comp_prod_trip_int = bp->comp_prod_trip;
+		bp->com_ticks_int = bp->com_ticks;
+		bp->cmd_ticks_int = bp->cmd_ticks;
+	}
+
+	/* Disable MSI on 5706 if AMD 8132 bridge is found.
+	 *
+	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
+	 * with byte enables disabled on the unused 32-bit word.  This is legal
+	 * but causes problems on the AMD 8132 which will eventually stop
+	 * responding after a while.
+	 *
+	 * AMD believes this incompatibility is unique to the 5706, and
+	 * prefers to locally disable MSI rather than globally disabling it.
+	 */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
+		struct pci_dev *amd_8132 = NULL;
+
+		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
+						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
+						  amd_8132))) {
+
+			if (amd_8132->revision >= 0x10 &&
+			    amd_8132->revision <= 0x13) {
+				disable_msi = 1;
+				pci_dev_put(amd_8132);
+				break;
+			}
+		}
+	}
+
+	bnx2_set_default_link(bp);
+	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+	init_timer(&bp->timer);
+	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2_timer;
+
+#ifdef BCM_CNIC
+	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
+		bp->cnic_eth_dev.max_iscsi_conn =
+			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
+			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+	bp->cnic_probe = bnx2_cnic_probe;
+#endif
+	pci_save_state(pdev);
+
+	return 0;
+
+err_out_unmap:
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
+
+	pci_iounmap(pdev, bp->regview);
+	bp->regview = NULL;
+
+err_out_release:
+	pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+
+err_out:
+	kfree(bp->temp_stats_blk);
+
+	return rc;
+}
+
+static char *
+bnx2_bus_string(struct bnx2 *bp, char *str)
+{
+	char *s = str;
+
+	if (bp->flags & BNX2_FLAG_PCIE) {
+		s += sprintf(s, "PCI Express");
+	} else {
+		s += sprintf(s, "PCI");
+		if (bp->flags & BNX2_FLAG_PCIX)
+			s += sprintf(s, "-X");
+		if (bp->flags & BNX2_FLAG_PCI_32BIT)
+			s += sprintf(s, " 32-bit");
+		else
+			s += sprintf(s, " 64-bit");
+		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
+	}
+	return str;
+}
+
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_init_napi(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		int (*poll)(struct napi_struct *, int);
+
+		if (i == 0)
+			poll = bnx2_poll;
+		else
+			poll = bnx2_poll_msix;
+
+		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+		bnapi->bp = bp;
+	}
+}
+
+static const struct net_device_ops bnx2_netdev_ops = {
+	.ndo_open		= bnx2_open,
+	.ndo_start_xmit		= bnx2_start_xmit,
+	.ndo_stop		= bnx2_close,
+	.ndo_get_stats64	= bnx2_get_stats64,
+	.ndo_set_rx_mode	= bnx2_set_rx_mode,
+	.ndo_do_ioctl		= bnx2_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= bnx2_change_mac_addr,
+	.ndo_change_mtu		= bnx2_change_mtu,
+	.ndo_set_features	= bnx2_set_features,
+	.ndo_tx_timeout		= bnx2_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_bnx2,
+#endif
+};
+
+static int
+bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int version_printed = 0;
+	struct net_device *dev;
+	struct bnx2 *bp;
+	int rc;
+	char str[40];
+
+	if (version_printed++ == 0)
+		pr_info("%s", version);
+
+	/* dev zeroed in init_etherdev */
+	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
+	if (!dev)
+		return -ENOMEM;
+
+	rc = bnx2_init_board(pdev, dev);
+	if (rc < 0)
+		goto err_free;
+
+	dev->netdev_ops = &bnx2_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+	dev->ethtool_ops = &bnx2_ethtool_ops;
+
+	bp = netdev_priv(dev);
+
+	pci_set_drvdata(pdev, dev);
+
+	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
+
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+		NETIF_F_TSO | NETIF_F_TSO_ECN |
+		NETIF_F_RXHASH | NETIF_F_RXCSUM;
+
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+	dev->vlan_features = dev->hw_features;
+	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+	dev->features |= dev->hw_features;
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+	if ((rc = register_netdev(dev))) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto error;
+	}
+
+	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
+		    "node addr %pM\n", board_info[ent->driver_data].name,
+		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
+		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
+		    pdev->irq, dev->dev_addr);
+
+	return 0;
+
+error:
+	pci_iounmap(pdev, bp->regview);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+err_free:
+	bnx2_free_stats_blk(dev);
+	free_netdev(dev);
+	return rc;
+}
+
+static void
+bnx2_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	del_timer_sync(&bp->timer);
+	cancel_work_sync(&bp->reset_task);
+
+	pci_iounmap(bp->pdev, bp->regview);
+
+	bnx2_free_stats_blk(dev);
+	kfree(bp->temp_stats_blk);
+
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
+
+	bnx2_release_firmware(bp);
+
+	free_netdev(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int
+bnx2_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (netif_running(dev)) {
+		cancel_work_sync(&bp->reset_task);
+		bnx2_netif_stop(bp, true);
+		netif_device_detach(dev);
+		del_timer_sync(&bp->timer);
+		bnx2_shutdown_chip(bp);
+		__bnx2_free_irq(bp);
+		bnx2_free_skbs(bp);
+	}
+	bnx2_setup_wol(bp);
+	return 0;
+}
+
+static int
+bnx2_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	bnx2_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+	bnx2_request_irq(bp);
+	bnx2_init_nic(bp, 1);
+	bnx2_netif_start(bp, true);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * bnx2_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	rtnl_lock();
+	netif_device_detach(dev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(dev)) {
+		bnx2_netif_stop(bp, true);
+		del_timer_sync(&bp->timer);
+		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+	}
+
+	pci_disable_device(pdev);
+	rtnl_unlock();
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+	int err = 0;
+
+	rtnl_lock();
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset\n");
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		if (netif_running(dev))
+			err = bnx2_init_nic(bp, 1);
+
+		if (!err)
+			result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+		bnx2_napi_enable(bp);
+		dev_close(dev);
+	}
+	rtnl_unlock();
+
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
+		return result;
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+			 err); /* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ * bnx2_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	rtnl_lock();
+	if (netif_running(dev))
+		bnx2_netif_start(bp, true);
+
+	netif_device_attach(dev);
+	rtnl_unlock();
+}
+
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp;
+
+	if (!dev)
+		return;
+
+	bp = netdev_priv(dev);
+	if (!bp)
+		return;
+
+	rtnl_lock();
+	if (netif_running(dev))
+		dev_close(bp->dev);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		bnx2_set_power_state(bp, PCI_D3hot);
+
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers bnx2_err_handler = {
+	.error_detected	= bnx2_io_error_detected,
+	.slot_reset	= bnx2_io_slot_reset,
+	.resume		= bnx2_io_resume,
+};
+
+static struct pci_driver bnx2_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnx2_pci_tbl,
+	.probe		= bnx2_init_one,
+	.remove		= bnx2_remove_one,
+	.driver.pm	= BNX2_PM_OPS,
+	.err_handler	= &bnx2_err_handler,
+	.shutdown	= bnx2_shutdown,
+};
+
+module_pci_driver(bnx2_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
new file mode 100644
index 0000000..380234d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -0,0 +1,7466 @@
+/* bnx2.h: QLogic bnx2 network driver.
+ *
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan  (mchan@broadcom.com)
+ */
+
+
+#ifndef BNX2_H
+#define BNX2_H
+
+/* Hardware data structures and register definitions automatically
+ * generated from RTL code. Do not modify.
+ */
+
+/*
+ *  tx_bd definition
+ */
+struct bnx2_tx_bd {
+	u32 tx_bd_haddr_hi;
+	u32 tx_bd_haddr_lo;
+	u32 tx_bd_mss_nbytes;
+		#define TX_BD_TCP6_OFF2_SHL		(14)
+	u32 tx_bd_vlan_tag_flags;
+		#define TX_BD_FLAGS_CONN_FAULT		(1<<0)
+		#define TX_BD_FLAGS_TCP6_OFF0_MSK	(3<<1)
+		#define TX_BD_FLAGS_TCP6_OFF0_SHL	(1)
+		#define TX_BD_FLAGS_TCP_UDP_CKSUM	(1<<1)
+		#define TX_BD_FLAGS_IP_CKSUM		(1<<2)
+		#define TX_BD_FLAGS_VLAN_TAG		(1<<3)
+		#define TX_BD_FLAGS_COAL_NOW		(1<<4)
+		#define TX_BD_FLAGS_DONT_GEN_CRC	(1<<5)
+		#define TX_BD_FLAGS_END			(1<<6)
+		#define TX_BD_FLAGS_START		(1<<7)
+		#define TX_BD_FLAGS_SW_OPTION_WORD	(0x1f<<8)
+		#define TX_BD_FLAGS_TCP6_OFF4_SHL	(12)
+		#define TX_BD_FLAGS_SW_FLAGS		(1<<13)
+		#define TX_BD_FLAGS_SW_SNAP		(1<<14)
+		#define TX_BD_FLAGS_SW_LSO		(1<<15)
+
+};
+
+
+/*
+ *  rx_bd definition
+ */
+struct bnx2_rx_bd {
+	u32 rx_bd_haddr_hi;
+	u32 rx_bd_haddr_lo;
+	u32 rx_bd_len;
+	u32 rx_bd_flags;
+		#define RX_BD_FLAGS_NOPUSH		(1<<0)
+		#define RX_BD_FLAGS_DUMMY		(1<<1)
+		#define RX_BD_FLAGS_END			(1<<2)
+		#define RX_BD_FLAGS_START		(1<<3)
+
+};
+
+#define BNX2_RX_ALIGN			16
+
+/*
+ *  status_block definition
+ */
+struct status_block {
+	u32 status_attn_bits;
+		#define STATUS_ATTN_BITS_LINK_STATE		(1L<<0)
+		#define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT	(1L<<1)
+		#define STATUS_ATTN_BITS_TX_BD_READ_ABORT	(1L<<2)
+		#define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT	(1L<<3)
+		#define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT	(1L<<4)
+		#define STATUS_ATTN_BITS_TX_DMA_ABORT		(1L<<5)
+		#define STATUS_ATTN_BITS_TX_PATCHUP_ABORT	(1L<<6)
+		#define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT	(1L<<7)
+		#define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT	(1L<<8)
+		#define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT	(1L<<9)
+		#define STATUS_ATTN_BITS_RX_MBUF_ABORT		(1L<<10)
+		#define STATUS_ATTN_BITS_RX_LOOKUP_ABORT	(1L<<11)
+		#define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT	(1L<<12)
+		#define STATUS_ATTN_BITS_RX_V2P_ABORT		(1L<<13)
+		#define STATUS_ATTN_BITS_RX_BD_CACHE_ABORT	(1L<<14)
+		#define STATUS_ATTN_BITS_RX_DMA_ABORT		(1L<<15)
+		#define STATUS_ATTN_BITS_COMPLETION_ABORT	(1L<<16)
+		#define STATUS_ATTN_BITS_HOST_COALESCE_ABORT	(1L<<17)
+		#define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT	(1L<<18)
+		#define STATUS_ATTN_BITS_CONTEXT_ABORT		(1L<<19)
+		#define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT	(1L<<20)
+		#define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT	(1L<<21)
+		#define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT	(1L<<22)
+		#define STATUS_ATTN_BITS_MAC_ABORT		(1L<<23)
+		#define STATUS_ATTN_BITS_TIMER_ABORT		(1L<<24)
+		#define STATUS_ATTN_BITS_DMAE_ABORT		(1L<<25)
+		#define STATUS_ATTN_BITS_FLSH_ABORT		(1L<<26)
+		#define STATUS_ATTN_BITS_GRC_ABORT		(1L<<27)
+		#define STATUS_ATTN_BITS_EPB_ERROR		(1L<<30)
+		#define STATUS_ATTN_BITS_PARITY_ERROR		(1L<<31)
+
+	u32 status_attn_bits_ack;
+#if defined(__BIG_ENDIAN)
+	u16 status_tx_quick_consumer_index0;
+	u16 status_tx_quick_consumer_index1;
+	u16 status_tx_quick_consumer_index2;
+	u16 status_tx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index0;
+	u16 status_rx_quick_consumer_index1;
+	u16 status_rx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index4;
+	u16 status_rx_quick_consumer_index5;
+	u16 status_rx_quick_consumer_index6;
+	u16 status_rx_quick_consumer_index7;
+	u16 status_rx_quick_consumer_index8;
+	u16 status_rx_quick_consumer_index9;
+	u16 status_rx_quick_consumer_index10;
+	u16 status_rx_quick_consumer_index11;
+	u16 status_rx_quick_consumer_index12;
+	u16 status_rx_quick_consumer_index13;
+	u16 status_rx_quick_consumer_index14;
+	u16 status_rx_quick_consumer_index15;
+	u16 status_completion_producer_index;
+	u16 status_cmd_consumer_index;
+	u16 status_idx;
+	u8 status_unused;
+	u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_tx_quick_consumer_index1;
+	u16 status_tx_quick_consumer_index0;
+	u16 status_tx_quick_consumer_index3;
+	u16 status_tx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index1;
+	u16 status_rx_quick_consumer_index0;
+	u16 status_rx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index5;
+	u16 status_rx_quick_consumer_index4;
+	u16 status_rx_quick_consumer_index7;
+	u16 status_rx_quick_consumer_index6;
+	u16 status_rx_quick_consumer_index9;
+	u16 status_rx_quick_consumer_index8;
+	u16 status_rx_quick_consumer_index11;
+	u16 status_rx_quick_consumer_index10;
+	u16 status_rx_quick_consumer_index13;
+	u16 status_rx_quick_consumer_index12;
+	u16 status_rx_quick_consumer_index15;
+	u16 status_rx_quick_consumer_index14;
+	u16 status_cmd_consumer_index;
+	u16 status_completion_producer_index;
+	u8 status_blk_num;
+	u8 status_unused;
+	u16 status_idx;
+#endif
+};
+
+/*
+ *  status_block definition
+ */
+struct status_block_msix {
+#if defined(__BIG_ENDIAN)
+	u16 status_tx_quick_consumer_index;
+	u16 status_rx_quick_consumer_index;
+	u16 status_completion_producer_index;
+	u16 status_cmd_consumer_index;
+	u32 status_unused;
+	u16 status_idx;
+	u8 status_unused2;
+	u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_rx_quick_consumer_index;
+	u16 status_tx_quick_consumer_index;
+	u16 status_cmd_consumer_index;
+	u16 status_completion_producer_index;
+	u32 status_unused;
+	u8 status_blk_num;
+	u8 status_unused2;
+	u16 status_idx;
+#endif
+};
+
+#define BNX2_SBLK_MSIX_ALIGN_SIZE	128
+
+
+/*
+ *  statistics_block definition
+ */
+struct statistics_block {
+	u32 stat_IfHCInOctets_hi;
+	u32 stat_IfHCInOctets_lo;
+	u32 stat_IfHCInBadOctets_hi;
+	u32 stat_IfHCInBadOctets_lo;
+	u32 stat_IfHCOutOctets_hi;
+	u32 stat_IfHCOutOctets_lo;
+	u32 stat_IfHCOutBadOctets_hi;
+	u32 stat_IfHCOutBadOctets_lo;
+	u32 stat_IfHCInUcastPkts_hi;
+	u32 stat_IfHCInUcastPkts_lo;
+	u32 stat_IfHCInMulticastPkts_hi;
+	u32 stat_IfHCInMulticastPkts_lo;
+	u32 stat_IfHCInBroadcastPkts_hi;
+	u32 stat_IfHCInBroadcastPkts_lo;
+	u32 stat_IfHCOutUcastPkts_hi;
+	u32 stat_IfHCOutUcastPkts_lo;
+	u32 stat_IfHCOutMulticastPkts_hi;
+	u32 stat_IfHCOutMulticastPkts_lo;
+	u32 stat_IfHCOutBroadcastPkts_hi;
+	u32 stat_IfHCOutBroadcastPkts_lo;
+	u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
+	u32 stat_Dot3StatsCarrierSenseErrors;
+	u32 stat_Dot3StatsFCSErrors;
+	u32 stat_Dot3StatsAlignmentErrors;
+	u32 stat_Dot3StatsSingleCollisionFrames;
+	u32 stat_Dot3StatsMultipleCollisionFrames;
+	u32 stat_Dot3StatsDeferredTransmissions;
+	u32 stat_Dot3StatsExcessiveCollisions;
+	u32 stat_Dot3StatsLateCollisions;
+	u32 stat_EtherStatsCollisions;
+	u32 stat_EtherStatsFragments;
+	u32 stat_EtherStatsJabbers;
+	u32 stat_EtherStatsUndersizePkts;
+	u32 stat_EtherStatsOverrsizePkts;
+	u32 stat_EtherStatsPktsRx64Octets;
+	u32 stat_EtherStatsPktsRx65Octetsto127Octets;
+	u32 stat_EtherStatsPktsRx128Octetsto255Octets;
+	u32 stat_EtherStatsPktsRx256Octetsto511Octets;
+	u32 stat_EtherStatsPktsRx512Octetsto1023Octets;
+	u32 stat_EtherStatsPktsRx1024Octetsto1522Octets;
+	u32 stat_EtherStatsPktsRx1523Octetsto9022Octets;
+	u32 stat_EtherStatsPktsTx64Octets;
+	u32 stat_EtherStatsPktsTx65Octetsto127Octets;
+	u32 stat_EtherStatsPktsTx128Octetsto255Octets;
+	u32 stat_EtherStatsPktsTx256Octetsto511Octets;
+	u32 stat_EtherStatsPktsTx512Octetsto1023Octets;
+	u32 stat_EtherStatsPktsTx1024Octetsto1522Octets;
+	u32 stat_EtherStatsPktsTx1523Octetsto9022Octets;
+	u32 stat_XonPauseFramesReceived;
+	u32 stat_XoffPauseFramesReceived;
+	u32 stat_OutXonSent;
+	u32 stat_OutXoffSent;
+	u32 stat_FlowControlDone;
+	u32 stat_MacControlFramesReceived;
+	u32 stat_XoffStateEntered;
+	u32 stat_IfInFramesL2FilterDiscards;
+	u32 stat_IfInRuleCheckerDiscards;
+	u32 stat_IfInFTQDiscards;
+	u32 stat_IfInMBUFDiscards;
+	u32 stat_IfInRuleCheckerP4Hit;
+	u32 stat_CatchupInRuleCheckerDiscards;
+	u32 stat_CatchupInFTQDiscards;
+	u32 stat_CatchupInMBUFDiscards;
+	u32 stat_CatchupInRuleCheckerP4Hit;
+	u32 stat_GenStat00;
+	u32 stat_GenStat01;
+	u32 stat_GenStat02;
+	u32 stat_GenStat03;
+	u32 stat_GenStat04;
+	u32 stat_GenStat05;
+	u32 stat_GenStat06;
+	u32 stat_GenStat07;
+	u32 stat_GenStat08;
+	u32 stat_GenStat09;
+	u32 stat_GenStat10;
+	u32 stat_GenStat11;
+	u32 stat_GenStat12;
+	u32 stat_GenStat13;
+	u32 stat_GenStat14;
+	u32 stat_GenStat15;
+	u32 stat_FwRxDrop;
+};
+
+
+/*
+ *  l2_fhdr definition
+ */
+struct l2_fhdr {
+	u32 l2_fhdr_status;
+		#define L2_FHDR_STATUS_RULE_CLASS	(0x7<<0)
+		#define L2_FHDR_STATUS_RULE_P2		(1<<3)
+		#define L2_FHDR_STATUS_RULE_P3		(1<<4)
+		#define L2_FHDR_STATUS_RULE_P4		(1<<5)
+		#define L2_FHDR_STATUS_L2_VLAN_TAG	(1<<6)
+		#define L2_FHDR_STATUS_L2_LLC_SNAP	(1<<7)
+		#define L2_FHDR_STATUS_RSS_HASH		(1<<8)
+		#define L2_FHDR_STATUS_IP_DATAGRAM	(1<<13)
+		#define L2_FHDR_STATUS_TCP_SEGMENT	(1<<14)
+		#define L2_FHDR_STATUS_UDP_DATAGRAM	(1<<15)
+
+		#define L2_FHDR_STATUS_SPLIT		(1<<16)
+		#define L2_FHDR_ERRORS_BAD_CRC		(1<<17)
+		#define L2_FHDR_ERRORS_PHY_DECODE	(1<<18)
+		#define L2_FHDR_ERRORS_ALIGNMENT	(1<<19)
+		#define L2_FHDR_ERRORS_TOO_SHORT	(1<<20)
+		#define L2_FHDR_ERRORS_GIANT_FRAME	(1<<21)
+		#define L2_FHDR_ERRORS_TCP_XSUM		(1<<28)
+		#define L2_FHDR_ERRORS_UDP_XSUM		(1<<31)
+
+		#define L2_FHDR_STATUS_USE_RXHASH	\
+			(L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH)
+
+	u32 l2_fhdr_hash;
+#if defined(__BIG_ENDIAN)
+	u16 l2_fhdr_pkt_len;
+	u16 l2_fhdr_vlan_tag;
+	u16 l2_fhdr_ip_xsum;
+	u16 l2_fhdr_tcp_udp_xsum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 l2_fhdr_vlan_tag;
+	u16 l2_fhdr_pkt_len;
+	u16 l2_fhdr_tcp_udp_xsum;
+	u16 l2_fhdr_ip_xsum;
+#endif
+};
+
+#define BNX2_RX_OFFSET		(sizeof(struct l2_fhdr) + 2)
+
+/*
+ *  l2_context definition
+ */
+#define BNX2_L2CTX_TYPE					0x00000000
+#define BNX2_L2CTX_TYPE_SIZE_L2				 ((0xc0/0x20)<<16)
+#define BNX2_L2CTX_TYPE_TYPE				 (0xf<<28)
+#define BNX2_L2CTX_TYPE_TYPE_EMPTY			 (0<<28)
+#define BNX2_L2CTX_TYPE_TYPE_L2				 (1<<28)
+
+#define BNX2_L2CTX_TX_HOST_BIDX				0x00000088
+#define BNX2_L2CTX_EST_NBD				0x00000088
+#define BNX2_L2CTX_CMD_TYPE				0x00000088
+#define BNX2_L2CTX_CMD_TYPE_TYPE			 (0xf<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_L2			 (0<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP			 (1<<24)
+
+#define BNX2_L2CTX_TX_HOST_BSEQ				0x00000090
+#define BNX2_L2CTX_TSCH_BSEQ				0x00000094
+#define BNX2_L2CTX_TBDR_BSEQ				0x00000098
+#define BNX2_L2CTX_TBDR_BOFF				0x0000009c
+#define BNX2_L2CTX_TBDR_BIDX				0x0000009c
+#define BNX2_L2CTX_TBDR_BHADDR_HI			0x000000a0
+#define BNX2_L2CTX_TBDR_BHADDR_LO			0x000000a4
+#define BNX2_L2CTX_TXP_BOFF				0x000000a8
+#define BNX2_L2CTX_TXP_BIDX				0x000000a8
+#define BNX2_L2CTX_TXP_BSEQ				0x000000ac
+
+#define BNX2_L2CTX_TYPE_XI				0x00000080
+#define BNX2_L2CTX_CMD_TYPE_XI				0x00000240
+#define BNX2_L2CTX_TBDR_BHADDR_HI_XI			0x00000258
+#define BNX2_L2CTX_TBDR_BHADDR_LO_XI			0x0000025c
+
+/*
+ *  l2_bd_chain_context definition
+ */
+#define BNX2_L2CTX_BD_PRE_READ				0x00000000
+#define BNX2_L2CTX_CTX_SIZE				0x00000000
+#define BNX2_L2CTX_CTX_TYPE				0x00000000
+#define BNX2_L2CTX_FLOW_CTRL_ENABLE			 0x000000ff
+#define BNX2_L2CTX_CTX_TYPE_SIZE_L2			 ((0x20/20)<<16)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE		 (0xf<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED	 (0<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
+
+#define BNX2_L2CTX_HOST_BDIDX				0x00000004
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT			 16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT			 24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_HOST_BSEQ				0x00000008
+#define BNX2_L2CTX_NX_BSEQ				0x0000000c
+#define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
+#define BNX2_L2CTX_NX_BDHADDR_LO			0x00000014
+#define BNX2_L2CTX_NX_BDIDX				0x00000018
+
+#define BNX2_L2CTX_HOST_PG_BDIDX			0x00000044
+#define BNX2_L2CTX_PG_BUF_SIZE				0x00000048
+#define BNX2_L2CTX_RBDC_KEY				0x0000004c
+#define BNX2_L2CTX_RBDC_JUMBO_KEY			 0x3ffe
+#define BNX2_L2CTX_NX_PG_BDHADDR_HI			0x00000050
+#define BNX2_L2CTX_NX_PG_BDHADDR_LO			0x00000054
+
+/*
+ *  pci_config_l definition
+ *  offset: 0000
+ */
+#define BNX2_PCICFG_MSI_CONTROL				0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE			 (1L<<16)
+
+#define BNX2_PCICFG_MISC_CONFIG				0x00000068
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
+#define BNX2_PCICFG_MISC_CONFIG_RESERVED1		 (1L<<4)
+#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA		 (1L<<5)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP	 (1L<<6)
+#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA		 (1L<<7)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ		 (1L<<8)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY		 (1L<<9)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN	 (1L<<10)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN	 (1L<<11)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN	 (1L<<12)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV		 (0xffL<<16)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV		 (0xfL<<24)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID			 (0xfL<<28)
+
+#define BNX2_PCICFG_MISC_STATUS				0x0000006c
+#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE		 (1L<<0)
+#define BNX2_PCICFG_MISC_STATUS_32BIT_DET		 (1L<<1)
+#define BNX2_PCICFG_MISC_STATUS_M66EN			 (1L<<2)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_DET		 (1L<<3)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED		 (0x3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66		 (0L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100		 (1L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133		 (2L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE	 (3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE	 (1L<<8)
+
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS		0x00000070
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ	 (0L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ	 (1L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ	 (2L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ	 (3L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ	 (4L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ	 (5L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ	 (6L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ	 (7L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW	 (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE	 (1L<<6)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT	 (1L<<7)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC	 (0x7L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF	 (0L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER	 (1L<<11)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50	 (2L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17	 (1L<<17)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18	 (1L<<18)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19	 (1L<<19)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED	 (0xfffL<<20)
+
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS			0x00000078
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL		 (0xfffffL<<2)
+
+#define BNX2_PCICFG_REG_WINDOW				0x00000080
+#define BNX2_PCICFG_INT_ACK_CMD				0x00000084
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX			 (0xffffL<<0)
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID		 (1L<<16)
+#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM	 (1L<<17)
+#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT		 (1L<<18)
+#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM		 (0xfL<<24)
+#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT		 24
+
+#define BNX2_PCICFG_STATUS_BIT_SET_CMD			0x00000088
+#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD		0x0000008c
+#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR			0x00000090
+#define BNX2_PCICFG_MAILBOX_QUEUE_DATA			0x00000094
+
+#define BNX2_PCICFG_DEVICE_CONTROL			0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND		 ((1L<<5)<<16)
+
+/*
+ *  pci_reg definition
+ *  offset: 0x400
+ */
+#define BNX2_PCI_GRC_WINDOW_ADDR			0x00000400
+#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE			 (0x1ffL<<13)
+#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN		 (1L<<31)
+
+#define BNX2_PCI_GRC_WINDOW2_BASE		 	 0xc000
+#define BNX2_PCI_GRC_WINDOW3_BASE		 	 0xe000
+
+#define BNX2_PCI_CONFIG_1				0x00000404
+#define BNX2_PCI_CONFIG_1_RESERVED0			 (0xffL<<0)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY			 (0x7L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF		 (0L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16		 (1L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32		 (2L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64		 (3L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128		 (4L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256		 (5L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512		 (6L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024		 (7L<<8)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY		 (0x7L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF		 (0L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16		 (1L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32		 (2L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64		 (3L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128		 (4L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256		 (5L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512		 (6L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024		 (7L<<11)
+#define BNX2_PCI_CONFIG_1_RESERVED1			 (0x3ffffL<<14)
+
+#define BNX2_PCI_CONFIG_2				0x00000408
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE			 (0xfL<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED		 (0L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K			 (1L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K		 (2L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K		 (3L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K		 (4L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M			 (5L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M			 (6L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M			 (7L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M			 (8L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M			 (9L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M			 (10L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M			 (11L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M		 (12L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M		 (13L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M		 (14L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G			 (15L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_64ENA			 (1L<<4)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY			 (1L<<5)
+#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY		 (1L<<6)
+#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE		 (1L<<7)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE			 (0xffL<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED		 (0L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K		 (1L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K		 (2L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K		 (3L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K		 (4L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K		 (5L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K		 (6L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K		 (7L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K		 (8L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K		 (9L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K		 (10L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M		 (11L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M		 (12L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M		 (13L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M		 (14L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M		 (15L<<8)
+#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT		 (0x1fL<<16)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT		 (0x3L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512		 (0L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K		 (1L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K		 (2L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K		 (3L<<21)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR		 (1L<<23)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT		 (1L<<24)
+#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT		 (1L<<25)
+#define BNX2_PCI_CONFIG_2_RESERVED0			 (0x3fL<<26)
+#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI		 (1L<<16)
+#define BNX2_PCI_CONFIG_2_RESERVED0_XI			 (0x7fffL<<17)
+
+#define BNX2_PCI_CONFIG_3				0x0000040c
+#define BNX2_PCI_CONFIG_3_STICKY_BYTE			 (0xffL<<0)
+#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE		 (0xffL<<8)
+#define BNX2_PCI_CONFIG_3_FORCE_PME			 (1L<<24)
+#define BNX2_PCI_CONFIG_3_PME_STATUS			 (1L<<25)
+#define BNX2_PCI_CONFIG_3_PME_ENABLE			 (1L<<26)
+#define BNX2_PCI_CONFIG_3_PM_STATE			 (0x3L<<27)
+#define BNX2_PCI_CONFIG_3_VAUX_PRESET			 (1L<<30)
+#define BNX2_PCI_CONFIG_3_PCI_POWER			 (1L<<31)
+
+#define BNX2_PCI_PM_DATA_A				0x00000410
+#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG		 (0xffL<<8)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG		 (0xffL<<16)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG		 (0xffL<<24)
+
+#define BNX2_PCI_PM_DATA_B				0x00000414
+#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG		 (0xffL<<8)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG		 (0xffL<<16)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG		 (0xffL<<24)
+
+#define BNX2_PCI_SWAP_DIAG0				0x00000418
+#define BNX2_PCI_SWAP_DIAG1				0x0000041c
+#define BNX2_PCI_EXP_ROM_ADDR				0x00000420
+#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS			 (0x3fffffL<<2)
+#define BNX2_PCI_EXP_ROM_ADDR_REQ			 (1L<<31)
+
+#define BNX2_PCI_EXP_ROM_DATA				0x00000424
+#define BNX2_PCI_VPD_INTF				0x00000428
+#define BNX2_PCI_VPD_INTF_INTF_REQ			 (1L<<0)
+
+#define BNX2_PCI_VPD_ADDR_FLAG				0x0000042c
+#define BNX2_PCI_VPD_ADDR_FLAG_MSK			0x0000ffff
+#define BNX2_PCI_VPD_ADDR_FLAG_SL			0L
+#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS			 (0x1fffL<<2)
+#define BNX2_PCI_VPD_ADDR_FLAG_WR			 (1L<<15)
+
+#define BNX2_PCI_VPD_DATA				0x00000430
+#define BNX2_PCI_ID_VAL1				0x00000434
+#define BNX2_PCI_ID_VAL1_DEVICE_ID			 (0xffffL<<0)
+#define BNX2_PCI_ID_VAL1_VENDOR_ID			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL2				0x00000438
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID		 (0xffffL<<0)
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL3				0x0000043c
+#define BNX2_PCI_ID_VAL3_CLASS_CODE			 (0xffffffL<<0)
+#define BNX2_PCI_ID_VAL3_REVISION_ID			 (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL4				0x00000440
+#define BNX2_PCI_ID_VAL4_CAP_ENA			 (0xfL<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_0			 (0L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_1			 (1L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_2			 (2L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_3			 (3L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_4			 (4L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_5			 (5L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_6			 (6L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_7			 (7L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_8			 (8L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_9			 (9L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_10			 (10L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_11			 (11L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_12			 (12L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_13			 (13L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_14			 (14L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_15			 (15L<<0)
+#define BNX2_PCI_ID_VAL4_RESERVED0			 (0x3L<<4)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG			 (0x3L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0			 (0L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1			 (1L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2			 (2L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3			 (3L<<6)
+#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP		 (1L<<8)
+#define BNX2_PCI_ID_VAL4_MSI_LIMIT			 (0x7L<<9)
+#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP			 (0x7L<<12)
+#define BNX2_PCI_ID_VAL4_MSI_ENABLE			 (1L<<15)
+#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE		 (1L<<16)
+#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE		 (1L<<17)
+#define BNX2_PCI_ID_VAL4_RESERVED2			 (0x7L<<18)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21	 (0x3L<<21)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21		 (0x3L<<23)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0		 (1L<<25)
+#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10		 (0x3L<<26)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0		 (1L<<28)
+#define BNX2_PCI_ID_VAL4_RESERVED3			 (0x7L<<29)
+#define BNX2_PCI_ID_VAL4_RESERVED3_XI			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL5				0x00000444
+#define BNX2_PCI_ID_VAL5_D1_SUPPORT			 (1L<<0)
+#define BNX2_PCI_ID_VAL5_D2_SUPPORT			 (1L<<1)
+#define BNX2_PCI_ID_VAL5_PME_IN_D0			 (1L<<2)
+#define BNX2_PCI_ID_VAL5_PME_IN_D1			 (1L<<3)
+#define BNX2_PCI_ID_VAL5_PME_IN_D2			 (1L<<4)
+#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT			 (1L<<5)
+#define BNX2_PCI_ID_VAL5_RESERVED0_TE			 (0x3ffffffL<<6)
+#define BNX2_PCI_ID_VAL5_PM_VERSION_XI			 (0x7L<<6)
+#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI		 (1L<<9)
+#define BNX2_PCI_ID_VAL5_RESERVED0_XI			 (0x3fffffL<<10)
+
+#define BNX2_PCI_PCIX_EXTENDED_STATUS			0x00000448
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP		 (1L<<8)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST	 (1L<<9)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS	 (0xfL<<16)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX	 (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL6				0x0000044c
+#define BNX2_PCI_ID_VAL6_MAX_LAT			 (0xffL<<0)
+#define BNX2_PCI_ID_VAL6_MIN_GNT			 (0xffL<<8)
+#define BNX2_PCI_ID_VAL6_BIST				 (0xffL<<16)
+#define BNX2_PCI_ID_VAL6_RESERVED0			 (0xffL<<24)
+
+#define BNX2_PCI_MSI_DATA				0x00000450
+#define BNX2_PCI_MSI_DATA_MSI_DATA			 (0xffffL<<0)
+
+#define BNX2_PCI_MSI_ADDR_H				0x00000454
+#define BNX2_PCI_MSI_ADDR_L				0x00000458
+#define BNX2_PCI_MSI_ADDR_L_VAL				 (0x3fffffffL<<2)
+
+#define BNX2_PCI_CFG_ACCESS_CMD				0x0000045c
+#define BNX2_PCI_CFG_ACCESS_CMD_ADR			 (0x3fL<<2)
+#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ			 (1L<<27)
+#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ			 (0xfL<<28)
+
+#define BNX2_PCI_CFG_ACCESS_DATA			0x00000460
+#define BNX2_PCI_MSI_MASK				0x00000464
+#define BNX2_PCI_MSI_MASK_MSI_MASK			 (0xffffffffL<<0)
+
+#define BNX2_PCI_MSI_PEND				0x00000468
+#define BNX2_PCI_MSI_PEND_MSI_PEND			 (0xffffffffL<<0)
+
+#define BNX2_PCI_PM_DATA_C				0x0000046c
+#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_C_RESERVED0			 (0xffffffL<<8)
+
+#define BNX2_PCI_MSIX_CONTROL				0x000004c0
+#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ		 (0x7ffL<<0)
+#define BNX2_PCI_MSIX_CONTROL_RESERVED0			 (0x1fffffL<<11)
+
+#define BNX2_PCI_MSIX_TBL_OFF_BIR			0x000004c4
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_MSIX_PBA_OFF_BIT			0x000004c8
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_PCIE_CAPABILITY			0x000004d0
+#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM	 (0x1fL<<0)
+#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1	 (1L<<5)
+
+#define BNX2_PCI_DEVICE_CAPABILITY			0x000004d4
+#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED	 (0x7L<<0)
+#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT	 (1L<<5)
+#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY	 (0x7L<<6)
+#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY	 (0x7L<<9)
+#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT	 (1L<<15)
+
+#define BNX2_PCI_LINK_CAPABILITY			0x000004dc
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED		 (0xfL<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH		 (0x1fL<<4)
+#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT		 (1L<<9)
+#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT		 (0x3L<<10)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT		 (0x7L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101	 (5L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110	 (6L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT		 (0x7L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001	 (1L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010	 (2L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT	 (0x7L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101	 (5L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110	 (6L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT	 (0x7L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001	 (1L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010	 (2L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM		 (0xffL<<24)
+
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2		0x000004e4
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP	 (0xfL<<0)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP	 (1L<<4)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED	 (0x7ffffffL<<5)
+
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2			0x000004e8
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED	 (0xffffffffL<<0)
+
+#define BNX2_PCI_GRC_WINDOW1_ADDR			0x00000610
+#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW2_ADDR			0x00000614
+#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW3_ADDR			0x00000618
+#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_MSIX_TABLE_ADDR				 0x318000
+#define BNX2_MSIX_PBA_ADDR				 0x31c000
+
+/*
+ *  misc_reg definition
+ *  offset: 0x800
+ */
+#define BNX2_MISC_COMMAND				0x00000800
+#define BNX2_MISC_COMMAND_ENABLE_ALL			 (1L<<0)
+#define BNX2_MISC_COMMAND_DISABLE_ALL			 (1L<<1)
+#define BNX2_MISC_COMMAND_SW_RESET			 (1L<<4)
+#define BNX2_MISC_COMMAND_POR_RESET			 (1L<<5)
+#define BNX2_MISC_COMMAND_HD_RESET			 (1L<<6)
+#define BNX2_MISC_COMMAND_CMN_SW_RESET			 (1L<<7)
+#define BNX2_MISC_COMMAND_PAR_ERROR			 (1L<<8)
+#define BNX2_MISC_COMMAND_CS16_ERR			 (1L<<9)
+#define BNX2_MISC_COMMAND_CS16_ERR_LOC			 (0xfL<<12)
+#define BNX2_MISC_COMMAND_PAR_ERR_RAM			 (0x7fL<<16)
+#define BNX2_MISC_COMMAND_POWERDOWN_EVENT		 (1L<<23)
+#define BNX2_MISC_COMMAND_SW_SHUTDOWN			 (1L<<24)
+#define BNX2_MISC_COMMAND_SHUTDOWN_EN			 (1L<<25)
+#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN		 (1L<<26)
+#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23		 (1L<<27)
+#define BNX2_MISC_COMMAND_PCIE_DIS			 (1L<<28)
+
+#define BNX2_MISC_CFG					0x00000804
+#define BNX2_MISC_CFG_GRC_TMOUT				 (1L<<0)
+#define BNX2_MISC_CFG_NVM_WR_EN				 (0x3L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT			 (0L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PCI			 (1L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW			 (2L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2			 (3L<<1)
+#define BNX2_MISC_CFG_BIST_EN				 (1L<<3)
+#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC			 (1L<<4)
+#define BNX2_MISC_CFG_RESERVED5_TE			 (1L<<5)
+#define BNX2_MISC_CFG_RESERVED6_TE			 (1L<<6)
+#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE			 (1L<<7)
+#define BNX2_MISC_CFG_LEDMODE				 (0x7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC			 (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_TE			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_TE			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_TE			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_TE			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_TE			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_TE			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_TE			 (7L<<8)
+#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE			 (1L<<11)
+#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE			 (1L<<12)
+#define BNX2_MISC_CFG_LEDMODE_XI			 (0xfL<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC_XI			 (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_XI			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_XI			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_XI			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC2_XI			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_XI			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_XI			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_XI			 (7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC3_XI			 (8L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_XI			 (9L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY8_XI			 (10L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY9_XI			 (11L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC4_XI			 (12L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY10_XI			 (13L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY11_XI			 (14L<<8)
+#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI			 (15L<<8)
+#define BNX2_MISC_CFG_PORT_SELECT_XI			 (1L<<13)
+#define BNX2_MISC_CFG_PARITY_MODE_XI			 (1L<<14)
+
+#define BNX2_MISC_ID					0x00000808
+#define BNX2_MISC_ID_BOND_ID				 (0xfL<<0)
+#define BNX2_MISC_ID_BOND_ID_X				 (0L<<0)
+#define BNX2_MISC_ID_BOND_ID_C				 (3L<<0)
+#define BNX2_MISC_ID_BOND_ID_S				 (12L<<0)
+#define BNX2_MISC_ID_CHIP_METAL				 (0xffL<<4)
+#define BNX2_MISC_ID_CHIP_REV				 (0xfL<<12)
+#define BNX2_MISC_ID_CHIP_NUM				 (0xffffL<<16)
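+
+/*
+ * Editorial usage sketch, not part of the original header: every field in
+ * this file is encoded as a (value << shift) mask, so decoding is a plain
+ * mask and shift.  For example, given a 32-bit value read from the
+ * BNX2_MISC_ID register:
+ *
+ *	chip_num = (misc_id & BNX2_MISC_ID_CHIP_NUM) >> 16;
+ */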
+
+#define BNX2_MISC_ENABLE_STATUS_BITS			0x0000080c
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE	 (1L<<4)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE	 (1L<<9)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE	 (1L<<15)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE	 (1L<<17)
+#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE	 (1L<<25)
+#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ENABLE_SET_BITS			0x00000810
+#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ENABLE_CLR_BITS			0x00000814
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_CLOCK_CONTROL_BITS			0x00000818
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ	 (0L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ	 (1L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ	 (2L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ	 (3L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ	 (4L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ	 (5L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ	 (6L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ	 (7L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW	 (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE	 (1L<<6)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT	 (1L<<7)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC	 (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF	 (0L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI	 (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER		 (1L<<11)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50	 (2L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI	 (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE	 (1L<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE	 (1L<<19)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE	 (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI	 (0x3fL<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI	 (0x7L<<24)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI	 (1L<<27)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI	 (0xfL<<28)
+
+#define BNX2_MISC_SPIO					0x0000081c
+#define BNX2_MISC_SPIO_VALUE				 (0xffL<<0)
+#define BNX2_MISC_SPIO_SET				 (0xffL<<8)
+#define BNX2_MISC_SPIO_CLR				 (0xffL<<16)
+#define BNX2_MISC_SPIO_FLOAT				 (0xffL<<24)
+
+#define BNX2_MISC_SPIO_INT				0x00000820
+#define BNX2_MISC_SPIO_INT_INT_STATE_TE			 (0xfL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE			 (0xfL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_TE			 (0xfL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_TE			 (0xfL<<24)
+#define BNX2_MISC_SPIO_INT_INT_STATE_XI			 (0xffL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI			 (0xffL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_XI			 (0xffL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_XI			 (0xffL<<24)
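+
+/*
+ * Editorial note, not part of the original header: fields suffixed _TE and
+ * _XI appear to describe the same register for different chip generations
+ * (compare BNX2_MISC_SPIO_INT_INT_STATE_TE and _XI above), which is why
+ * several masks are defined twice at overlapping bit positions.
+ */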
+
+#define BNX2_MISC_CONFIG_LFSR				0x00000824
+#define BNX2_MISC_CONFIG_LFSR_DIV			 (0xffffL<<0)
+
+#define BNX2_MISC_LFSR_MASK_BITS			0x00000828
+#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE		 (1L<<12)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE		 (1L<<21)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ARB_REQ0				0x0000082c
+#define BNX2_MISC_ARB_REQ1				0x00000830
+#define BNX2_MISC_ARB_REQ2				0x00000834
+#define BNX2_MISC_ARB_REQ3				0x00000838
+#define BNX2_MISC_ARB_REQ4				0x0000083c
+#define BNX2_MISC_ARB_FREE0				0x00000840
+#define BNX2_MISC_ARB_FREE1				0x00000844
+#define BNX2_MISC_ARB_FREE2				0x00000848
+#define BNX2_MISC_ARB_FREE3				0x0000084c
+#define BNX2_MISC_ARB_FREE4				0x00000850
+#define BNX2_MISC_ARB_REQ_STATUS0			0x00000854
+#define BNX2_MISC_ARB_REQ_STATUS1			0x00000858
+#define BNX2_MISC_ARB_REQ_STATUS2			0x0000085c
+#define BNX2_MISC_ARB_REQ_STATUS3			0x00000860
+#define BNX2_MISC_ARB_REQ_STATUS4			0x00000864
+#define BNX2_MISC_ARB_GNT0				0x00000868
+#define BNX2_MISC_ARB_GNT0_0				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT0_1				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT0_2				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT0_3				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT0_4				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT0_5				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT0_6				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT0_7				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT1				0x0000086c
+#define BNX2_MISC_ARB_GNT1_8				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT1_9				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT1_10				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT1_11				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT1_12				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT1_13				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT1_14				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT1_15				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT2				0x00000870
+#define BNX2_MISC_ARB_GNT2_16				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT2_17				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT2_18				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT2_19				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT2_20				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT2_21				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT2_22				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT2_23				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT3				0x00000874
+#define BNX2_MISC_ARB_GNT3_24				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT3_25				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT3_26				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT3_27				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT3_28				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT3_29				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT3_30				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT3_31				 (0x7L<<28)
+
+#define BNX2_MISC_RESERVED1				0x00000878
+#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE	 (0x3fL<<0)
+
+#define BNX2_MISC_RESERVED2				0x0000087c
+#define BNX2_MISC_RESERVED2_PCIE_DIS			 (1L<<0)
+#define BNX2_MISC_RESERVED2_LINK_IN_L23			 (1L<<1)
+
+#define BNX2_MISC_SM_ASF_CONTROL			0x00000880
+#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST		 (1L<<0)
+#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN			 (1L<<1)
+#define BNX2_MISC_SM_ASF_CONTROL_WG_TO			 (1L<<2)
+#define BNX2_MISC_SM_ASF_CONTROL_HB_TO			 (1L<<3)
+#define BNX2_MISC_SM_ASF_CONTROL_PA_TO			 (1L<<4)
+#define BNX2_MISC_SM_ASF_CONTROL_PL_TO			 (1L<<5)
+#define BNX2_MISC_SM_ASF_CONTROL_RT_TO			 (1L<<6)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT		 (1L<<7)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN		 (1L<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE		 (1L<<9)
+#define BNX2_MISC_SM_ASF_CONTROL_RES			 (0x3L<<10)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN			 (1L<<12)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN		 (1L<<13)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT	 (1L<<14)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD		 (1L<<15)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1		 (0x7fL<<16)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2		 (0x7fL<<23)
+#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0	 (1L<<30)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN		 (1L<<31)
+
+#define BNX2_MISC_SMB_IN				0x00000884
+#define BNX2_MISC_SMB_IN_DAT_IN				 (0xffL<<0)
+#define BNX2_MISC_SMB_IN_RDY				 (1L<<8)
+#define BNX2_MISC_SMB_IN_DONE				 (1L<<9)
+#define BNX2_MISC_SMB_IN_FIRSTBYTE			 (1L<<10)
+#define BNX2_MISC_SMB_IN_STATUS				 (0x7L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OK			 (0x0L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_PEC			 (0x1L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OFLOW			 (0x2L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_STOP			 (0x3L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT			 (0x4L<<11)
+
+#define BNX2_MISC_SMB_OUT				0x00000888
+#define BNX2_MISC_SMB_OUT_DAT_OUT			 (0xffL<<0)
+#define BNX2_MISC_SMB_OUT_RDY				 (1L<<8)
+#define BNX2_MISC_SMB_OUT_START				 (1L<<9)
+#define BNX2_MISC_SMB_OUT_LAST				 (1L<<10)
+#define BNX2_MISC_SMB_OUT_ACC_TYPE			 (1L<<11)
+#define BNX2_MISC_SMB_OUT_ENB_PEC			 (1L<<12)
+#define BNX2_MISC_SMB_OUT_GET_RX_LEN			 (1L<<13)
+#define BNX2_MISC_SMB_OUT_SMB_READ_LEN			 (0x3fL<<14)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS		 (0xfL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK		 (0L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK	 (1L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW		 (2L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP		 (3L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT	 (4L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST	 (5L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK		 (6L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK	 (9L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST	 (0xdL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE		 (1L<<24)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN		 (1L<<25)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN		 (1L<<26)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN		 (1L<<27)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN		 (1L<<28)
+
+#define BNX2_MISC_SMB_WATCHDOG				0x0000088c
+#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG			 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_HEARTBEAT				0x00000890
+#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT		 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_ASF				0x00000894
+#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF			 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_LEGACY			0x00000898
+#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY		 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_RETRAN				0x0000089c
+#define BNX2_MISC_SMB_RETRAN_RETRAN			 (0xffL<<0)
+
+#define BNX2_MISC_SMB_TIMESTAMP				0x000008a0
+#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP		 (0xffffffffL<<0)
+
+#define BNX2_MISC_PERR_ENA0				0x000008a4
+#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC		 (1L<<0)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF		 (1L<<1)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD		 (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC		 (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF		 (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD		 (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM		 (1L<<6)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0		 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1		 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2		 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3		 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4		 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5		 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL		 (1L<<13)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0		 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1		 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2		 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3		 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4		 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0		 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1		 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2		 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA			 (1L<<22)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF		 (1L<<23)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD		 (1L<<24)
+#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX			 (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RBDC_MISC			 (1L<<26)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB		 (1L<<27)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR		 (1L<<28)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC		 (1L<<29)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM		 (1L<<30)
+#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS		 (1L<<31)
+#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI	 (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI	 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI	 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI	 (1L<<13)
+#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI	 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI	 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI	 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI	 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI	 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI	 (1L<<22)
+#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI	 (1L<<23)
+#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI	 (1L<<24)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI	 (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI	 (1L<<26)
+#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI		 (1L<<27)
+#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI		 (1L<<29)
+#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI		 (1L<<30)
+#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI		 (1L<<31)
+
+#define BNX2_MISC_PERR_ENA1				0x000008a8
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS		 (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM		 (1L<<1)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM		 (1L<<2)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC		 (1L<<3)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF		 (1L<<4)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD		 (1L<<5)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC		 (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TBDC_MISC			 (1L<<7)
+#define BNX2_MISC_PERR_ENA1_TDMA_MISC			 (1L<<8)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0		 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1		 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF		 (1L<<11)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD		 (1L<<12)
+#define BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB		 (1L<<13)
+#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR		 (1L<<14)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC		 (1L<<15)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF		 (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD		 (1L<<17)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX		 (1L<<18)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX		 (1L<<19)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX			 (1L<<20)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX			 (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC			 (1L<<22)
+#define BNX2_MISC_PERR_ENA1_CSQ_MISC			 (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_MISC			 (1L<<24)
+#define BNX2_MISC_PERR_ENA1_MCPQ_MISC			 (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC			 (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC			 (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC			 (1L<<28)
+#define BNX2_MISC_PERR_ENA1_RXPQ_MISC			 (1L<<29)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC			 (1L<<30)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC			 (1L<<31)
+#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI		 (1L<<4)
+#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI		 (1L<<8)
+#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI		 (1L<<11)
+#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI		 (1L<<13)
+#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI		 (1L<<14)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI		 (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI		 (1L<<19)
+#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI		 (1L<<20)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI		 (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI		 (1L<<22)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI		 (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI		 (1L<<24)
+#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI		 (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI		 (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI	 (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI		 (1L<<29)
+
+#define BNX2_MISC_PERR_ENA2				0x000008ac
+#define BNX2_MISC_PERR_ENA2_COMQ_MISC			 (1L<<0)
+#define BNX2_MISC_PERR_ENA2_COMXQ_MISC			 (1L<<1)
+#define BNX2_MISC_PERR_ENA2_COMTQ_MISC			 (1L<<2)
+#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC			 (1L<<3)
+#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC			 (1L<<4)
+#define BNX2_MISC_PERR_ENA2_TXPQ_MISC			 (1L<<5)
+#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC			 (1L<<6)
+#define BNX2_MISC_PERR_ENA2_TPATQ_MISC			 (1L<<7)
+#define BNX2_MISC_PERR_ENA2_TASQ_MISC			 (1L<<8)
+#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI		 (1L<<2)
+#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI		 (1L<<5)
+#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI	 (1L<<6)
+
+#define BNX2_MISC_DEBUG_VECTOR_SEL			0x000008b0
+#define BNX2_MISC_DEBUG_VECTOR_SEL_0			 (0xfffL<<0)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1			 (0xfffL<<12)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI			 (0xfffL<<15)
+
+#define BNX2_MISC_VREG_CONTROL				0x000008b4
+#define BNX2_MISC_VREG_CONTROL_1_2			 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI		 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI	 (0L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI	 (1L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI	 (2L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI	 (3L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI	 (4L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI	 (5L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI	 (6L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI		 (7L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI	 (8L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI	 (9L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI	 (10L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI	 (11L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI	 (12L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI	 (13L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI	 (14L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI	 (15L<<0)
+#define BNX2_MISC_VREG_CONTROL_2_5			 (0xfL<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14		 (0L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12		 (1L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10		 (2L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8		 (3L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6		 (4L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4		 (5L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2		 (6L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_NOM			 (7L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2		 (8L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4		 (9L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6		 (10L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8		 (11L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10		 (12L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12		 (13L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14		 (14L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16		 (15L<<4)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT			 (0xfL<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14		 (0L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12		 (1L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10		 (2L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8		 (3L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6		 (4L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4		 (5L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2		 (6L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM		 (7L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2		 (8L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4		 (9L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6		 (10L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8		 (11L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10		 (12L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12		 (13L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14		 (14L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16		 (15L<<8)
+
+#define BNX2_MISC_FINAL_CLK_CTL_VAL			0x000008b8
+#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL	 (0x3ffffffL<<6)
+
+#define BNX2_MISC_GP_HW_CTL0				0x000008bc
+#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE			 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL0_RMII_MODE			 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE			 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE	 (1L<<4)
+#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE	 (1L<<5)
+#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE	 (1L<<6)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI		 (0x7L<<4)
+#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY	 (1L<<7)
+#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE	 (1L<<8)
+#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE	 (1L<<9)
+#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE		 (1L<<10)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI		 (0x7L<<8)
+#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0			 (1L<<11)
+#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF		 (1L<<12)
+#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF		 (1L<<13)
+#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF		 (1L<<14)
+#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF	 (1L<<15)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI		 (0xfL<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA		 (0L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA		 (1L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA		 (3L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA		 (5L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA		 (7L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN		 (15L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS		 (1L<<20)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS		 (1L<<21)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT		 (0x3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P		 (0L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P		 (1L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P		 (2L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P		 (3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT		 (0x3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P		 (0L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P		 (1L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P		 (2L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P		 (3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ		 (0x3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA	 (0L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA	 (1L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA	 (2L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA	 (3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ		 (0x3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA	 (0L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA	 (1L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA	 (2L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA	 (3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ		 (0x3L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57	 (0L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45	 (1L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62	 (2L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66	 (3L<<30)
+
+#define BNX2_MISC_GP_HW_CTL1				0x000008c0
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE	 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE	 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE		 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI		 (0xffffL<<0)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_HW_CTL				0x000008c4
+#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS		 (1L<<0)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE		 (1L<<1)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0		 (1L<<2)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1		 (1L<<3)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED		 (0xfffL<<4)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_CORE_CTL				0x000008c8
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS	 (1L<<0)
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ		 (1L<<1)
+#define BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE		 (1L<<16)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN		 (0x3fffL<<2)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_HW_CTL				0x000008cc
+#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN		 (1L<<0)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT		 (0x7fffL<<1)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_CORE_CTL				0x000008d0
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT		 (0xffffL<<0)
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_PPIO					0x000008d4
+#define BNX2_MISC_PPIO_VALUE				 (0xfL<<0)
+#define BNX2_MISC_PPIO_SET				 (0xfL<<8)
+#define BNX2_MISC_PPIO_CLR				 (0xfL<<16)
+#define BNX2_MISC_PPIO_FLOAT				 (0xfL<<24)
+
+#define BNX2_MISC_PPIO_INT				0x000008d8
+#define BNX2_MISC_PPIO_INT_INT_STATE			 (0xfL<<0)
+#define BNX2_MISC_PPIO_INT_OLD_VALUE			 (0xfL<<8)
+#define BNX2_MISC_PPIO_INT_OLD_SET			 (0xfL<<16)
+#define BNX2_MISC_PPIO_INT_OLD_CLR			 (0xfL<<24)
+
+#define BNX2_MISC_RESET_NUMS				0x000008dc
+#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS		 (0x7L<<0)
+#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS		 (0x7L<<4)
+#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS		 (0x7L<<8)
+#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS		 (0x7L<<12)
+#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS		 (0x7L<<16)
+
+#define BNX2_MISC_CS16_ERR				0x000008e0
+#define BNX2_MISC_CS16_ERR_ENA_PCI			 (1L<<0)
+#define BNX2_MISC_CS16_ERR_ENA_RDMA			 (1L<<1)
+#define BNX2_MISC_CS16_ERR_ENA_TDMA			 (1L<<2)
+#define BNX2_MISC_CS16_ERR_ENA_EMAC			 (1L<<3)
+#define BNX2_MISC_CS16_ERR_ENA_CTX			 (1L<<4)
+#define BNX2_MISC_CS16_ERR_ENA_TBDR			 (1L<<5)
+#define BNX2_MISC_CS16_ERR_ENA_RBDC			 (1L<<6)
+#define BNX2_MISC_CS16_ERR_ENA_COM			 (1L<<7)
+#define BNX2_MISC_CS16_ERR_ENA_CP			 (1L<<8)
+#define BNX2_MISC_CS16_ERR_STA_PCI			 (1L<<16)
+#define BNX2_MISC_CS16_ERR_STA_RDMA			 (1L<<17)
+#define BNX2_MISC_CS16_ERR_STA_TDMA			 (1L<<18)
+#define BNX2_MISC_CS16_ERR_STA_EMAC			 (1L<<19)
+#define BNX2_MISC_CS16_ERR_STA_CTX			 (1L<<20)
+#define BNX2_MISC_CS16_ERR_STA_TBDR			 (1L<<21)
+#define BNX2_MISC_CS16_ERR_STA_RBDC			 (1L<<22)
+#define BNX2_MISC_CS16_ERR_STA_COM			 (1L<<23)
+#define BNX2_MISC_CS16_ERR_STA_CP			 (1L<<24)
+
+#define BNX2_MISC_SPIO_EVENT				0x000008e4
+#define BNX2_MISC_SPIO_EVENT_ENABLE			 (0xffL<<0)
+
+#define BNX2_MISC_PPIO_EVENT				0x000008e8
+#define BNX2_MISC_PPIO_EVENT_ENABLE			 (0xfL<<0)
+
+#define BNX2_MISC_DUAL_MEDIA_CTRL			0x000008ec
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID		 (0xffL<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X		 (0L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C		 (3L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S		 (12L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP	 (0x7L<<8)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN		 (1L<<11)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET	 (1L<<12)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET	 (1L<<13)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET		 (1L<<14)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET		 (1L<<15)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST		 (1L<<16)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST		 (1L<<17)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST		 (1L<<18)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST		 (1L<<19)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST		 (1L<<20)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL		 (0x7L<<21)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP		 (1L<<24)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE	 (1L<<25)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ	 (0xfL<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ	 (1L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ	 (2L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ	 (4L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ	 (8L<<26)
+
+#define BNX2_MISC_OTP_CMD1				0x000008f0
+#define BNX2_MISC_OTP_CMD1_FMODE			 (0x7L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_IDLE			 (0L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_WRITE			 (1L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_INIT			 (2L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_SET			 (3L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RST			 (4L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY			 (5L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0		 (6L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1		 (7L<<0)
+#define BNX2_MISC_OTP_CMD1_USEPINS			 (1L<<8)
+#define BNX2_MISC_OTP_CMD1_PROGSEL			 (1L<<9)
+#define BNX2_MISC_OTP_CMD1_PROGSTART			 (1L<<10)
+#define BNX2_MISC_OTP_CMD1_PCOUNT			 (0x7L<<16)
+#define BNX2_MISC_OTP_CMD1_PBYP				 (1L<<19)
+#define BNX2_MISC_OTP_CMD1_VSEL				 (0xfL<<20)
+#define BNX2_MISC_OTP_CMD1_TM				 (0x7L<<27)
+#define BNX2_MISC_OTP_CMD1_SADBYP			 (1L<<30)
+#define BNX2_MISC_OTP_CMD1_DEBUG			 (1L<<31)
+
+#define BNX2_MISC_OTP_CMD2				0x000008f4
+#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR			 (0x3ffL<<0)
+#define BNX2_MISC_OTP_CMD2_DOSEL			 (0x7fL<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_0			 (0L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_1			 (1L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_127			 (127L<<16)
+
+#define BNX2_MISC_OTP_STATUS				0x000008f8
+#define BNX2_MISC_OTP_STATUS_DATA			 (0xffL<<0)
+#define BNX2_MISC_OTP_STATUS_VALID			 (1L<<8)
+#define BNX2_MISC_OTP_STATUS_BUSY			 (1L<<9)
+#define BNX2_MISC_OTP_STATUS_BUSYSM			 (1L<<10)
+#define BNX2_MISC_OTP_STATUS_DONE			 (1L<<11)
+
+#define BNX2_MISC_OTP_SHIFT1_CMD			0x000008fc
+#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT1_DATA			0x00000900
+#define BNX2_MISC_OTP_SHIFT2_CMD			0x00000904
+#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT2_DATA			0x00000908
+#define BNX2_MISC_BIST_CS0				0x0000090c
+#define BNX2_MISC_BIST_CS0_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS0_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS0_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS0_MBIST_GO			 (1L<<9)
+#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE		 (1L<<31)
+
+#define BNX2_MISC_BIST_MEMSTATUS0			0x00000910
+#define BNX2_MISC_BIST_CS1				0x00000914
+#define BNX2_MISC_BIST_CS1_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS1_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS1_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS1_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS1			0x00000918
+#define BNX2_MISC_BIST_CS2				0x0000091c
+#define BNX2_MISC_BIST_CS2_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS2_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS2_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS2_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS2			0x00000920
+#define BNX2_MISC_BIST_CS3				0x00000924
+#define BNX2_MISC_BIST_CS3_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS3_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS3_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS3_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS3			0x00000928
+#define BNX2_MISC_BIST_CS4				0x0000092c
+#define BNX2_MISC_BIST_CS4_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS4_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS4_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS4_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS4			0x00000930
+#define BNX2_MISC_BIST_CS5				0x00000934
+#define BNX2_MISC_BIST_CS5_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS5_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS5_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS5_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS5			0x00000938
+#define BNX2_MISC_MEM_TM0				0x0000093c
+#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM		 (0xfL<<0)
+#define BNX2_MISC_MEM_TM0_MCP_SCPAD			 (0xfL<<8)
+#define BNX2_MISC_MEM_TM0_UMP_TM			 (0xffL<<16)
+#define BNX2_MISC_MEM_TM0_HB_MEM_TM			 (0xfL<<24)
+
+#define BNX2_MISC_USPLL_CTRL				0x00000940
+#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS			 (1L<<0)
+#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS		 (1L<<1)
+#define BNX2_MISC_USPLL_CTRL_LCPX			 (0x3fL<<2)
+#define BNX2_MISC_USPLL_CTRL_RX				 (0x3L<<8)
+#define BNX2_MISC_USPLL_CTRL_VC_EN			 (1L<<10)
+#define BNX2_MISC_USPLL_CTRL_VCO_MG			 (0x3L<<11)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XF			 (0x7L<<13)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XS			 (0x7L<<16)
+#define BNX2_MISC_USPLL_CTRL_TESTD_EN			 (1L<<19)
+#define BNX2_MISC_USPLL_CTRL_TESTD_SEL			 (0x7L<<20)
+#define BNX2_MISC_USPLL_CTRL_TESTA_EN			 (1L<<23)
+#define BNX2_MISC_USPLL_CTRL_TESTA_SEL			 (0x3L<<24)
+#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF			 (1L<<26)
+#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST		 (1L<<27)
+#define BNX2_MISC_USPLL_CTRL_ANALOG_RST			 (1L<<28)
+#define BNX2_MISC_USPLL_CTRL_LOCK			 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS0				0x00000944
+#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR	 (1L<<2)
+#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR	 (1L<<3)
+#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR		 (1L<<6)
+#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR	 (1L<<19)
+#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR	 (1L<<20)
+#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR	 (1L<<22)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR		 (1L<<24)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR	 (1L<<25)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR	 (1L<<26)
+#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS0_THBUF_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR		 (1L<<29)
+#define BNX2_MISC_PERR_STATUS0_TBDC_PERR		 (1L<<30)
+#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR		 (1L<<31)
+
+#define BNX2_MISC_PERR_STATUS1				0x00000948
+#define BNX2_MISC_PERR_STATUS1_RBDC_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR	 (1L<<5)
+#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR	 (1L<<6)
+#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS1_COMQ_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS1_TASQ_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR		 (1L<<19)
+#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR		 (1L<<20)
+#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR		 (1L<<22)
+#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS1_CPQ_PERR			 (1L<<24)
+#define BNX2_MISC_PERR_STATUS1_CSQ_PERR			 (1L<<25)
+#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR		 (1L<<26)
+#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR		 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS2				0x0000094c
+#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR		 (1L<<6)
+
+#define BNX2_MISC_LCPLL_CTRL0				0x00000950
+#define BNX2_MISC_LCPLL_CTRL0_OAC			 (0x7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY		 (0L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO			 (1L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY		 (3L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY			 (7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL			 (0x7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360		 (0L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480		 (1L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600		 (3L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720		 (7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL			 (0x3L<<6)
+#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE		 (0x7L<<8)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL			 (0x3L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0		 (0L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1		 (1L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2		 (2L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART		 (1L<<13)
+#define BNX2_MISC_LCPLL_CTRL0_RESERVED			 (1L<<14)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN		 (1L<<15)
+#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN		 (1L<<16)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN		 (1L<<17)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN		 (1L<<18)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN		 (1L<<19)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE		 (1L<<20)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS		 (1L<<21)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN	 (1L<<22)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE		 (1L<<23)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN	 (1L<<24)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS		 (1L<<25)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART		 (1L<<26)
+#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN		 (1L<<27)
+
+#define BNX2_MISC_LCPLL_CTRL1				0x00000954
+#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM		 (0x1fL<<0)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN	 (1L<<5)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN		 (1L<<6)
+#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR		 (1L<<7)
+
+#define BNX2_MISC_LCPLL_STATUS				0x00000958
+#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM		 (1L<<0)
+#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM		 (1L<<1)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE		 (1L<<2)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS		 (1L<<3)
+#define BNX2_MISC_LCPLL_STATUS_PLLSTATE			 (0x7L<<4)
+#define BNX2_MISC_LCPLL_STATUS_CAPSTATE			 (0x7L<<7)
+#define BNX2_MISC_LCPLL_STATUS_CAPSELECT		 (0x1fL<<10)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR		 (1L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0	 (0L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1	 (1L<<15)
+
+#define BNX2_MISC_OSCFUNDS_CTRL				0x0000095c
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF		 (0L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM		 (0x3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0		 (0L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1		 (1L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2		 (2L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3		 (3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ		 (0x3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0		 (0L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1		 (1L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2		 (2L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3		 (3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ		 (0x3L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0		 (0L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1		 (1L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2		 (2L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3		 (3L<<10)
+
+
+/*
+ *  nvm_reg definition
+ *  offset: 0x6400
+ */
+#define BNX2_NVM_COMMAND				0x00006400
+#define BNX2_NVM_COMMAND_RST				 (1L<<0)
+#define BNX2_NVM_COMMAND_DONE				 (1L<<3)
+#define BNX2_NVM_COMMAND_DOIT				 (1L<<4)
+#define BNX2_NVM_COMMAND_WR				 (1L<<5)
+#define BNX2_NVM_COMMAND_ERASE				 (1L<<6)
+#define BNX2_NVM_COMMAND_FIRST				 (1L<<7)
+#define BNX2_NVM_COMMAND_LAST				 (1L<<8)
+#define BNX2_NVM_COMMAND_WREN				 (1L<<16)
+#define BNX2_NVM_COMMAND_WRDI				 (1L<<17)
+#define BNX2_NVM_COMMAND_EWSR				 (1L<<18)
+#define BNX2_NVM_COMMAND_WRSR				 (1L<<19)
+#define BNX2_NVM_COMMAND_RD_ID				 (1L<<20)
+#define BNX2_NVM_COMMAND_RD_STATUS			 (1L<<21)
+#define BNX2_NVM_COMMAND_MODE_256			 (1L<<22)
+
+#define BNX2_NVM_STATUS					0x00006404
+#define BNX2_NVM_STATUS_PI_FSM_STATE			 (0xfL<<0)
+#define BNX2_NVM_STATUS_EE_FSM_STATE			 (0xfL<<4)
+#define BNX2_NVM_STATUS_EQ_FSM_STATE			 (0xfL<<8)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI		 (0x1fL<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI	 (0L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI	 (1L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI	 (2L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI	 (3L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI	 (4L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI	 (5L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI	 (6L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI	 (7L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI	 (8L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI	 (9L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI	 (10L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI	 (11L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI	 (12L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI	 (13L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI	 (14L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI	 (15L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI	 (16L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI	 (17L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI	 (18L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI	 (19L<<0)
+
+#define BNX2_NVM_WRITE					0x00006408
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE			 (0xffffffffL<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO		 (16L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI		 (32L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI		 (8L<<0)
+
+#define BNX2_NVM_ADDR					0x0000640c
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE			 (0xffffffL<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO			 (16L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI		 (8L<<0)
+
+#define BNX2_NVM_READ					0x00006410
+#define BNX2_NVM_READ_NVM_READ_VALUE			 (0xffffffffL<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO			 (16L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI		 (8L<<0)
+
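+/*
+ * Illustrative sketch, not part of the original header: a single NVM read
+ * typically programs BNX2_NVM_ADDR, issues a command word with
+ * BNX2_NVM_COMMAND_DOIT set, polls BNX2_NVM_COMMAND_DONE, and then picks
+ * up the result from BNX2_NVM_READ.  "reg_wr"/"reg_rd" stand in for the
+ * driver's register accessors and are assumptions, not names from this file.
+ *
+ *	reg_wr(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+ *	reg_wr(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DOIT |
+ *	       BNX2_NVM_COMMAND_FIRST | BNX2_NVM_COMMAND_LAST);
+ *	while (!(reg_rd(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE))
+ *		udelay(5);
+ *	val = reg_rd(bp, BNX2_NVM_READ);
+ */
+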
+#define BNX2_NVM_CFG1					0x00006414
+#define BNX2_NVM_CFG1_FLASH_MODE			 (1L<<0)
+#define BNX2_NVM_CFG1_BUFFER_MODE			 (1L<<1)
+#define BNX2_NVM_CFG1_PASS_MODE				 (1L<<2)
+#define BNX2_NVM_CFG1_BITBANG_MODE			 (1L<<3)
+#define BNX2_NVM_CFG1_STATUS_BIT			 (0x7L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY		 (0L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY		 (7L<<4)
+#define BNX2_NVM_CFG1_SPI_CLK_DIV			 (0xfL<<7)
+#define BNX2_NVM_CFG1_SEE_CLK_DIV			 (0x7ffL<<11)
+#define BNX2_NVM_CFG1_STRAP_CONTROL_0			 (1L<<23)
+#define BNX2_NVM_CFG1_PROTECT_MODE			 (1L<<24)
+#define BNX2_NVM_CFG1_FLASH_SIZE			 (1L<<25)
+#define BNX2_NVM_CFG1_FW_USTRAP_1			 (1L<<26)
+#define BNX2_NVM_CFG1_FW_USTRAP_0			 (1L<<27)
+#define BNX2_NVM_CFG1_FW_USTRAP_2			 (1L<<28)
+#define BNX2_NVM_CFG1_FW_USTRAP_3			 (1L<<29)
+#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN			 (1L<<30)
+#define BNX2_NVM_CFG1_COMPAT_BYPASSS			 (1L<<31)
+
+#define BNX2_NVM_CFG2					0x00006418
+#define BNX2_NVM_CFG2_ERASE_CMD				 (0xffL<<0)
+#define BNX2_NVM_CFG2_DUMMY				 (0xffL<<8)
+#define BNX2_NVM_CFG2_STATUS_CMD			 (0xffL<<16)
+#define BNX2_NVM_CFG2_READ_ID				 (0xffL<<24)
+
+#define BNX2_NVM_CFG3					0x0000641c
+#define BNX2_NVM_CFG3_BUFFER_RD_CMD			 (0xffL<<0)
+#define BNX2_NVM_CFG3_WRITE_CMD				 (0xffL<<8)
+#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD			 (0xffL<<16)
+#define BNX2_NVM_CFG3_READ_CMD				 (0xffL<<24)
+
+#define BNX2_NVM_SW_ARB					0x00006420
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET0			 (1L<<0)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET1			 (1L<<1)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET2			 (1L<<2)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET3			 (1L<<3)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR0			 (1L<<4)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1			 (1L<<5)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2			 (1L<<6)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3			 (1L<<7)
+#define BNX2_NVM_SW_ARB_ARB_ARB0			 (1L<<8)
+#define BNX2_NVM_SW_ARB_ARB_ARB1			 (1L<<9)
+#define BNX2_NVM_SW_ARB_ARB_ARB2			 (1L<<10)
+#define BNX2_NVM_SW_ARB_ARB_ARB3			 (1L<<11)
+#define BNX2_NVM_SW_ARB_REQ0				 (1L<<12)
+#define BNX2_NVM_SW_ARB_REQ1				 (1L<<13)
+#define BNX2_NVM_SW_ARB_REQ2				 (1L<<14)
+#define BNX2_NVM_SW_ARB_REQ3				 (1L<<15)
+
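+/*
+ * Illustrative sketch, not part of the original header: SW_ARB implements a
+ * simple request/grant handshake.  A client requests a slot by writing the
+ * matching ARB_REQ_SET bit, waits for the corresponding ARB_ARB bit, and
+ * releases the slot with the ARB_REQ_CLR bit.  The choice of slot 2 and the
+ * "reg_wr"/"reg_rd" accessor names below are assumptions for illustration.
+ *
+ *	reg_wr(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+ *	while (!(reg_rd(bp, BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2))
+ *		udelay(5);
+ *	... NVM access owned by this client ...
+ *	reg_wr(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+ */
+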
+#define BNX2_NVM_ACCESS_ENABLE				0x00006424
+#define BNX2_NVM_ACCESS_ENABLE_EN			 (1L<<0)
+#define BNX2_NVM_ACCESS_ENABLE_WR_EN			 (1L<<1)
+
+#define BNX2_NVM_WRITE1					0x00006428
+#define BNX2_NVM_WRITE1_WREN_CMD			 (0xffL<<0)
+#define BNX2_NVM_WRITE1_WRDI_CMD			 (0xffL<<8)
+#define BNX2_NVM_WRITE1_SR_DATA				 (0xffL<<16)
+
+#define BNX2_NVM_CFG4					0x0000642c
+#define BNX2_NVM_CFG4_FLASH_SIZE			 (0x7L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT			 (0L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT			 (1L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT			 (2L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT			 (3L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT			 (4L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT			 (5L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT			 (6L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT		 (7L<<0)
+#define BNX2_NVM_CFG4_FLASH_VENDOR			 (1L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ST			 (0L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL		 (1L<<3)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC		 (0x3L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8	 (0L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9	 (1L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10	 (2L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11	 (3L<<4)
+#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY		 (1L<<6)
+#define BNX2_NVM_CFG4_RESERVED				 (0x1ffffffL<<7)
+
+#define BNX2_NVM_RECONFIG				0x00006430
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE		 (0xfL<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST		 (0L<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL	 (1L<<0)
+#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE		 (0xfL<<4)
+#define BNX2_NVM_RECONFIG_RESERVED			 (0x7fffffL<<8)
+#define BNX2_NVM_RECONFIG_RECONFIG_DONE			 (1L<<31)
+
+
+
+/*
+ *  dma_reg definition
+ *  offset: 0xc00
+ */
+#define BNX2_DMA_COMMAND				0x00000c00
+#define BNX2_DMA_COMMAND_ENABLE				 (1L<<0)
+
+#define BNX2_DMA_STATUS					0x00000c04
+#define BNX2_DMA_STATUS_PAR_ERROR_STATE			 (1L<<0)
+#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT		 (1L<<16)
+#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT	 (1L<<17)
+#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT		 (1L<<18)
+#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT	 (1L<<19)
+#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT	 (1L<<20)
+#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT		 (1L<<21)
+#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT	 (1L<<22)
+#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT	 (1L<<23)
+#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT	 (1L<<24)
+#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT	 (1L<<25)
+#define BNX2_DMA_STATUS_GLOBAL_ERR_XI			 (1L<<0)
+#define BNX2_DMA_STATUS_BME_XI				 (1L<<4)
+
+#define BNX2_DMA_CONFIG					0x00000c08
+#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP			 (1L<<0)
+#define BNX2_DMA_CONFIG_DATA_WORD_SWAP			 (1L<<1)
+#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP			 (1L<<4)
+#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP			 (1L<<5)
+#define BNX2_DMA_CONFIG_ONE_DMA				 (1L<<6)
+#define BNX2_DMA_CONFIG_CNTL_TWO_DMA			 (1L<<7)
+#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE			 (1L<<8)
+#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA		 (1L<<10)
+#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY		 (1L<<11)
+#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE		 (0xfL<<12)
+#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE		 (0xfL<<16)
+#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS		 (0x7L<<20)
+#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP		 (1L<<23)
+#define BNX2_DMA_CONFIG_BIG_SIZE			 (0xfL<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_NONE			 (0x0L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_64			 (0x1L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_128			 (0x2L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_256			 (0x4L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_512			 (0x8L<<24)
+#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI		 (0x3L<<0)
+#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI		 (0x3L<<4)
+#define BNX2_DMA_CONFIG_MAX_PL_XI			 (0x7L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_128B_XI			 (0L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_256B_XI			 (1L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_512B_XI			 (2L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_EN_XI			 (1L<<15)
+#define BNX2_DMA_CONFIG_MAX_RRS_XI			 (0x7L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI			 (0L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI			 (1L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI			 (2L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI		 (3L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI		 (4L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI		 (5L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI			 (1L<<19)
+#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI			 (1L<<31)
+
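+/*
+ * Illustrative sketch, not part of the original header: the swap-control
+ * bits above are combined into a single configuration word, and the exact
+ * combination depends on host endianness.  A minimal sketch, assuming a
+ * little-endian host and a hypothetical "reg_wr" accessor:
+ *
+ *	reg_wr(bp, BNX2_DMA_CONFIG,
+ *	       BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
+ *	       BNX2_DMA_CONFIG_DATA_WORD_SWAP |
+ *	       BNX2_DMA_CONFIG_CNTL_WORD_SWAP);
+ */
+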
+#define BNX2_DMA_BLACKOUT				0x00000c0c
+#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT		 (0xffL<<0)
+#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT		 (0xffL<<8)
+#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT		 (0xffL<<16)
+
+#define BNX2_DMA_READ_MASTER_SETTING_0			0x00000c10
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP	 (1L<<16)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER	 (1L<<17)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY	 (1L<<18)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS	 (0x7L<<20)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN	 (1L<<23)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
+
+#define BNX2_DMA_READ_MASTER_SETTING_1			0x00000c14
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_0			0x00000c18
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD	 (1L<<27)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_1			0x00000c1c
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
+
+#define BNX2_DMA_ARBITER				0x00000c20
+#define BNX2_DMA_ARBITER_NUM_READS			 (0x7L<<0)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE			 (1L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT		 (0L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN		 (1L<<4)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE			 (0x3L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT		 (0L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN		 (1L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN	 (2L<<5)
+#define BNX2_DMA_ARBITER_ALT_MODE_EN			 (1L<<8)
+#define BNX2_DMA_ARBITER_RR_MODE			 (1L<<9)
+#define BNX2_DMA_ARBITER_TIMER_MODE			 (1L<<10)
+#define BNX2_DMA_ARBITER_OUSTD_READ_REQ			 (0xfL<<12)
+
+#define BNX2_DMA_ARB_TIMERS				0x00000c24
+#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME		 (0xffL<<0)
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT		 (0xffL<<12)
+#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT		 (0xfffL<<20)
+
+#define BNX2_DMA_DEBUG_VECT_PEEK			0x00000c2c
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_DMA_TAG_RAM_00				0x00000c30
+#define BNX2_DMA_TAG_RAM_00_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_00_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_00_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_00_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_00_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_01				0x00000c34
+#define BNX2_DMA_TAG_RAM_01_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_01_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_01_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_01_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_01_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_02				0x00000c38
+#define BNX2_DMA_TAG_RAM_02_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_02_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_02_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_02_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_02_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_03				0x00000c3c
+#define BNX2_DMA_TAG_RAM_03_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_03_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_03_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_03_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_03_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_04				0x00000c40
+#define BNX2_DMA_TAG_RAM_04_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_04_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_04_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_04_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_04_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_05				0x00000c44
+#define BNX2_DMA_TAG_RAM_05_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_05_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_05_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_05_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_05_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_06				0x00000c48
+#define BNX2_DMA_TAG_RAM_06_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_06_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_06_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_06_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_06_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_07				0x00000c4c
+#define BNX2_DMA_TAG_RAM_07_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_07_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_07_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_07_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_07_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_08				0x00000c50
+#define BNX2_DMA_TAG_RAM_08_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_08_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_08_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_08_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_08_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_09				0x00000c54
+#define BNX2_DMA_TAG_RAM_09_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_09_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_09_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_09_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_09_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_10				0x00000c58
+#define BNX2_DMA_TAG_RAM_10_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_10_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_10_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_10_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_10_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_11				0x00000c5c
+#define BNX2_DMA_TAG_RAM_11_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_11_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_11_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_11_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_11_VALID			 (1L<<10)
+
+#define BNX2_DMA_RCHAN_STAT_22				0x00000c60
+#define BNX2_DMA_RCHAN_STAT_30				0x00000c64
+#define BNX2_DMA_RCHAN_STAT_31				0x00000c68
+#define BNX2_DMA_RCHAN_STAT_32				0x00000c6c
+#define BNX2_DMA_RCHAN_STAT_40				0x00000c70
+#define BNX2_DMA_RCHAN_STAT_41				0x00000c74
+#define BNX2_DMA_RCHAN_STAT_42				0x00000c78
+#define BNX2_DMA_RCHAN_STAT_50				0x00000c7c
+#define BNX2_DMA_RCHAN_STAT_51				0x00000c80
+#define BNX2_DMA_RCHAN_STAT_52				0x00000c84
+#define BNX2_DMA_RCHAN_STAT_60				0x00000c88
+#define BNX2_DMA_RCHAN_STAT_61				0x00000c8c
+#define BNX2_DMA_RCHAN_STAT_62				0x00000c90
+#define BNX2_DMA_RCHAN_STAT_70				0x00000c94
+#define BNX2_DMA_RCHAN_STAT_71				0x00000c98
+#define BNX2_DMA_RCHAN_STAT_72				0x00000c9c
+#define BNX2_DMA_WCHAN_STAT_00				0x00000ca0
+#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW	 (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_01				0x00000ca4
+#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH	 (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_02				0x00000ca8
+#define BNX2_DMA_WCHAN_STAT_02_LENGTH			 (0xffffL<<0)
+#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP		 (1L<<16)
+#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP		 (1L<<17)
+#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL		 (1L<<18)
+
+#define BNX2_DMA_WCHAN_STAT_10				0x00000cac
+#define BNX2_DMA_WCHAN_STAT_11				0x00000cb0
+#define BNX2_DMA_WCHAN_STAT_12				0x00000cb4
+#define BNX2_DMA_WCHAN_STAT_20				0x00000cb8
+#define BNX2_DMA_WCHAN_STAT_21				0x00000cbc
+#define BNX2_DMA_WCHAN_STAT_22				0x00000cc0
+#define BNX2_DMA_WCHAN_STAT_30				0x00000cc4
+#define BNX2_DMA_WCHAN_STAT_31				0x00000cc8
+#define BNX2_DMA_WCHAN_STAT_32				0x00000ccc
+#define BNX2_DMA_WCHAN_STAT_40				0x00000cd0
+#define BNX2_DMA_WCHAN_STAT_41				0x00000cd4
+#define BNX2_DMA_WCHAN_STAT_42				0x00000cd8
+#define BNX2_DMA_WCHAN_STAT_50				0x00000cdc
+#define BNX2_DMA_WCHAN_STAT_51				0x00000ce0
+#define BNX2_DMA_WCHAN_STAT_52				0x00000ce4
+#define BNX2_DMA_WCHAN_STAT_60				0x00000ce8
+#define BNX2_DMA_WCHAN_STAT_61				0x00000cec
+#define BNX2_DMA_WCHAN_STAT_62				0x00000cf0
+#define BNX2_DMA_WCHAN_STAT_70				0x00000cf4
+#define BNX2_DMA_WCHAN_STAT_71				0x00000cf8
+#define BNX2_DMA_WCHAN_STAT_72				0x00000cfc
+#define BNX2_DMA_ARB_STAT_00				0x00000d00
+#define BNX2_DMA_ARB_STAT_00_MASTER			 (0xffffL<<0)
+#define BNX2_DMA_ARB_STAT_00_MASTER_ENC			 (0xffL<<16)
+#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR		 (0xffL<<24)
+
+#define BNX2_DMA_ARB_STAT_01				0x00000d04
+#define BNX2_DMA_ARB_STAT_01_LPR_RPTR			 (0xfL<<0)
+#define BNX2_DMA_ARB_STAT_01_LPR_WPTR			 (0xfL<<4)
+#define BNX2_DMA_ARB_STAT_01_LPB_RPTR			 (0xfL<<8)
+#define BNX2_DMA_ARB_STAT_01_LPB_WPTR			 (0xfL<<12)
+#define BNX2_DMA_ARB_STAT_01_HPR_RPTR			 (0xfL<<16)
+#define BNX2_DMA_ARB_STAT_01_HPR_WPTR			 (0xfL<<20)
+#define BNX2_DMA_ARB_STAT_01_HPB_RPTR			 (0xfL<<24)
+#define BNX2_DMA_ARB_STAT_01_HPB_WPTR			 (0xfL<<28)
+
+#define BNX2_DMA_FUSE_CTRL0_CMD				0x00000f00
+#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL0_DATA			0x00000f04
+#define BNX2_DMA_FUSE_CTRL1_CMD				0x00000f08
+#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL1_DATA			0x00000f0c
+#define BNX2_DMA_FUSE_CTRL2_CMD				0x00000f10
+#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL2_DATA			0x00000f14
+
+
+/*
+ *  context_reg definition
+ *  offset: 0x1000
+ */
+#define BNX2_CTX_COMMAND				0x00001000
+#define BNX2_CTX_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT		 (1L<<1)
+#define BNX2_CTX_COMMAND_DISABLE_PLRU			 (1L<<2)
+#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ		 (1L<<3)
+#define BNX2_CTX_COMMAND_FLUSH_AHEAD			 (0x1fL<<8)
+#define BNX2_CTX_COMMAND_MEM_INIT			 (1L<<13)
+#define BNX2_CTX_COMMAND_PAGE_SIZE			 (0xfL<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256			 (0L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512			 (1L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1K			 (2L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_2K			 (3L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_4K			 (4L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_8K			 (5L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_16K			 (6L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_32K			 (7L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_64K			 (8L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_128K			 (9L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256K			 (10L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512K			 (11L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1M			 (12L<<16)
+
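+/*
+ * Illustrative note, not part of the original header: judging from the
+ * values above, the PAGE_SIZE field encodes the context page size as
+ * 256 bytes shifted left by the field value (0 -> 256, 4 -> 4K, 12 -> 1M).
+ * A sketch of enabling the block with 4K pages, assuming a hypothetical
+ * "reg_wr" accessor:
+ *
+ *	reg_wr(bp, BNX2_CTX_COMMAND,
+ *	       BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_PAGE_SIZE_4K);
+ */
+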
+#define BNX2_CTX_STATUS					0x00001004
+#define BNX2_CTX_STATUS_LOCK_WAIT			 (1L<<0)
+#define BNX2_CTX_STATUS_READ_STAT			 (1L<<16)
+#define BNX2_CTX_STATUS_WRITE_STAT			 (1L<<17)
+#define BNX2_CTX_STATUS_ACC_STALL_STAT			 (1L<<18)
+#define BNX2_CTX_STATUS_LOCK_STALL_STAT			 (1L<<19)
+#define BNX2_CTX_STATUS_EXT_READ_STAT			 (1L<<20)
+#define BNX2_CTX_STATUS_EXT_WRITE_STAT			 (1L<<21)
+#define BNX2_CTX_STATUS_MISS_STAT			 (1L<<22)
+#define BNX2_CTX_STATUS_HIT_STAT			 (1L<<23)
+#define BNX2_CTX_STATUS_DEAD_LOCK			 (1L<<24)
+#define BNX2_CTX_STATUS_USAGE_CNT_ERR			 (1L<<25)
+#define BNX2_CTX_STATUS_INVALID_PAGE			 (1L<<26)
+
+#define BNX2_CTX_VIRT_ADDR				0x00001008
+#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR			 (0x7fffL<<6)
+
+#define BNX2_CTX_PAGE_TBL				0x0000100c
+#define BNX2_CTX_PAGE_TBL_PAGE_TBL			 (0x3fffL<<6)
+
+#define BNX2_CTX_DATA_ADR				0x00001010
+#define BNX2_CTX_DATA_ADR_DATA_ADR			 (0x7ffffL<<2)
+
+#define BNX2_CTX_DATA					0x00001014
+#define BNX2_CTX_LOCK					0x00001018
+#define BNX2_CTX_LOCK_TYPE				 (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID		 (0x0L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL		 (0x1L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX			 (0x2L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER		 (0x4L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE		 (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_VOID_XI			 (0L<<0)
+#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI			 (1L<<0)
+#define BNX2_CTX_LOCK_TYPE_TX_XI			 (2L<<0)
+#define BNX2_CTX_LOCK_TYPE_TIMER_XI			 (4L<<0)
+#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI			 (7L<<0)
+#define BNX2_CTX_LOCK_CID_VALUE				 (0x3fffL<<7)
+#define BNX2_CTX_LOCK_GRANTED				 (1L<<26)
+#define BNX2_CTX_LOCK_MODE				 (0x7L<<27)
+#define BNX2_CTX_LOCK_MODE_UNLOCK			 (0x0L<<27)
+#define BNX2_CTX_LOCK_MODE_IMMEDIATE			 (0x1L<<27)
+#define BNX2_CTX_LOCK_MODE_SURE				 (0x2L<<27)
+#define BNX2_CTX_LOCK_STATUS				 (1L<<30)
+#define BNX2_CTX_LOCK_REQ				 (1L<<31)
+
+#define BNX2_CTX_CTX_CTRL				0x0000101c
+#define BNX2_CTX_CTX_CTRL_CTX_ADDR			 (0x7ffffL<<2)
+#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT			 (0x3L<<21)
+#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC			 (1L<<23)
+#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE			 (0x3L<<24)
+#define BNX2_CTX_CTX_CTRL_ATTR				 (1L<<26)
+#define BNX2_CTX_CTX_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CTX_CTRL_READ_REQ			 (1L<<31)
+
+#define BNX2_CTX_CTX_DATA				0x00001020
+#define BNX2_CTX_ACCESS_STATUS				0x00001040
+#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED		 (0xfL<<0)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM		 (0x3L<<10)
+#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM		 (0x3L<<12)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM	 (0x3L<<14)
+#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST	 (0x7ffL<<17)
+#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI	 (0x1fL<<0)
+#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI	 (0x1fL<<5)
+#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI		 (0x3fffffL<<10)
+
+#define BNX2_CTX_DBG_LOCK_STATUS			0x00001044
+#define BNX2_CTX_DBG_LOCK_STATUS_SM			 (0x3ffL<<0)
+#define BNX2_CTX_DBG_LOCK_STATUS_MATCH			 (0x3ffL<<22)
+
+#define BNX2_CTX_CACHE_CTRL_STATUS			0x00001048
+#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW	 (1L<<0)
+#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP	 (1L<<1)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START		 (1L<<6)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT	 (0x3fL<<7)
+#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED	 (0x3fL<<13)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE	 (1L<<19)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE	 (1L<<20)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE	 (1L<<21)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE	 (1L<<22)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE	 (1L<<23)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE	 (1L<<24)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE	 (1L<<25)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE	 (1L<<26)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE	 (1L<<27)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE	 (1L<<28)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE	 (1L<<29)
+
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS			0x0000104c
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC		 (0x7L<<0)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC		 (0x7L<<3)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC		 (0x7L<<6)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC		 (0x7L<<9)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR	 (0x7fffL<<16)
+
+#define BNX2_CTX_CACHE_STATUS				0x00001050
+#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES		 (0x3ffL<<0)
+#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES		 (0x3ffL<<16)
+
+#define BNX2_CTX_DMA_STATUS				0x00001054
+#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS		 (0x3L<<0)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS		 (0x3L<<2)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS		 (0x3L<<4)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS		 (0x3L<<6)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS		 (0x3L<<8)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS		 (0x3L<<10)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS		 (0x3L<<12)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS		 (0x3L<<14)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS		 (0x3L<<16)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS		 (0x3L<<18)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS		 (0x3L<<20)
+
+#define BNX2_CTX_REP_STATUS				0x00001058
+#define BNX2_CTX_REP_STATUS_ERROR_ENTRY			 (0x3ffL<<0)
+#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID		 (0x1fL<<10)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR		 (1L<<16)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR		 (1L<<17)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR		 (1L<<18)
+
+#define BNX2_CTX_CKSUM_ERROR_STATUS			0x0000105c
+#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_0			0x00001080
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID			 (0x3fffL<<0)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE		 (0x3L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE		 (1L<<16)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI		 (1L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI		 (0x7L<<15)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_1			0x00001084
+#define BNX2_CTX_CHNL_LOCK_STATUS_2			0x00001088
+#define BNX2_CTX_CHNL_LOCK_STATUS_3			0x0000108c
+#define BNX2_CTX_CHNL_LOCK_STATUS_4			0x00001090
+#define BNX2_CTX_CHNL_LOCK_STATUS_5			0x00001094
+#define BNX2_CTX_CHNL_LOCK_STATUS_6			0x00001098
+#define BNX2_CTX_CHNL_LOCK_STATUS_7			0x0000109c
+#define BNX2_CTX_CHNL_LOCK_STATUS_8			0x000010a0
+#define BNX2_CTX_CHNL_LOCK_STATUS_9			0x000010a4
+
+#define BNX2_CTX_CACHE_DATA				0x000010c4
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL			0x000010c8
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR	 (0x1ffL<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ		 (1L<<30)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ		 (1L<<31)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0			0x000010cc
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID		 (1L<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE		 (0xffffffL<<8)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA1			0x000010d0
+#define BNX2_CTX_CAM_CTRL				0x000010d4
+#define BNX2_CTX_CAM_CTRL_CAM_ADDR			 (0x3ffL<<0)
+#define BNX2_CTX_CAM_CTRL_RESET				 (1L<<27)
+#define BNX2_CTX_CAM_CTRL_INVALIDATE			 (1L<<28)
+#define BNX2_CTX_CAM_CTRL_SEARCH			 (1L<<29)
+#define BNX2_CTX_CAM_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CAM_CTRL_READ_REQ			 (1L<<31)
+
+
+/*
+ *  emac_reg definition
+ *  offset: 0x1400
+ */
+#define BNX2_EMAC_MODE					0x00001400
+#define BNX2_EMAC_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_MODE_HALF_DUPLEX			 (1L<<1)
+#define BNX2_EMAC_MODE_PORT				 (0x3L<<2)
+#define BNX2_EMAC_MODE_PORT_NONE			 (0L<<2)
+#define BNX2_EMAC_MODE_PORT_MII				 (1L<<2)
+#define BNX2_EMAC_MODE_PORT_GMII			 (2L<<2)
+#define BNX2_EMAC_MODE_PORT_MII_10M			 (3L<<2)
+#define BNX2_EMAC_MODE_MAC_LOOP				 (1L<<4)
+#define BNX2_EMAC_MODE_25G_MODE				 (1L<<5)
+#define BNX2_EMAC_MODE_TAGGED_MAC_CTL			 (1L<<7)
+#define BNX2_EMAC_MODE_TX_BURST				 (1L<<8)
+#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA		 (1L<<9)
+#define BNX2_EMAC_MODE_EXT_LINK_POL			 (1L<<10)
+#define BNX2_EMAC_MODE_FORCE_LINK			 (1L<<11)
+#define BNX2_EMAC_MODE_SERDES_MODE			 (1L<<12)
+#define BNX2_EMAC_MODE_BOND_OVRD			 (1L<<13)
+#define BNX2_EMAC_MODE_MPKT				 (1L<<18)
+#define BNX2_EMAC_MODE_MPKT_RCVD			 (1L<<19)
+#define BNX2_EMAC_MODE_ACPI_RCVD			 (1L<<20)
+
+#define BNX2_EMAC_STATUS				0x00001404
+#define BNX2_EMAC_STATUS_LINK				 (1L<<11)
+#define BNX2_EMAC_STATUS_LINK_CHANGE			 (1L<<12)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE	 (1L<<13)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0		 (1L<<17)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
+#define BNX2_EMAC_STATUS_MI_COMPLETE			 (1L<<22)
+#define BNX2_EMAC_STATUS_MI_INT				 (1L<<23)
+#define BNX2_EMAC_STATUS_AP_ERROR			 (1L<<24)
+#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE		 (1L<<31)
+
+#define BNX2_EMAC_ATTENTION_ENA				0x00001408
+#define BNX2_EMAC_ATTENTION_ENA_LINK			 (1L<<11)
+#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
+#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE		 (1L<<22)
+#define BNX2_EMAC_ATTENTION_ENA_MI_INT			 (1L<<23)
+#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR		 (1L<<24)
+
+#define BNX2_EMAC_LED					0x0000140c
+#define BNX2_EMAC_LED_OVERRIDE				 (1L<<0)
+#define BNX2_EMAC_LED_1000MB_OVERRIDE			 (1L<<1)
+#define BNX2_EMAC_LED_100MB_OVERRIDE			 (1L<<2)
+#define BNX2_EMAC_LED_10MB_OVERRIDE			 (1L<<3)
+#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE			 (1L<<4)
+#define BNX2_EMAC_LED_BLNK_TRAFFIC			 (1L<<5)
+#define BNX2_EMAC_LED_TRAFFIC				 (1L<<6)
+#define BNX2_EMAC_LED_1000MB				 (1L<<7)
+#define BNX2_EMAC_LED_100MB				 (1L<<8)
+#define BNX2_EMAC_LED_10MB				 (1L<<9)
+#define BNX2_EMAC_LED_TRAFFIC_STAT			 (1L<<10)
+#define BNX2_EMAC_LED_2500MB				 (1L<<11)
+#define BNX2_EMAC_LED_2500MB_OVERRIDE			 (1L<<12)
+#define BNX2_EMAC_LED_ACTIVITY_SEL			 (0x3L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_0			 (0L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_1			 (1L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_2			 (2L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_3			 (3L<<17)
+#define BNX2_EMAC_LED_BLNK_RATE				 (0xfffL<<19)
+#define BNX2_EMAC_LED_BLNK_RATE_ENA			 (1L<<31)
+
+#define BNX2_EMAC_MAC_MATCH0				0x00001410
+#define BNX2_EMAC_MAC_MATCH1				0x00001414
+#define BNX2_EMAC_MAC_MATCH2				0x00001418
+#define BNX2_EMAC_MAC_MATCH3				0x0000141c
+#define BNX2_EMAC_MAC_MATCH4				0x00001420
+#define BNX2_EMAC_MAC_MATCH5				0x00001424
+#define BNX2_EMAC_MAC_MATCH6				0x00001428
+#define BNX2_EMAC_MAC_MATCH7				0x0000142c
+#define BNX2_EMAC_MAC_MATCH8				0x00001430
+#define BNX2_EMAC_MAC_MATCH9				0x00001434
+#define BNX2_EMAC_MAC_MATCH10				0x00001438
+#define BNX2_EMAC_MAC_MATCH11				0x0000143c
+#define BNX2_EMAC_MAC_MATCH12				0x00001440
+#define BNX2_EMAC_MAC_MATCH13				0x00001444
+#define BNX2_EMAC_MAC_MATCH14				0x00001448
+#define BNX2_EMAC_MAC_MATCH15				0x0000144c
+#define BNX2_EMAC_MAC_MATCH16				0x00001450
+#define BNX2_EMAC_MAC_MATCH17				0x00001454
+#define BNX2_EMAC_MAC_MATCH18				0x00001458
+#define BNX2_EMAC_MAC_MATCH19				0x0000145c
+#define BNX2_EMAC_MAC_MATCH20				0x00001460
+#define BNX2_EMAC_MAC_MATCH21				0x00001464
+#define BNX2_EMAC_MAC_MATCH22				0x00001468
+#define BNX2_EMAC_MAC_MATCH23				0x0000146c
+#define BNX2_EMAC_MAC_MATCH24				0x00001470
+#define BNX2_EMAC_MAC_MATCH25				0x00001474
+#define BNX2_EMAC_MAC_MATCH26				0x00001478
+#define BNX2_EMAC_MAC_MATCH27				0x0000147c
+#define BNX2_EMAC_MAC_MATCH28				0x00001480
+#define BNX2_EMAC_MAC_MATCH29				0x00001484
+#define BNX2_EMAC_MAC_MATCH30				0x00001488
+#define BNX2_EMAC_MAC_MATCH31				0x0000148c
+#define BNX2_EMAC_BACKOFF_SEED				0x00001498
+#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED	 (0x3ffL<<0)
+
+#define BNX2_EMAC_RX_MTU_SIZE				0x0000149c
+#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE			 (0xffffL<<0)
+#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA			 (1L<<31)
+
+#define BNX2_EMAC_SERDES_CNTL				0x000014a4
+#define BNX2_EMAC_SERDES_CNTL_RXR			 (0x7L<<0)
+#define BNX2_EMAC_SERDES_CNTL_RXG			 (0x3L<<3)
+#define BNX2_EMAC_SERDES_CNTL_RXCKSEL			 (1L<<6)
+#define BNX2_EMAC_SERDES_CNTL_TXBIAS			 (0x7L<<7)
+#define BNX2_EMAC_SERDES_CNTL_BGMAX			 (1L<<10)
+#define BNX2_EMAC_SERDES_CNTL_BGMIN			 (1L<<11)
+#define BNX2_EMAC_SERDES_CNTL_TXMODE			 (1L<<12)
+#define BNX2_EMAC_SERDES_CNTL_TXEDGE			 (1L<<13)
+#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE		 (1L<<14)
+#define BNX2_EMAC_SERDES_CNTL_PLLTEST			 (1L<<15)
+#define BNX2_EMAC_SERDES_CNTL_CDET_EN			 (1L<<16)
+#define BNX2_EMAC_SERDES_CNTL_TBI_LBK			 (1L<<17)
+#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK		 (1L<<18)
+#define BNX2_EMAC_SERDES_CNTL_REV_PHASE			 (1L<<19)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL12			 (0x3L<<20)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL25			 (0x3L<<22)
+
+#define BNX2_EMAC_SERDES_STATUS				0x000014a8
+#define BNX2_EMAC_SERDES_STATUS_RX_STAT			 (0xffL<<0)
+#define BNX2_EMAC_SERDES_STATUS_COMMA_DET		 (1L<<8)
+
+#define BNX2_EMAC_MDIO_COMM				0x000014ac
+#define BNX2_EMAC_MDIO_COMM_DATA			 (0xffffL<<0)
+#define BNX2_EMAC_MDIO_COMM_REG_ADDR			 (0x1fL<<16)
+#define BNX2_EMAC_MDIO_COMM_PHY_ADDR			 (0x1fL<<21)
+#define BNX2_EMAC_MDIO_COMM_COMMAND			 (0x3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0		 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS		 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI	 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3		 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45		 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_FAIL			 (1L<<28)
+#define BNX2_EMAC_MDIO_COMM_START_BUSY			 (1L<<29)
+#define BNX2_EMAC_MDIO_COMM_DISEXT			 (1L<<30)
+
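+/*
+ * Editor's illustrative sketch, not part of the original header: a minimal
+ * clause-22 MDIO register read built from the BNX2_EMAC_MDIO_COMM fields
+ * above.  The command is written with START_BUSY set; once the MAC clears
+ * that bit the DATA field holds the result.  readl()/writel()/udelay() are
+ * used directly here, so <linux/io.h>, <linux/delay.h>, <linux/errno.h> and
+ * <linux/types.h> are assumed to be included by the C file using this.
+ */
+static inline int bnx2_sketch_mdio_read(void __iomem *regs, u32 phy, u32 reg)
+{
+	u32 val;
+	int i;
+
+	val = (phy << 21) | (reg << 16) |
+	      BNX2_EMAC_MDIO_COMM_COMMAND_READ |
+	      BNX2_EMAC_MDIO_COMM_DISEXT |
+	      BNX2_EMAC_MDIO_COMM_START_BUSY;
+	writel(val, regs + BNX2_EMAC_MDIO_COMM);
+
+	/* Poll until START_BUSY clears, then return the 16-bit DATA field. */
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+		val = readl(regs + BNX2_EMAC_MDIO_COMM);
+		if (!(val & BNX2_EMAC_MDIO_COMM_START_BUSY))
+			return val & BNX2_EMAC_MDIO_COMM_DATA;
+	}
+	return -EBUSY;
+}
+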
+#define BNX2_EMAC_MDIO_STATUS				0x000014b0
+#define BNX2_EMAC_MDIO_STATUS_LINK			 (1L<<0)
+#define BNX2_EMAC_MDIO_STATUS_10MB			 (1L<<1)
+
+#define BNX2_EMAC_MDIO_MODE				0x000014b4
+#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE		 (1L<<1)
+#define BNX2_EMAC_MDIO_MODE_AUTO_POLL			 (1L<<4)
+#define BNX2_EMAC_MDIO_MODE_BIT_BANG			 (1L<<8)
+#define BNX2_EMAC_MDIO_MODE_MDIO			 (1L<<9)
+#define BNX2_EMAC_MDIO_MODE_MDIO_OE			 (1L<<10)
+#define BNX2_EMAC_MDIO_MODE_MDC				 (1L<<11)
+#define BNX2_EMAC_MDIO_MODE_MDINT			 (1L<<12)
+#define BNX2_EMAC_MDIO_MODE_EXT_MDINT			 (1L<<13)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT			 (0x1fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI		 (0x3fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI		 (1L<<31)
+
+#define BNX2_EMAC_MDIO_AUTO_STATUS			0x000014b8
+#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR		 (1L<<0)
+
+#define BNX2_EMAC_TX_MODE				0x000014bc
+#define BNX2_EMAC_TX_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_TX_MODE_CS16_TEST			 (1L<<2)
+#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN			 (1L<<3)
+#define BNX2_EMAC_TX_MODE_FLOW_EN			 (1L<<4)
+#define BNX2_EMAC_TX_MODE_BIG_BACKOFF			 (1L<<5)
+#define BNX2_EMAC_TX_MODE_LONG_PAUSE			 (1L<<6)
+#define BNX2_EMAC_TX_MODE_LINK_AWARE			 (1L<<7)
+
+#define BNX2_EMAC_TX_STATUS				0x000014c0
+#define BNX2_EMAC_TX_STATUS_XOFFED			 (1L<<0)
+#define BNX2_EMAC_TX_STATUS_XOFF_SENT			 (1L<<1)
+#define BNX2_EMAC_TX_STATUS_XON_SENT			 (1L<<2)
+#define BNX2_EMAC_TX_STATUS_LINK_UP			 (1L<<3)
+#define BNX2_EMAC_TX_STATUS_UNDERRUN			 (1L<<4)
+#define BNX2_EMAC_TX_STATUS_CS16_ERROR			 (1L<<5)
+
+#define BNX2_EMAC_TX_LENGTHS				0x000014c4
+#define BNX2_EMAC_TX_LENGTHS_SLOT			 (0xffL<<0)
+#define BNX2_EMAC_TX_LENGTHS_IPG			 (0xfL<<8)
+#define BNX2_EMAC_TX_LENGTHS_IPG_CRS			 (0x3L<<12)
+
+#define BNX2_EMAC_RX_MODE				0x000014c8
+#define BNX2_EMAC_RX_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_RX_MODE_FLOW_EN			 (1L<<2)
+#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL		 (1L<<3)
+#define BNX2_EMAC_RX_MODE_KEEP_PAUSE			 (1L<<4)
+#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE		 (1L<<5)
+#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS			 (1L<<6)
+#define BNX2_EMAC_RX_MODE_LLC_CHK			 (1L<<7)
+#define BNX2_EMAC_RX_MODE_PROMISCUOUS			 (1L<<8)
+#define BNX2_EMAC_RX_MODE_NO_CRC_CHK			 (1L<<9)
+#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG			 (1L<<10)
+#define BNX2_EMAC_RX_MODE_FILT_BROADCAST		 (1L<<11)
+#define BNX2_EMAC_RX_MODE_SORT_MODE			 (1L<<12)
+
+#define BNX2_EMAC_RX_STATUS				0x000014cc
+#define BNX2_EMAC_RX_STATUS_FFED			 (1L<<0)
+#define BNX2_EMAC_RX_STATUS_FF_RECEIVED			 (1L<<1)
+#define BNX2_EMAC_RX_STATUS_N_RECEIVED			 (1L<<2)
+
+#define BNX2_EMAC_MULTICAST_HASH0			0x000014d0
+#define BNX2_EMAC_MULTICAST_HASH1			0x000014d4
+#define BNX2_EMAC_MULTICAST_HASH2			0x000014d8
+#define BNX2_EMAC_MULTICAST_HASH3			0x000014dc
+#define BNX2_EMAC_MULTICAST_HASH4			0x000014e0
+#define BNX2_EMAC_MULTICAST_HASH5			0x000014e4
+#define BNX2_EMAC_MULTICAST_HASH6			0x000014e8
+#define BNX2_EMAC_MULTICAST_HASH7			0x000014ec
+#define BNX2_EMAC_CKSUM_ERROR_STATUS			0x000014f0
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_EMAC_RX_STAT_IFHCINOCTETS			0x00001500
+#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS		0x00001504
+#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS		0x00001508
+#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS		0x0000150c
+#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS		0x00001510
+#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS		0x00001514
+#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS		0x00001518
+#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS	0x0000151c
+#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS	0x00001520
+#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED	0x00001524
+#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED	0x00001528
+#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED	0x0000152c
+#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED		0x00001530
+#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG	0x00001534
+#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS		0x00001538
+#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS	0x0000153c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS	0x00001540
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS	0x00001544
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS	0x00001548
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x0000154c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001550
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x00001554
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001558
+#define BNX2_EMAC_RXMAC_DEBUG0				0x0000155c
+#define BNX2_EMAC_RXMAC_DEBUG1				0x00001560
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT	 (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE		 (1L<<1)
+#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC			 (1L<<2)
+#define BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR			 (1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR		 (1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA		 (1L<<5)
+#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START		 (1L<<6)
+#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT		 (0xffffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME		 (0xffL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG2				0x00001564
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE			 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD		 (0x1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA		 (0x2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP		 (0x3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT		 (0x4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP		 (0x5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP		 (0x6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC		 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE		 (0xfL<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE		 (0x0L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0		 (0x1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1		 (0x2L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2		 (0x3L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3		 (0x4L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT		 (0x5L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT		 (0x6L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS		 (0x7L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST		 (0x8L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN			 (0xffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC			 (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED			 (1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE		 (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE		 (0L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED	 (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER		 (0xfL<<19)
+#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA			 (0x1fL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG3				0x00001568
+#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR		 (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR		 (0xffffL<<16)
+
+#define BNX2_EMAC_RXMAC_DEBUG4				0x0000156c
+#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD		 (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE		 (0x3fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE		 (0x0L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2		 (0x1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3		 (0x2L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI		 (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3		 (0x5L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1		 (0x6L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2		 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2		 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3		 (0x8L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2		 (0x9L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3		 (0xaL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1	 (0xeL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2	 (0xfL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK	 (0x10L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC		 (0x11L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2		 (0x12L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3		 (0x13L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1		 (0x14L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2		 (0x15L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3		 (0x16L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE		 (0x17L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC		 (0x18L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE		 (0x19L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD		 (0x1aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC		 (0x1bL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH		 (0x1cL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF		 (0x1dL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON		 (0x1eL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED	 (0x1fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED	 (0x20L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE		 (0x21L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL		 (0x22L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1		 (0x23L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2		 (0x24L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3		 (0x25L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE		 (0x26L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE	 (0x27L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL		 (0x28L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE		 (0x29L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP		 (0x2aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT			 (1L<<22)
+#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED		 (1L<<23)
+#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER		 (1L<<24)
+#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA		 (1L<<25)
+#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND		 (1L<<26)
+#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE			 (1L<<27)
+#define BNX2_EMAC_RXMAC_DEBUG4_START			 (1L<<28)
+
+#define BNX2_EMAC_RXMAC_DEBUG5				0x00001570
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM			 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE		 (0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF	 (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT	 (2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC	 (3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE	 (4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL	 (5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT	 (6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1		 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW		 (0x0L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT		 (0x1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF		 (0x2L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF		 (0x3L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF		 (0x4L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF		 (0x6L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF		 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED		 (1L<<7)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0		 (0x7L<<8)
+#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL	 (1L<<11)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE		 (1L<<12)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA		 (1L<<13)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT		 (1L<<14)
+#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT			 (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE		 (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT		 (1L<<19)
+#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN			 (0xfffL<<20)
+
+#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS		0x00001574
+#define BNX2_EMAC_RX_STAT_AC0				0x00001580
+#define BNX2_EMAC_RX_STAT_AC1				0x00001584
+#define BNX2_EMAC_RX_STAT_AC2				0x00001588
+#define BNX2_EMAC_RX_STAT_AC3				0x0000158c
+#define BNX2_EMAC_RX_STAT_AC4				0x00001590
+#define BNX2_EMAC_RX_STAT_AC5				0x00001594
+#define BNX2_EMAC_RX_STAT_AC6				0x00001598
+#define BNX2_EMAC_RX_STAT_AC7				0x0000159c
+#define BNX2_EMAC_RX_STAT_AC8				0x000015a0
+#define BNX2_EMAC_RX_STAT_AC9				0x000015a4
+#define BNX2_EMAC_RX_STAT_AC10				0x000015a8
+#define BNX2_EMAC_RX_STAT_AC11				0x000015ac
+#define BNX2_EMAC_RX_STAT_AC12				0x000015b0
+#define BNX2_EMAC_RX_STAT_AC13				0x000015b4
+#define BNX2_EMAC_RX_STAT_AC14				0x000015b8
+#define BNX2_EMAC_RX_STAT_AC15				0x000015bc
+#define BNX2_EMAC_RX_STAT_AC16				0x000015c0
+#define BNX2_EMAC_RX_STAT_AC17				0x000015c4
+#define BNX2_EMAC_RX_STAT_AC18				0x000015c8
+#define BNX2_EMAC_RX_STAT_AC19				0x000015cc
+#define BNX2_EMAC_RX_STAT_AC20				0x000015d0
+#define BNX2_EMAC_RX_STAT_AC21				0x000015d4
+#define BNX2_EMAC_RX_STAT_AC22				0x000015d8
+#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC		0x000015dc
+#define BNX2_EMAC_RX_STAT_AC_28				0x000015f4
+#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS			0x00001600
+#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS		0x00001604
+#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS		0x00001608
+#define BNX2_EMAC_TX_STAT_OUTXONSENT			0x0000160c
+#define BNX2_EMAC_TX_STAT_OUTXOFFSENT			0x00001610
+#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE		0x00001614
+#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES	0x00001618
+#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES	0x0000161c
+#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS	0x00001620
+#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS	0x00001624
+#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS	0x00001628
+#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS		0x0000162c
+#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS		0x00001630
+#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS		0x00001634
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS	0x00001638
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS	0x0000163c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS	0x00001640
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x00001644
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001648
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x0000164c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001650
+#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS	0x00001654
+#define BNX2_EMAC_TXMAC_DEBUG0				0x00001658
+#define BNX2_EMAC_TXMAC_DEBUG1				0x0000165c
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE		 (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0		 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0		 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1		 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2		 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3		 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0		 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1		 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE		 (1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC			 (1L<<5)
+#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER		 (0xfL<<6)
+#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE		 (1L<<10)
+#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION		 (1L<<11)
+#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER		 (1L<<12)
+#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED			 (1L<<13)
+#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE			 (1L<<14)
+#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME			 (0xfL<<15)
+#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME		 (0xffL<<19)
+
+#define BNX2_EMAC_TXMAC_DEBUG2				0x00001660
+#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF			 (0x3ffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT		 (0xffffL<<10)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT		 (0x1fL<<26)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT			 (1L<<31)
+
+#define BNX2_EMAC_TXMAC_DEBUG3				0x00001664
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE			 (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1		 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2		 (0x2L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD		 (0x3L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA		 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1		 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2		 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT		 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB		 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG		 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM		 (0xaL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM		 (0xbL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM		 (0xcL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT		 (0xdL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF		 (0xeL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE		 (0x7L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE		 (0x0L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT		 (0x1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI		 (0x2L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC		 (0x3L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2		 (0x4L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3		 (0x5L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC		 (0x6L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE			 (1L<<7)
+#define BNX2_EMAC_TXMAC_DEBUG3_XOFF			 (1L<<8)
+#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER		 (0xfL<<9)
+#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER		 (0x1fL<<13)
+
+#define BNX2_EMAC_TXMAC_DEBUG4				0x00001668
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER		 (0xffffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE		 (0xfL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE		 (0x0L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1		 (0x2L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2		 (0x3L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3		 (0x4L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2		 (0x5L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3		 (0x6L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1		 (0x7L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1		 (0x8L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2		 (0x9L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME		 (0xaL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE		 (0xcL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT		 (0xdL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD		 (0xeL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID		 (1L<<20)
+#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC		 (1L<<21)
+#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED		 (1L<<22)
+#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER		 (1L<<23)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND		 (1L<<24)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING		 (1L<<25)
+#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC			 (1L<<26)
+#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING		 (1L<<27)
+#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN			 (1L<<28)
+#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING			 (1L<<29)
+#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE			 (1L<<30)
+#define BNX2_EMAC_TXMAC_DEBUG4_GO			 (1L<<31)
+
+#define BNX2_EMAC_TX_STAT_AC0				0x00001680
+#define BNX2_EMAC_TX_STAT_AC1				0x00001684
+#define BNX2_EMAC_TX_STAT_AC2				0x00001688
+#define BNX2_EMAC_TX_STAT_AC3				0x0000168c
+#define BNX2_EMAC_TX_STAT_AC4				0x00001690
+#define BNX2_EMAC_TX_STAT_AC5				0x00001694
+#define BNX2_EMAC_TX_STAT_AC6				0x00001698
+#define BNX2_EMAC_TX_STAT_AC7				0x0000169c
+#define BNX2_EMAC_TX_STAT_AC8				0x000016a0
+#define BNX2_EMAC_TX_STAT_AC9				0x000016a4
+#define BNX2_EMAC_TX_STAT_AC10				0x000016a8
+#define BNX2_EMAC_TX_STAT_AC11				0x000016ac
+#define BNX2_EMAC_TX_STAT_AC12				0x000016b0
+#define BNX2_EMAC_TX_STAT_AC13				0x000016b4
+#define BNX2_EMAC_TX_STAT_AC14				0x000016b8
+#define BNX2_EMAC_TX_STAT_AC15				0x000016bc
+#define BNX2_EMAC_TX_STAT_AC16				0x000016c0
+#define BNX2_EMAC_TX_STAT_AC17				0x000016c4
+#define BNX2_EMAC_TX_STAT_AC18				0x000016c8
+#define BNX2_EMAC_TX_STAT_AC19				0x000016cc
+#define BNX2_EMAC_TX_STAT_AC20				0x000016d0
+#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC		0x000016d8
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL			0x000016fc
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC	 (0x7fL<<0)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM	 (0x7fL<<16)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN	 (1L<<31)
+
+
+/*
+ *  rpm_reg definition
+ *  offset: 0x1800
+ */
+#define BNX2_RPM_COMMAND				0x00001800
+#define BNX2_RPM_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RPM_COMMAND_OVERRUN_ABORT			 (1L<<4)
+
+#define BNX2_RPM_STATUS					0x00001804
+#define BNX2_RPM_STATUS_MBUF_WAIT			 (1L<<0)
+#define BNX2_RPM_STATUS_FREE_WAIT			 (1L<<1)
+
+#define BNX2_RPM_CONFIG					0x00001808
+#define BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM		 (1L<<0)
+#define BNX2_RPM_CONFIG_ACPI_ENA			 (1L<<1)
+#define BNX2_RPM_CONFIG_ACPI_KEEP			 (1L<<2)
+#define BNX2_RPM_CONFIG_MP_KEEP				 (1L<<3)
+#define BNX2_RPM_CONFIG_SORT_VECT_VAL			 (0xfL<<4)
+#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT		 (1L<<30)
+#define BNX2_RPM_CONFIG_IGNORE_VLAN			 (1L<<31)
+
+#define BNX2_RPM_MGMT_PKT_CTRL				0x0000180c
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT		 (0xfL<<0)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE		 (0xfL<<4)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN		 (1L<<30)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN			 (1L<<31)
+
+#define BNX2_RPM_VLAN_MATCH0				0x00001810
+#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH1				0x00001814
+#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH2				0x00001818
+#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH3				0x0000181c
+#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_SORT_USER0				0x00001820
+#define BNX2_RPM_SORT_USER0_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER0_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER0_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER0_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER0_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER0_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER0_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH		 (1L<<25)
+#define BNX2_RPM_SORT_USER0_ENA				 (1L<<31)
+
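+/*
+ * Editor's illustrative sketch, not part of the original header: composing a
+ * receive sort vector from the SORT_USER0 bits above.  Bit 0 of PM_EN refers
+ * to perfect-match entry 0 (the station address); ENA is written last so the
+ * sort logic is enabled only after the vector is programmed.  readl()/
+ * writel() again stand in for the driver's own accessors, with <linux/io.h>
+ * and <linux/types.h> assumed included by the C file using this.
+ */
+static inline void bnx2_sketch_set_rx_sort(void __iomem *regs, bool promisc)
+{
+	u32 sort = 1 | BNX2_RPM_SORT_USER0_BC_EN |
+		   BNX2_RPM_SORT_USER0_MC_HSH_EN;
+
+	if (promisc)
+		sort |= BNX2_RPM_SORT_USER0_PROM_EN;
+
+	writel(0, regs + BNX2_RPM_SORT_USER0);
+	writel(sort, regs + BNX2_RPM_SORT_USER0);
+	writel(sort | BNX2_RPM_SORT_USER0_ENA, regs + BNX2_RPM_SORT_USER0);
+}
+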
+#define BNX2_RPM_SORT_USER1				0x00001824
+#define BNX2_RPM_SORT_USER1_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER1_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER1_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER1_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER1_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER1_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER1_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER1_ENA				 (1L<<31)
+
+#define BNX2_RPM_SORT_USER2				0x00001828
+#define BNX2_RPM_SORT_USER2_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER2_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER2_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER2_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER2_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER2_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER2_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER2_ENA				 (1L<<31)
+
+#define BNX2_RPM_SORT_USER3				0x0000182c
+#define BNX2_RPM_SORT_USER3_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER3_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER3_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER3_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER3_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER3_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER3_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER3_ENA				 (1L<<31)
+
+#define BNX2_RPM_STAT_L2_FILTER_DISCARDS		0x00001840
+#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS		0x00001844
+#define BNX2_RPM_STAT_IFINFTQDISCARDS			0x00001848
+#define BNX2_RPM_STAT_IFINMBUFDISCARD			0x0000184c
+#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT		0x00001850
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0		0x00001854
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1		0x00001858
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2		0x0000185c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3		0x00001860
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4		0x00001864
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5		0x00001868
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6		0x0000186c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7		0x00001870
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_STAT_AC0				0x00001880
+#define BNX2_RPM_STAT_AC1				0x00001884
+#define BNX2_RPM_STAT_AC2				0x00001888
+#define BNX2_RPM_STAT_AC3				0x0000188c
+#define BNX2_RPM_STAT_AC4				0x00001890
+#define BNX2_RPM_RC_CNTL_16				0x000018e0
+#define BNX2_RPM_RC_CNTL_16_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_16_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_16_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_16_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_16_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_16_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_16_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_16_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_16_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_16_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_16_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_16_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_16_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_16_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_16			0x000018e4
+#define BNX2_RPM_RC_VALUE_MASK_16_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_16_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_17				0x000018e8
+#define BNX2_RPM_RC_CNTL_17_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_17_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_17_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_17_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_17_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_17_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_17_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_17_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_17_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_17_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_17_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_17_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_17_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_17_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_17			0x000018ec
+#define BNX2_RPM_RC_VALUE_MASK_17_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_17_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_18				0x000018f0
+#define BNX2_RPM_RC_CNTL_18_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_18_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_18_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_18_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_18_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_18_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_18_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_18_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_18_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_18_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_18_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_18_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_18_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_18_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_18			0x000018f4
+#define BNX2_RPM_RC_VALUE_MASK_18_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_18_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_19				0x000018f8
+#define BNX2_RPM_RC_CNTL_19_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_19_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_19_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_19_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_19_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_19_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_19_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_19_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_19_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_19_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_19_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_19_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_19_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_19_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_19			0x000018fc
+#define BNX2_RPM_RC_VALUE_MASK_19_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_19_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_0				0x00001900
+#define BNX2_RPM_RC_CNTL_0_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_0_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_0_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_0_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP			 (2L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP			 (3L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_0_COMP				 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_GREATER			 (2L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_0_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_0_SBIT				 (1L<<19)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL			 (0xfL<<20)
+#define BNX2_RPM_RC_CNTL_0_MAP				 (1L<<24)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_0_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_0_MASK				 (1L<<26)
+#define BNX2_RPM_RC_CNTL_0_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_0_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_0_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_0_NBIT				 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_0			0x00001904
+#define BNX2_RPM_RC_VALUE_MASK_0_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_0_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_1				0x00001908
+#define BNX2_RPM_RC_CNTL_1_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_1_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_1_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_1_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_1_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_1_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_1_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_1_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_1_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_1_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_1_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_1_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_1_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_1_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_1			0x0000190c
+#define BNX2_RPM_RC_VALUE_MASK_1_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_1_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_2				0x00001910
+#define BNX2_RPM_RC_CNTL_2_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_2_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_2_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_2_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_2_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_2_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_2_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_2_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_2_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_2_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_2_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_2_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_2_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_2_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_2			0x00001914
+#define BNX2_RPM_RC_VALUE_MASK_2_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_2_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_3				0x00001918
+#define BNX2_RPM_RC_CNTL_3_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_3_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_3_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_3_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_3_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_3_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_3_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_3_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_3_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_3_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_3_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_3_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_3_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_3_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_3			0x0000191c
+#define BNX2_RPM_RC_VALUE_MASK_3_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_3_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_4				0x00001920
+#define BNX2_RPM_RC_CNTL_4_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_4_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_4_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_4_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_4_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_4_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_4_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_4_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_4_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_4_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_4_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_4_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_4_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_4_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_4			0x00001924
+#define BNX2_RPM_RC_VALUE_MASK_4_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_4_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_5				0x00001928
+#define BNX2_RPM_RC_CNTL_5_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_5_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_5_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_5_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_5_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_5_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_5_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_5_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_5_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_5_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_5_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_5_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_5_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_5_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_5			0x0000192c
+#define BNX2_RPM_RC_VALUE_MASK_5_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_5_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_6				0x00001930
+#define BNX2_RPM_RC_CNTL_6_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_6_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_6_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_6_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_6_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_6_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_6_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_6_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_6_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_6_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_6_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_6_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_6_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_6_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_6			0x00001934
+#define BNX2_RPM_RC_VALUE_MASK_6_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_6_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_7				0x00001938
+#define BNX2_RPM_RC_CNTL_7_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_7_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_7_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_7_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_7_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_7_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_7_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_7_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_7_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_7_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_7_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_7_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_7_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_7_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_7			0x0000193c
+#define BNX2_RPM_RC_VALUE_MASK_7_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_7_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_8				0x00001940
+#define BNX2_RPM_RC_CNTL_8_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_8_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_8_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_8_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_8_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_8_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_8_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_8_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_8_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_8_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_8_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_8_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_8_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_8_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_8			0x00001944
+#define BNX2_RPM_RC_VALUE_MASK_8_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_8_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_9				0x00001948
+#define BNX2_RPM_RC_CNTL_9_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_9_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_9_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_9_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_9_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_9_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_9_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_9_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_9_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_9_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_9_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_9_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_9_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_9_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_9			0x0000194c
+#define BNX2_RPM_RC_VALUE_MASK_9_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_9_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_10				0x00001950
+#define BNX2_RPM_RC_CNTL_10_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_10_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_10_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_10_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_10_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_10_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_10_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_10_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_10_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_10_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_10_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_10_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_10_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_10_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_10			0x00001954
+#define BNX2_RPM_RC_VALUE_MASK_10_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_10_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_11				0x00001958
+#define BNX2_RPM_RC_CNTL_11_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_11_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_11_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_11_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_11_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_11_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_11_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_11_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_11_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_11_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_11_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_11_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_11_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_11_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_11			0x0000195c
+#define BNX2_RPM_RC_VALUE_MASK_11_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_11_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_12				0x00001960
+#define BNX2_RPM_RC_CNTL_12_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_12_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_12_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_12_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_12_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_12_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_12_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_12_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_12_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_12_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_12_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_12_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_12_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_12_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_12			0x00001964
+#define BNX2_RPM_RC_VALUE_MASK_12_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_12_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_13				0x00001968
+#define BNX2_RPM_RC_CNTL_13_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_13_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_13_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_13_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_13_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_13_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_13_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_13_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_13_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_13_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_13_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_13_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_13_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_13_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_13			0x0000196c
+#define BNX2_RPM_RC_VALUE_MASK_13_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_13_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_14				0x00001970
+#define BNX2_RPM_RC_CNTL_14_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_14_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_14_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_14_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_14_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_14_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_14_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_14_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_14_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_14_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_14_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_14_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_14_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_14_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_14			0x00001974
+#define BNX2_RPM_RC_VALUE_MASK_14_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_14_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_15				0x00001978
+#define BNX2_RPM_RC_CNTL_15_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_15_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_15_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_15_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_15_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_15_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_15_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_15_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_15_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_15_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_15_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_15_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_15_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_15_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_15			0x0000197c
+#define BNX2_RPM_RC_VALUE_MASK_15_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_15_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CONFIG				0x00001980
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE			 (0xffffL<<0)
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI		 (0xfffffL<<0)
+#define BNX2_RPM_RC_CONFIG_DEF_CLASS			 (0x7L<<24)
+#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE		 (1L<<31)
+
+#define BNX2_RPM_DEBUG0					0x00001984
+#define BNX2_RPM_DEBUG0_FM_BCNT				 (0xffffL<<0)
+#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD			 (1L<<16)
+#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD			 (1L<<17)
+#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD			 (1L<<18)
+#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD			 (1L<<19)
+#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT			 (1L<<20)
+#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR		 (1L<<21)
+#define BNX2_RPM_DEBUG0_LLC_SNAP			 (1L<<22)
+#define BNX2_RPM_DEBUG0_FM_STARTED			 (1L<<23)
+#define BNX2_RPM_DEBUG0_DONE				 (1L<<24)
+#define BNX2_RPM_DEBUG0_WAIT_4_DONE			 (1L<<25)
+#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM			 (1L<<26)
+#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM		 (1L<<27)
+#define BNX2_RPM_DEBUG0_IGNORE_VLAN			 (1L<<28)
+#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE			 (1L<<31)
+
+#define BNX2_RPM_DEBUG1					0x00001988
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE			 (0L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL		 (1L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC	 (2L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP		 (4L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP		 (8L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START		 (16L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP			 (32L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP			 (64L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP			 (128L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH			 (256L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP			 (512L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD		 (1024L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA			 (2048L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY		 (0x2000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT		 (0x4000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT		 (0x8000L<<0)
+#define BNX2_RPM_DEBUG1_HDR_BCNT			 (0x7ffL<<16)
+#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D			 (1L<<28)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2			 (1L<<29)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1			 (1L<<30)
+#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD			 (1L<<31)
+
+#define BNX2_RPM_DEBUG2					0x0000198c
+#define BNX2_RPM_DEBUG2_CMD_HIT_VEC			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG2_IP_BCNT				 (0xffL<<16)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M4			 (1L<<24)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M3			 (1L<<25)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M2			 (1L<<26)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M1			 (1L<<27)
+#define BNX2_RPM_DEBUG2_IPIPE_EMPTY			 (1L<<28)
+#define BNX2_RPM_DEBUG2_FM_DISCARD			 (1L<<29)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2		 (1L<<30)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1		 (1L<<31)
+
+#define BNX2_RPM_DEBUG3					0x00001990
+#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR			 (0x1ffL<<0)
+#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT		 (1L<<9)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT		 (1L<<10)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT		 (1L<<11)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ		 (1L<<12)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ		 (1L<<13)
+#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL		 (1L<<14)
+#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP		 (1L<<15)
+#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT		 (0xfL<<16)
+#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL		 (1L<<21)
+#define BNX2_RPM_DEBUG3_DROP_NXT_VLD			 (1L<<22)
+#define BNX2_RPM_DEBUG3_DROP_NXT			 (1L<<23)
+#define BNX2_RPM_DEBUG3_FTQ_FSM				 (0x3L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE			 (0x0L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK		 (0x1L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE		 (0x2L<<24)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM			 (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF		 (0x0L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF		 (0x1L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA		 (0x2L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA		 (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF		 (0x4L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK		 (0x5L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD	 (0x6L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE		 (0x7L<<26)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM			 (1L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE			 (0L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK		 (1L<<29)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM			 (1L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF		 (0x0L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF		 (0x1L<<30)
+#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR			 (1L<<31)
+
+#define BNX2_RPM_DEBUG4					0x00001994
+#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER		 (0x1ffffffL<<0)
+#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE			 (0x7L<<25)
+#define BNX2_RPM_DEBUG4_MBWRITE_FSM			 (0x7L<<28)
+#define BNX2_RPM_DEBUG4_DFIFO_EMPTY			 (1L<<31)
+
+#define BNX2_RPM_DEBUG5					0x00001998
+#define BNX2_RPM_DEBUG5_RDROP_WPTR			 (0x1fL<<0)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR			 (0x1fL<<5)
+#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR			 (0x1fL<<10)
+#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR			 (0x1fL<<15)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY		 (1L<<20)
+#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY			 (1L<<21)
+#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR	 (1L<<22)
+#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT		 (1L<<23)
+#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD			 (1L<<24)
+#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL		 (1L<<25)
+#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY		 (1L<<26)
+#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY		 (1L<<27)
+#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY		 (1L<<28)
+#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY		 (1L<<29)
+#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T			 (1L<<30)
+#define BNX2_RPM_DEBUG5_HOLDREG_RD			 (1L<<31)
+
+#define BNX2_RPM_DEBUG6					0x0000199c
+#define BNX2_RPM_DEBUG6_ACPI_VEC			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG6_VEC				 (0xffffL<<16)
+
+#define BNX2_RPM_DEBUG7					0x000019a0
+#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC		 (0xffffffffL<<0)
+
+#define BNX2_RPM_DEBUG8					0x000019a4
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM			 (0xfL<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE		 (0L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR		 (1L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR		 (2L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR		 (3L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF	 (4L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA		 (5L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR		 (6L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR		 (7L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR		 (8L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR		 (9L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF		 (10L<<0)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W0			 (1L<<4)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA		 (1L<<5)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT		 (1L<<6)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3		 (1L<<7)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2		 (1L<<8)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES		 (1L<<9)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES		 (1L<<10)
+#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES		 (1L<<11)
+#define BNX2_RPM_DEBUG8_EOF_DET				 (1L<<12)
+#define BNX2_RPM_DEBUG8_SOF_DET				 (1L<<13)
+#define BNX2_RPM_DEBUG8_WAIT_4_SOF			 (1L<<14)
+#define BNX2_RPM_DEBUG8_ALL_DONE			 (1L<<15)
+#define BNX2_RPM_DEBUG8_THBUF_ADDR			 (0x7fL<<16)
+#define BNX2_RPM_DEBUG8_BYTE_CTR			 (0xffL<<24)
+
+#define BNX2_RPM_DEBUG9					0x000019a8
+#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT			 (0x7L<<0)
+#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY			 (1L<<3)
+#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT			 (0x7L<<4)
+#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED	 (1L<<28)
+#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED		 (1L<<29)
+#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT			 (1L<<30)
+#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN			 (1L<<31)
+#define BNX2_RPM_DEBUG9_BEMEM_R_XI			 (0x1fL<<0)
+#define BNX2_RPM_DEBUG9_EO_XI				 (1L<<5)
+#define BNX2_RPM_DEBUG9_AEOF_DE_XI			 (1L<<6)
+#define BNX2_RPM_DEBUG9_SO_XI				 (1L<<7)
+#define BNX2_RPM_DEBUG9_WD64_CT_XI			 (0x1fL<<8)
+#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI			 (0x7L<<13)
+#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI		 (0xfL<<16)
+#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI		 (0x3ffL<<20)
+#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI			 (1L<<30)
+#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI		 (1L<<31)
+
+#define BNX2_RPM_ACPI_DBG_BUF_W00			0x000019c0
+#define BNX2_RPM_ACPI_DBG_BUF_W01			0x000019c4
+#define BNX2_RPM_ACPI_DBG_BUF_W02			0x000019c8
+#define BNX2_RPM_ACPI_DBG_BUF_W03			0x000019cc
+#define BNX2_RPM_ACPI_DBG_BUF_W10			0x000019d0
+#define BNX2_RPM_ACPI_DBG_BUF_W11			0x000019d4
+#define BNX2_RPM_ACPI_DBG_BUF_W12			0x000019d8
+#define BNX2_RPM_ACPI_DBG_BUF_W13			0x000019dc
+#define BNX2_RPM_ACPI_DBG_BUF_W20			0x000019e0
+#define BNX2_RPM_ACPI_DBG_BUF_W21			0x000019e4
+#define BNX2_RPM_ACPI_DBG_BUF_W22			0x000019e8
+#define BNX2_RPM_ACPI_DBG_BUF_W23			0x000019ec
+#define BNX2_RPM_ACPI_DBG_BUF_W30			0x000019f0
+#define BNX2_RPM_ACPI_DBG_BUF_W31			0x000019f4
+#define BNX2_RPM_ACPI_DBG_BUF_W32			0x000019f8
+#define BNX2_RPM_ACPI_DBG_BUF_W33			0x000019fc
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL			0x00001a00
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS	 (0xffffL<<0)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD		 (1L<<28)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE		 (1L<<29)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT		 (1L<<30)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR		 (1L<<31)
+
+#define BNX2_RPM_ACPI_PATTERN_CTRL			0x00001a04
+#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID		 (0xfL<<0)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR		 (1L<<30)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_WR			 (1L<<31)
+
+#define BNX2_RPM_ACPI_DATA				0x00001a08
+#define BNX2_RPM_ACPI_DATA_PATTERN_BE			 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN0			0x00001a0c
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN1			0x00001a10
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC0			0x00001a18
+#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC1			0x00001a1c
+#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC2			0x00001a20
+#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC3			0x00001a24
+#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC4			0x00001a28
+#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC5			0x00001a2c
+#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC6			0x00001a30
+#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC7			0x00001a34
+#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7		 (0xffffffffL<<0)
+
+
+/*
+ *  rlup_reg definition
+ *  offset: 0x2000
+ */
+#define BNX2_RLUP_RSS_CONFIG				0x0000201c
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI		 (0x3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI	 (0L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI	 (1L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI	 (2L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI	 (3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI		 (0x3L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI	 (0L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI	 (1L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI	 (2L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI	 (3L<<2)
+
+#define BNX2_RLUP_RSS_COMMAND				0x00002048
+#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR	 (0xfUL<<0)
+#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK		 (0xffUL<<4)
+#define BNX2_RLUP_RSS_COMMAND_WRITE			 (1UL<<12)
+#define BNX2_RLUP_RSS_COMMAND_READ			 (1UL<<13)
+#define BNX2_RLUP_RSS_COMMAND_HASH_MASK			 (0x7UL<<14)
+
+#define BNX2_RLUP_RSS_DATA				0x0000204c
+
+
+/*
+ *  rbuf_reg definition
+ *  offset: 0x200000
+ */
+#define BNX2_RBUF_COMMAND				0x00200000
+#define BNX2_RBUF_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RBUF_COMMAND_FREE_INIT			 (1L<<1)
+#define BNX2_RBUF_COMMAND_RAM_INIT			 (1L<<2)
+#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL		 (1L<<3)
+#define BNX2_RBUF_COMMAND_OVER_FREE			 (1L<<4)
+#define BNX2_RBUF_COMMAND_ALLOC_REQ			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE		 (1L<<6)
+#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI		 (1L<<6)
+#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI	 (1L<<7)
+
+#define BNX2_RBUF_STATUS1				0x00200004
+#define BNX2_RBUF_STATUS1_FREE_COUNT			 (0x3ffL<<0)
+
+#define BNX2_RBUF_STATUS2				0x00200008
+#define BNX2_RBUF_STATUS2_FREE_TAIL			 (0x1ffL<<0)
+#define BNX2_RBUF_STATUS2_FREE_HEAD			 (0x1ffL<<16)
+
+#define BNX2_RBUF_CONFIG				0x0020000c
+#define BNX2_RBUF_CONFIG_XOFF_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 31 / 1000) + 54)
+#define BNX2_RBUF_CONFIG_XON_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 39 / 1000) + 66)
+#define BNX2_RBUF_CONFIG_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) |		 \
+	(BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_FW_BUF_ALLOC				0x00200010
+#define BNX2_RBUF_FW_BUF_ALLOC_VALUE			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_ALLOC_TYPE			 (1L<<16)
+#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ		 (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_FREE				0x00200014
+#define BNX2_RBUF_FW_BUF_FREE_COUNT			 (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_FREE_TAIL			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_FREE_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_FREE_TYPE			 (1L<<25)
+#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ			 (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_SEL				0x00200018
+#define BNX2_RBUF_FW_BUF_SEL_COUNT			 (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_SEL_TAIL			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_SEL_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ			 (1L<<31)
+
+#define BNX2_RBUF_CONFIG2				0x0020001c
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu)	 \
+	((((mtu) - 1500) * 4 / 1000) + 5)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu)	 \
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG2_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) |	 \
+	(BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_CONFIG3				0x00200020
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 12 / 1000) + 18)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG3_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) |	 \
+	(BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_PKT_DATA				0x00208000
+#define BNX2_RBUF_CLIST_DATA				0x00210000
+#define BNX2_RBUF_BUF_DATA				0x00220000
+
+
+/*
+ *  rv2p_reg definition
+ *  offset: 0x2800
+ */
+#define BNX2_RV2P_COMMAND				0x00002800
+#define BNX2_RV2P_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RV2P_COMMAND_PROC1_INTRPT			 (1L<<1)
+#define BNX2_RV2P_COMMAND_PROC2_INTRPT			 (1L<<2)
+#define BNX2_RV2P_COMMAND_ABORT0			 (1L<<4)
+#define BNX2_RV2P_COMMAND_ABORT1			 (1L<<5)
+#define BNX2_RV2P_COMMAND_ABORT2			 (1L<<6)
+#define BNX2_RV2P_COMMAND_ABORT3			 (1L<<7)
+#define BNX2_RV2P_COMMAND_ABORT4			 (1L<<8)
+#define BNX2_RV2P_COMMAND_ABORT5			 (1L<<9)
+#define BNX2_RV2P_COMMAND_PROC1_RESET			 (1L<<16)
+#define BNX2_RV2P_COMMAND_PROC2_RESET			 (1L<<17)
+#define BNX2_RV2P_COMMAND_CTXIF_RESET			 (1L<<18)
+
+#define BNX2_RV2P_STATUS				0x00002804
+#define BNX2_RV2P_STATUS_ALWAYS_0			 (1L<<0)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT		 (1L<<8)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT		 (1L<<9)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT		 (1L<<10)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT		 (1L<<11)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT		 (1L<<12)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT		 (1L<<13)
+
+#define BNX2_RV2P_CONFIG				0x00002808
+#define BNX2_RV2P_CONFIG_STALL_PROC1			 (1L<<0)
+#define BNX2_RV2P_CONFIG_STALL_PROC2			 (1L<<1)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0		 (1L<<8)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1		 (1L<<9)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2		 (1L<<10)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3		 (1L<<11)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4		 (1L<<12)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5		 (1L<<13)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0		 (1L<<16)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1		 (1L<<17)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2		 (1L<<18)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3		 (1L<<19)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4		 (1L<<20)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5		 (1L<<21)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE			 (0xfL<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256			 (0L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512			 (1L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K			 (2L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K			 (3L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K			 (4L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K			 (5L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K			 (6L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K			 (7L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K			 (8L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K			 (9L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K			 (10L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K			 (11L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M			 (12L<<24)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_0			0x00002810
+#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_1			0x00002814
+#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_2			0x00002818
+#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_3			0x0000281c
+#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_INSTR_HIGH				0x00002830
+#define BNX2_RV2P_INSTR_HIGH_HIGH			 (0x1fL<<0)
+
+#define BNX2_RV2P_INSTR_LOW				0x00002834
+#define BNX2_RV2P_INSTR_LOW_LOW				 (0xffffffffL<<0)
+
+#define BNX2_RV2P_PROC1_ADDR_CMD			0x00002838
+#define BNX2_RV2P_PROC1_ADDR_CMD_ADD			 (0x3ffL<<0)
+#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR			 (1L<<31)
+
+#define BNX2_RV2P_PROC2_ADDR_CMD			0x0000283c
+#define BNX2_RV2P_PROC2_ADDR_CMD_ADD			 (0x3ffL<<0)
+#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR			 (1L<<31)
+
+#define BNX2_RV2P_PROC1_GRC_DEBUG			0x00002840
+#define BNX2_RV2P_PROC2_GRC_DEBUG			0x00002844
+#define BNX2_RV2P_GRC_PROC_DEBUG			0x00002848
+#define BNX2_RV2P_DEBUG_VECT_PEEK			0x0000284c
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_RV2P_MPFE_PFE_CTL				0x00002afc
+#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RV2P_RV2PPQ				0x00002b40
+#define BNX2_RV2P_PFTQ_CMD				0x00002b78
+#define BNX2_RV2P_PFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_PFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_PFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_PFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_PFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_PFTQ_CTL				0x00002b7c
+#define BNX2_RV2P_PFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_PFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PTQ				0x00002b80
+#define BNX2_RV2P_TFTQ_CMD				0x00002bb8
+#define BNX2_RV2P_TFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_TFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_TFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_TFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_TFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_TFTQ_CTL				0x00002bbc
+#define BNX2_RV2P_TFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_TFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PMQ				0x00002bc0
+#define BNX2_RV2P_MFTQ_CMD				0x00002bf8
+#define BNX2_RV2P_MFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_MFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_MFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_MFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_MFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_MFTQ_CTL				0x00002bfc
+#define BNX2_RV2P_MFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_MFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+
+/*
+ *  mq_reg definition
+ *  offset: 0x3c00
+ */
+#define BNX2_MQ_COMMAND					0x00003c00
+#define BNX2_MQ_COMMAND_ENABLED				 (1L<<0)
+#define BNX2_MQ_COMMAND_INIT				 (1L<<1)
+#define BNX2_MQ_COMMAND_OVERFLOW			 (1L<<4)
+#define BNX2_MQ_COMMAND_WR_ERROR			 (1L<<5)
+#define BNX2_MQ_COMMAND_RD_ERROR			 (1L<<6)
+#define BNX2_MQ_COMMAND_IDB_CFG_ERROR			 (1L<<7)
+#define BNX2_MQ_COMMAND_IDB_OVERFLOW			 (1L<<10)
+#define BNX2_MQ_COMMAND_NO_BIN_ERROR			 (1L<<11)
+#define BNX2_MQ_COMMAND_NO_MAP_ERROR			 (1L<<12)
+
+#define BNX2_MQ_STATUS					0x00003c04
+#define BNX2_MQ_STATUS_CTX_ACCESS_STAT			 (1L<<16)
+#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT		 (1L<<17)
+#define BNX2_MQ_STATUS_PCI_STALL_STAT			 (1L<<18)
+#define BNX2_MQ_STATUS_IDB_OFLOW_STAT			 (1L<<19)
+
+#define BNX2_MQ_CONFIG					0x00003c08
+#define BNX2_MQ_CONFIG_TX_HIGH_PRI			 (1L<<0)
+#define BNX2_MQ_CONFIG_HALT_DIS				 (1L<<1)
+#define BNX2_MQ_CONFIG_BIN_MQ_MODE			 (1L<<2)
+#define BNX2_MQ_CONFIG_DIS_IDB_DROP			 (1L<<3)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE			 (0x7L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256		 (0L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512		 (1L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K		 (2L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K		 (3L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K		 (4L<<4)
+#define BNX2_MQ_CONFIG_MAX_DEPTH			 (0x7fL<<8)
+#define BNX2_MQ_CONFIG_CUR_DEPTH			 (0x7fL<<20)
+
+#define BNX2_MQ_ENQUEUE1				0x00003c0c
+#define BNX2_MQ_ENQUEUE1_OFFSET				 (0x3fL<<2)
+#define BNX2_MQ_ENQUEUE1_CID				 (0x3fffL<<8)
+#define BNX2_MQ_ENQUEUE1_BYTE_MASK			 (0xfL<<24)
+#define BNX2_MQ_ENQUEUE1_KNL_MODE			 (1L<<28)
+
+#define BNX2_MQ_ENQUEUE2				0x00003c10
+#define BNX2_MQ_BAD_WR_ADDR				0x00003c14
+#define BNX2_MQ_BAD_RD_ADDR				0x00003c18
+#define BNX2_MQ_KNL_BYP_WIND_START			0x00003c1c
+#define BNX2_MQ_KNL_BYP_WIND_START_VALUE		 (0xfffffL<<12)
+
+#define BNX2_MQ_KNL_WIND_END				0x00003c20
+#define BNX2_MQ_KNL_WIND_END_VALUE			 (0xffffffL<<8)
+
+#define BNX2_MQ_KNL_WRITE_MASK1				0x00003c24
+#define BNX2_MQ_KNL_TX_MASK1				0x00003c28
+#define BNX2_MQ_KNL_CMD_MASK1				0x00003c2c
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1			0x00003c30
+#define BNX2_MQ_KNL_RX_V2P_MASK1			0x00003c34
+#define BNX2_MQ_KNL_WRITE_MASK2				0x00003c38
+#define BNX2_MQ_KNL_TX_MASK2				0x00003c3c
+#define BNX2_MQ_KNL_CMD_MASK2				0x00003c40
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2			0x00003c44
+#define BNX2_MQ_KNL_RX_V2P_MASK2			0x00003c48
+#define BNX2_MQ_KNL_BYP_WRITE_MASK1			0x00003c4c
+#define BNX2_MQ_KNL_BYP_TX_MASK1			0x00003c50
+#define BNX2_MQ_KNL_BYP_CMD_MASK1			0x00003c54
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1		0x00003c58
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1			0x00003c5c
+#define BNX2_MQ_KNL_BYP_WRITE_MASK2			0x00003c60
+#define BNX2_MQ_KNL_BYP_TX_MASK2			0x00003c64
+#define BNX2_MQ_KNL_BYP_CMD_MASK2			0x00003c68
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2		0x00003c6c
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2			0x00003c70
+#define BNX2_MQ_MEM_WR_ADDR				0x00003c74
+#define BNX2_MQ_MEM_WR_ADDR_VALUE			 (0x3fL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA0				0x00003c78
+#define BNX2_MQ_MEM_WR_DATA0_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA1				0x00003c7c
+#define BNX2_MQ_MEM_WR_DATA1_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA2				0x00003c80
+#define BNX2_MQ_MEM_WR_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI			 (0x7fffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_ADDR				0x00003c84
+#define BNX2_MQ_MEM_RD_ADDR_VALUE			 (0x3fL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA0				0x00003c88
+#define BNX2_MQ_MEM_RD_DATA0_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA1				0x00003c8c
+#define BNX2_MQ_MEM_RD_DATA1_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA2				0x00003c90
+#define BNX2_MQ_MEM_RD_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI			 (0x7fffffffL<<0)
+
+#define BNX2_MQ_MAP_L2_3				0x00003d2c
+#define BNX2_MQ_MAP_L2_3_MQ_OFFSET			 (0xffL<<0)
+#define BNX2_MQ_MAP_L2_3_SZ				 (0x3L<<8)
+#define BNX2_MQ_MAP_L2_3_CTX_OFFSET			 (0x2ffL<<10)
+#define BNX2_MQ_MAP_L2_3_BIN_OFFSET			 (0x7L<<23)
+#define BNX2_MQ_MAP_L2_3_ARM				 (0x3L<<26)
+#define BNX2_MQ_MAP_L2_3_ENA				 (0x1L<<31)
+#define BNX2_MQ_MAP_L2_3_DEFAULT			 0x82004646
+
+#define BNX2_MQ_MAP_L2_5				0x00003d34
+#define BNX2_MQ_MAP_L2_5_ARM				 (0x3L<<26)
+
+/*
+ *  tsch_reg definition
+ *  offset: 0x4c00
+ */
+#define BNX2_TSCH_TSS_CFG				0x00004c1c
+#define BNX2_TSCH_TSS_CFG_TSS_START_CID			 (0x7ffL<<8)
+#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON		 (0xfL<<24)
+
+
+
+/*
+ *  tbdr_reg definition
+ *  offset: 0x5000
+ */
+#define BNX2_TBDR_COMMAND				0x00005000
+#define BNX2_TBDR_COMMAND_ENABLE			 (1L<<0)
+#define BNX2_TBDR_COMMAND_SOFT_RST			 (1L<<1)
+#define BNX2_TBDR_COMMAND_MSTR_ABORT			 (1L<<4)
+
+#define BNX2_TBDR_STATUS				0x00005004
+#define BNX2_TBDR_STATUS_DMA_WAIT			 (1L<<0)
+#define BNX2_TBDR_STATUS_FTQ_WAIT			 (1L<<1)
+#define BNX2_TBDR_STATUS_FIFO_OVERFLOW			 (1L<<2)
+#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW			 (1L<<3)
+#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR		 (1L<<4)
+#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT			 (1L<<5)
+#define BNX2_TBDR_STATUS_BURST_CNT			 (1L<<6)
+
+#define BNX2_TBDR_CONFIG				0x00005008
+#define BNX2_TBDR_CONFIG_MAX_BDS			 (0xffL<<0)
+#define BNX2_TBDR_CONFIG_SWAP_MODE			 (1L<<8)
+#define BNX2_TBDR_CONFIG_PRIORITY			 (1L<<9)
+#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS		 (1L<<10)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE			 (0xfL<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256			 (0L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512			 (1L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K			 (2L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K			 (3L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K			 (4L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K			 (5L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K			 (6L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K			 (7L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K			 (8L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K			 (9L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K			 (10L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K			 (11L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M			 (12L<<24)
+
+#define BNX2_TBDR_DEBUG_VECT_PEEK			0x0000500c
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_TBDR_CKSUM_ERROR_STATUS			0x00005010
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TBDR_TBDRQ					0x000053c0
+#define BNX2_TBDR_FTQ_CMD				0x000053f8
+#define BNX2_TBDR_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TBDR_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TBDR_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TBDR_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TBDR_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TBDR_FTQ_CTL				0x000053fc
+#define BNX2_TBDR_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TBDR_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+/*
+ *  tbdc definition
+ *  offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND                               0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED                    (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH                      (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST                   (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB                    (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR              (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR           (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR          (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR           (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR          (1UL<<8)
+
+#define BNX2_TBDC_STATUS				0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT                        (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR                               0x5424
+
+#define BNX2_TBDC_BIDX                                  0x542c
+#define BNX2_TBDC_BDIDX_BDIDX                            (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD                              (0xffUL<<24)
+
+#define BNX2_TBDC_CID                                   0x5430
+
+#define BNX2_TBDC_CAM_OPCODE                            0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE                      (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH               (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE          (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE           (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE            (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ             (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE            (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ             (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX                 (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID                   (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD                   (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED                  (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS                  (0xffUL<<8)
+
+
+/*
+ *  tdma_reg definition
+ *  offset: 0x5c00
+ */
+#define BNX2_TDMA_COMMAND				0x00005c00
+#define BNX2_TDMA_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_TDMA_COMMAND_MASTER_ABORT			 (1L<<4)
+#define BNX2_TDMA_COMMAND_CS16_ERR			 (1L<<5)
+#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT		 (1L<<7)
+#define BNX2_TDMA_COMMAND_MASK_CS1			 (1L<<20)
+#define BNX2_TDMA_COMMAND_MASK_CS2			 (1L<<21)
+#define BNX2_TDMA_COMMAND_MASK_CS3			 (1L<<22)
+#define BNX2_TDMA_COMMAND_MASK_CS4			 (1L<<23)
+#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR		 (1L<<24)
+#define BNX2_TDMA_COMMAND_OFIFO_CLR			 (1L<<30)
+#define BNX2_TDMA_COMMAND_IFIFO_CLR			 (1L<<31)
+
+#define BNX2_TDMA_STATUS				0x00005c04
+#define BNX2_TDMA_STATUS_DMA_WAIT			 (1L<<0)
+#define BNX2_TDMA_STATUS_PAYLOAD_WAIT			 (1L<<1)
+#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT			 (1L<<2)
+#define BNX2_TDMA_STATUS_LOCK_WAIT			 (1L<<3)
+#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT			 (1L<<16)
+#define BNX2_TDMA_STATUS_BURST_CNT			 (1L<<17)
+#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH		 (0x3fL<<20)
+#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW			 (1L<<30)
+#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW			 (1L<<31)
+
+#define BNX2_TDMA_CONFIG				0x00005c08
+#define BNX2_TDMA_CONFIG_ONE_DMA			 (1L<<0)
+#define BNX2_TDMA_CONFIG_ONE_RECORD			 (1L<<1)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN			 (0x3L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0			 (0L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1			 (1L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2			 (2L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3			 (3L<<2)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ			 (0xfL<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_64			 (0L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_128			 (0x4L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_256			 (0x6L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_512			 (0x8L<<4)
+#define BNX2_TDMA_CONFIG_LINE_SZ			 (0xfL<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_64			 (0L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_128			 (4L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_256			 (6L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_512			 (8L<<8)
+#define BNX2_TDMA_CONFIG_ALIGN_ENA			 (1L<<15)
+#define BNX2_TDMA_CONFIG_CHK_L2_BD			 (1L<<16)
+#define BNX2_TDMA_CONFIG_CMPL_ENTRY			 (1L<<17)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP			 (1L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_3			 (0L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_2			 (1L<<19)
+#define BNX2_TDMA_CONFIG_FIFO_CMP			 (0xfL<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI			 (0x7L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI		 (0L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI		 (1L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI		 (2L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI		 (3L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI		 (4L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI		 (5L<<20)
+#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI			 (1L<<23)
+#define BNX2_TDMA_CONFIG_BYTES_OST_XI			 (0x7L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI		 (0L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI		 (1L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI		 (2L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI		 (3L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI		 (4L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI		 (5L<<24)
+#define BNX2_TDMA_CONFIG_HC_BYPASS_XI			 (1L<<27)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_XI			 (0x7L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI		 (0L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI		 (1L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI		 (2L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI		 (3L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI		 (4L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI		 (5L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI			 (1L<<31)
+
+#define BNX2_TDMA_PAYLOAD_PROD				0x00005c0c
+#define BNX2_TDMA_PAYLOAD_PROD_VALUE			 (0x1fffL<<3)
+
+#define BNX2_TDMA_DBG_WATCHDOG				0x00005c10
+#define BNX2_TDMA_DBG_TRIGGER				0x00005c14
+#define BNX2_TDMA_DMAD_FSM				0x00005c80
+#define BNX2_TDMA_DMAD_FSM_BD_INVLD			 (1L<<0)
+#define BNX2_TDMA_DMAD_FSM_PUSH				 (0xfL<<4)
+#define BNX2_TDMA_DMAD_FSM_ARB_TBDC			 (0x3L<<8)
+#define BNX2_TDMA_DMAD_FSM_ARB_CTX			 (1L<<12)
+#define BNX2_TDMA_DMAD_FSM_DR_INTF			 (1L<<16)
+#define BNX2_TDMA_DMAD_FSM_DMAD				 (0x7L<<20)
+#define BNX2_TDMA_DMAD_FSM_BD				 (0xfL<<24)
+
+#define BNX2_TDMA_DMAD_STATUS				0x00005c84
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY		 (0x3L<<0)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY		 (0x3L<<4)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY		 (0x3L<<8)
+#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM			 (0xfL<<12)
+
+#define BNX2_TDMA_DR_INTF_FSM				0x00005c88
+#define BNX2_TDMA_DR_INTF_FSM_L2_COMP			 (0x3L<<0)
+#define BNX2_TDMA_DR_INTF_FSM_TPATQ			 (0x7L<<4)
+#define BNX2_TDMA_DR_INTF_FSM_TPBUF			 (0x3L<<8)
+#define BNX2_TDMA_DR_INTF_FSM_DR_BUF			 (0x7L<<12)
+#define BNX2_TDMA_DR_INTF_FSM_DMAD			 (0x7L<<16)
+
+#define BNX2_TDMA_DR_INTF_STATUS			0x00005c8c
+#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE		 (0x7L<<0)
+#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL		 (0x3L<<4)
+#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR		 (0x7L<<8)
+#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR		 (0xfL<<12)
+#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT		 (0x7L<<16)
+
+#define BNX2_TDMA_PUSH_FSM				0x00005c90
+#define BNX2_TDMA_BD_IF_DEBUG				0x00005c94
+#define BNX2_TDMA_DMAD_IF_DEBUG				0x00005c98
+#define BNX2_TDMA_CTX_IF_DEBUG				0x00005c9c
+#define BNX2_TDMA_TPBUF_IF_DEBUG			0x00005ca0
+#define BNX2_TDMA_DR_IF_DEBUG				0x00005ca4
+#define BNX2_TDMA_TPATQ_IF_DEBUG			0x00005ca8
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM			0x00005cac
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMA_PCIE_CKSUM			0x00005cb0
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMAQ					0x00005fc0
+#define BNX2_TDMA_FTQ_CMD				0x00005ff8
+#define BNX2_TDMA_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TDMA_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TDMA_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TDMA_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TDMA_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TDMA_FTQ_CTL				0x00005ffc
+#define BNX2_TDMA_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TDMA_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+
+/*
+ *  hc_reg definition
+ *  offset: 0x6800
+ */
+#define BNX2_HC_COMMAND					0x00006800
+#define BNX2_HC_COMMAND_ENABLE				 (1L<<0)
+#define BNX2_HC_COMMAND_SKIP_ABORT			 (1L<<4)
+#define BNX2_HC_COMMAND_COAL_NOW			 (1L<<16)
+#define BNX2_HC_COMMAND_COAL_NOW_WO_INT			 (1L<<17)
+#define BNX2_HC_COMMAND_STATS_NOW			 (1L<<18)
+#define BNX2_HC_COMMAND_FORCE_INT			 (0x3L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_NULL			 (0L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_HIGH			 (1L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_LOW			 (2L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_FREE			 (3L<<19)
+#define BNX2_HC_COMMAND_CLR_STAT_NOW			 (1L<<21)
+#define BNX2_HC_COMMAND_MAIN_PWR_INT			 (1L<<22)
+#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT		 (1L<<27)
+
+#define BNX2_HC_STATUS					0x00006804
+#define BNX2_HC_STATUS_MASTER_ABORT			 (1L<<0)
+#define BNX2_HC_STATUS_PARITY_ERROR_STATE		 (1L<<1)
+#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT			 (1L<<16)
+#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT		 (1L<<17)
+#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT		 (1L<<18)
+#define BNX2_HC_STATUS_NUM_INT_GEN_STAT			 (1L<<19)
+#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT		 (1L<<20)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT	 (1L<<23)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT	 (1L<<24)
+#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT	 (1L<<25)
+
+#define BNX2_HC_CONFIG					0x00006808
+#define BNX2_HC_CONFIG_COLLECT_STATS			 (1L<<0)
+#define BNX2_HC_CONFIG_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_CONFIG_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_CONFIG_COM_TMR_MODE			 (1L<<3)
+#define BNX2_HC_CONFIG_CMD_TMR_MODE			 (1L<<4)
+#define BNX2_HC_CONFIG_STATISTIC_PRIORITY		 (1L<<5)
+#define BNX2_HC_CONFIG_STATUS_PRIORITY			 (1L<<6)
+#define BNX2_HC_CONFIG_STAT_MEM_ADDR			 (0xffL<<8)
+#define BNX2_HC_CONFIG_PER_MODE				 (1L<<16)
+#define BNX2_HC_CONFIG_ONE_SHOT				 (1L<<17)
+#define BNX2_HC_CONFIG_USE_INT_PARAM			 (1L<<18)
+#define BNX2_HC_CONFIG_SET_MASK_AT_RD			 (1L<<19)
+#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT		 (0xfL<<20)
+#define BNX2_HC_CONFIG_SB_ADDR_INC			 (0x7L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_64B			 (0L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_128B			 (1L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_256B			 (2L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_512B			 (3L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B		 (4L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B		 (5L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B		 (6L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B		 (7L<<24)
+#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR		 (1L<<29)
+#define BNX2_HC_CONFIG_UNMASK_ALL			 (1L<<30)
+#define BNX2_HC_CONFIG_TX_SEL				 (1L<<31)
+
+#define BNX2_HC_ATTN_BITS_ENABLE			0x0000680c
+#define BNX2_HC_STATUS_ADDR_L				0x00006810
+#define BNX2_HC_STATUS_ADDR_H				0x00006814
+#define BNX2_HC_STATISTICS_ADDR_L			0x00006818
+#define BNX2_HC_STATISTICS_ADDR_H			0x0000681c
+#define BNX2_HC_TX_QUICK_CONS_TRIP			0x00006820
+#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP				0x00006824
+#define BNX2_HC_COMP_PROD_TRIP_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP			0x00006828
+#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS				0x0000682c
+#define BNX2_HC_RX_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS				0x00006830
+#define BNX2_HC_TX_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS				0x00006834
+#define BNX2_HC_COM_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS				0x00006838
+#define BNX2_HC_CMD_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS				0x0000683c
+#define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_STAT_COLLECT_TICKS			0x00006840
+#define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS	 (0xffL<<4)
+
+#define BNX2_HC_STATS_TICKS				0x00006844
+#define BNX2_HC_STATS_TICKS_HC_STAT_TICKS		 (0xffffL<<8)
+
+#define BNX2_HC_STATS_INTERRUPT_STATUS			0x00006848
+#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS	 (0x1ffL<<0)
+#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS	 (0x1ffL<<16)
+
+#define BNX2_HC_STAT_MEM_DATA				0x0000684c
+#define BNX2_HC_STAT_GEN_SEL_0				0x00006850
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0	 (0L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1	 (1L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2	 (2L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3	 (3L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4	 (4L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5	 (5L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT6	 (6L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT7	 (7L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT8	 (8L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT9	 (9L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT10	 (10L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT11	 (11L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0	 (12L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1	 (13L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2	 (14L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3	 (15L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4	 (16L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5	 (17L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6	 (18L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7	 (19L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0	 (20L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1	 (21L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2	 (22L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3	 (23L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4	 (24L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5	 (25L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6	 (26L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7	 (27L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8	 (28L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9	 (29L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10	 (30L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11	 (31L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0	 (32L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1	 (33L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2	 (34L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3	 (35L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0	 (36L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1	 (37L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2	 (38L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3	 (39L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4	 (40L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5	 (41L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6	 (42L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7	 (43L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0	 (44L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1	 (45L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2	 (46L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3	 (47L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4	 (48L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5	 (49L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6	 (50L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7	 (51L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT	 (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT	 (53L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS	 (54L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN	 (55L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR	 (56L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK	 (59L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK	 (60L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK	 (61L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT	 (62L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT	 (63L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT	 (64L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT	 (65L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT	 (66L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT	 (67L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT	 (68L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT	 (69L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT	 (70L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT	 (71L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT	 (72L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT	 (73L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT	 (74L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT	 (75L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT	 (76L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT	 (77L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT	 (78L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT	 (79L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT	 (80L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT	 (81L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT	 (82L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT	 (83L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT	 (84L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT	 (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT	 (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT	 (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT	 (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT	 (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT	 (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT	 (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT	 (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT	 (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT	 (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64	 (95L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64	 (96L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS	 (97L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS	 (98L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT	 (99L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT	 (100L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT	 (101L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT	 (102L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT	 (103L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT	 (104L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT	 (105L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT	 (106L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT	 (107L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT	 (108L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT	 (109L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT	 (110L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT	 (111L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT	 (112L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT	 (113L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT	 (114L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0	 (115L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1	 (116L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2	 (117L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3	 (118L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4	 (119L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5	 (120L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS	 (121L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS	 (122L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT	 (127L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI	 (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI	 (57L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI	 (58L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI	 (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI	 (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI	 (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI	 (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI	 (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI	 (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI	 (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI	 (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI	 (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI	 (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI	 (123L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI	 (124L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI	 (125L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI	 (126L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI	 (128L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI	 (129L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI	 (130L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI	 (131L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI	 (132L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI	 (133L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI	 (134L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI	 (135L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI	 (136L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI	 (137L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI	 (138L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI	 (139L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI	 (140L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI	 (141L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI	 (142L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI	 (143L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI	 (144L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI	 (145L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI	 (146L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI	 (147L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI	 (148L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI	 (149L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI	 (150L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI	 (151L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI	 (152L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI	 (153L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI	 (154L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI	 (155L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI	 (156L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI	 (157L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI	 (158L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI	 (159L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI	 (160L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI	 (161L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI	 (162L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI	 (163L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI	 (164L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI	 (165L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI	 (166L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI	 (167L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI	 (168L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI	 (169L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI	 (170L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI	 (171L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI	 (172L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI	 (173L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI	 (174L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI	 (175L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI	 (176L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI	 (177L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI	 (178L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_1				0x00006854
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_2				0x00006858
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_3				0x0000685c
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_STAT0				0x00006888
+#define BNX2_HC_STAT_GEN_STAT1				0x0000688c
+#define BNX2_HC_STAT_GEN_STAT2				0x00006890
+#define BNX2_HC_STAT_GEN_STAT3				0x00006894
+#define BNX2_HC_STAT_GEN_STAT4				0x00006898
+#define BNX2_HC_STAT_GEN_STAT5				0x0000689c
+#define BNX2_HC_STAT_GEN_STAT6				0x000068a0
+#define BNX2_HC_STAT_GEN_STAT7				0x000068a4
+#define BNX2_HC_STAT_GEN_STAT8				0x000068a8
+#define BNX2_HC_STAT_GEN_STAT9				0x000068ac
+#define BNX2_HC_STAT_GEN_STAT10				0x000068b0
+#define BNX2_HC_STAT_GEN_STAT11				0x000068b4
+#define BNX2_HC_STAT_GEN_STAT12				0x000068b8
+#define BNX2_HC_STAT_GEN_STAT13				0x000068bc
+#define BNX2_HC_STAT_GEN_STAT14				0x000068c0
+#define BNX2_HC_STAT_GEN_STAT15				0x000068c4
+#define BNX2_HC_STAT_GEN_STAT_AC0			0x000068c8
+#define BNX2_HC_STAT_GEN_STAT_AC1			0x000068cc
+#define BNX2_HC_STAT_GEN_STAT_AC2			0x000068d0
+#define BNX2_HC_STAT_GEN_STAT_AC3			0x000068d4
+#define BNX2_HC_STAT_GEN_STAT_AC4			0x000068d8
+#define BNX2_HC_STAT_GEN_STAT_AC5			0x000068dc
+#define BNX2_HC_STAT_GEN_STAT_AC6			0x000068e0
+#define BNX2_HC_STAT_GEN_STAT_AC7			0x000068e4
+#define BNX2_HC_STAT_GEN_STAT_AC8			0x000068e8
+#define BNX2_HC_STAT_GEN_STAT_AC9			0x000068ec
+#define BNX2_HC_STAT_GEN_STAT_AC10			0x000068f0
+#define BNX2_HC_STAT_GEN_STAT_AC11			0x000068f4
+#define BNX2_HC_STAT_GEN_STAT_AC12			0x000068f8
+#define BNX2_HC_STAT_GEN_STAT_AC13			0x000068fc
+#define BNX2_HC_STAT_GEN_STAT_AC14			0x00006900
+#define BNX2_HC_STAT_GEN_STAT_AC15			0x00006904
+#define BNX2_HC_STAT_GEN_STAT_AC			0x000068c8
+#define BNX2_HC_VIS					0x00006908
+#define BNX2_HC_VIS_STAT_BUILD_STATE			 (0xfL<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE		 (0L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_START		 (1L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST		 (2L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64		 (3L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32		 (4L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE	 (5L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA		 (6L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL	 (7L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW		 (8L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH		 (9L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA		 (10L<<0)
+#define BNX2_HC_VIS_DMA_STAT_STATE			 (0xfL<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE			 (0L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM		 (1L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA		 (2L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP		 (3L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_COMP			 (4L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM	 (5L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA	 (6L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1		 (7L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2		 (8L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT			 (9L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT		 (15L<<8)
+#define BNX2_HC_VIS_DMA_MSI_STATE			 (0x7L<<12)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE		 (0x3L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE		 (0L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT	 (1L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START	 (2L<<15)
+
+#define BNX2_HC_VIS_1					0x0000690c
+#define BNX2_HC_VIS_1_HW_INTACK_STATE			 (1L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE		 (0L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT		 (1L<<4)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE			 (1L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE		 (0L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT		 (1L<<5)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE		 (1L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE	 (0L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT	 (1L<<6)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE		 (1L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE		 (0L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT		 (1L<<7)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE			 (0xfL<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE		 (0L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA		 (1L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE		 (2L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN		 (3L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT		 (4L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE	 (5L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN	 (6L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT		 (7L<<17)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE			 (0x3L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL		 (0L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR		 (1L<<21)
+#define BNX2_HC_VIS_1_INT_GEN_STATE			 (1L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_DLE			 (0L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_NTERRUPT		 (1L<<23)
+#define BNX2_HC_VIS_1_STAT_CHAN_ID			 (0x7L<<24)
+#define BNX2_HC_VIS_1_INT_B				 (1L<<27)
+
+#define BNX2_HC_DEBUG_VECT_PEEK				0x00006910
+#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE			 (0x7ffL<<0)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE			 (0x7ffL<<16)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_HC_COALESCE_NOW				0x00006914
+#define BNX2_HC_COALESCE_NOW_COAL_NOW			 (0x1ffL<<1)
+#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT		 (0x1ffL<<11)
+#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT		 (0x1ffL<<21)
+
+#define BNX2_HC_MSIX_BIT_VECTOR				0x00006918
+#define BNX2_HC_MSIX_BIT_VECTOR_VAL			 (0x1ffL<<0)
+
+#define BNX2_HC_SB_CONFIG_1				0x00006a00
+#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_1_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_1_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1			0x00006a04
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_1			0x00006a08
+#define BNX2_HC_COMP_PROD_TRIP_1_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_1_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1			0x00006a0c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_1				0x00006a10
+#define BNX2_HC_RX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_1				0x00006a14
+#define BNX2_HC_TX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_1				0x00006a18
+#define BNX2_HC_COM_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_1				0x00006a1c
+#define BNX2_HC_CMD_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_1			0x00006a20
+#define BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_2				0x00006a24
+#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_2_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_2_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2			0x00006a28
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_2			0x00006a2c
+#define BNX2_HC_COMP_PROD_TRIP_2_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_2_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2			0x00006a30
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_2				0x00006a34
+#define BNX2_HC_RX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_2				0x00006a38
+#define BNX2_HC_TX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_2				0x00006a3c
+#define BNX2_HC_COM_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_2				0x00006a40
+#define BNX2_HC_CMD_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_2			0x00006a44
+#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_3				0x00006a48
+#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_3_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_3_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3			0x00006a4c
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_3			0x00006a50
+#define BNX2_HC_COMP_PROD_TRIP_3_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_3_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3			0x00006a54
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_3				0x00006a58
+#define BNX2_HC_RX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_3				0x00006a5c
+#define BNX2_HC_TX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_3				0x00006a60
+#define BNX2_HC_COM_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_3				0x00006a64
+#define BNX2_HC_CMD_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_3			0x00006a68
+#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_4				0x00006a6c
+#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_4_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_4_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_4_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4			0x00006a70
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_4			0x00006a74
+#define BNX2_HC_COMP_PROD_TRIP_4_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_4_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4			0x00006a78
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_4				0x00006a7c
+#define BNX2_HC_RX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_4				0x00006a80
+#define BNX2_HC_TX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_4				0x00006a84
+#define BNX2_HC_COM_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_4				0x00006a88
+#define BNX2_HC_CMD_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_4			0x00006a8c
+#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_5				0x00006a90
+#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_5_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_5_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5			0x00006a94
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_5			0x00006a98
+#define BNX2_HC_COMP_PROD_TRIP_5_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_5_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5			0x00006a9c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_5				0x00006aa0
+#define BNX2_HC_RX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_5				0x00006aa4
+#define BNX2_HC_TX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_5				0x00006aa8
+#define BNX2_HC_COM_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_5				0x00006aac
+#define BNX2_HC_CMD_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_5			0x00006ab0
+#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_6				0x00006ab4
+#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_6_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_6_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6			0x00006ab8
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_6			0x00006abc
+#define BNX2_HC_COMP_PROD_TRIP_6_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_6_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6			0x00006ac0
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_6				0x00006ac4
+#define BNX2_HC_RX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_6				0x00006ac8
+#define BNX2_HC_TX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_6				0x00006acc
+#define BNX2_HC_COM_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_6				0x00006ad0
+#define BNX2_HC_CMD_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_6			0x00006ad4
+#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_7				0x00006ad8
+#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_7_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_7_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7			0x00006adc
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_7			0x00006ae0
+#define BNX2_HC_COMP_PROD_TRIP_7_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_7_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7			0x00006ae4
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_7				0x00006ae8
+#define BNX2_HC_RX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_7				0x00006aec
+#define BNX2_HC_TX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_7				0x00006af0
+#define BNX2_HC_COM_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_7				0x00006af4
+#define BNX2_HC_CMD_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_7			0x00006af8
+#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_8				0x00006afc
+#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_8_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_8_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8			0x00006b00
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_8			0x00006b04
+#define BNX2_HC_COMP_PROD_TRIP_8_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_8_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8			0x00006b08
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_8				0x00006b0c
+#define BNX2_HC_RX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_8				0x00006b10
+#define BNX2_HC_TX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_8				0x00006b14
+#define BNX2_HC_COM_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_8				0x00006b18
+#define BNX2_HC_CMD_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_8			0x00006b1c
+#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_SIZE	(BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COMP_PROD_TRIP_OFF	(BNX2_HC_COMP_PROD_TRIP_1 -	\
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COM_TICKS_OFF	(BNX2_HC_COM_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_CMD_TICKS_OFF	(BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF	(BNX2_HC_TX_QUICK_CONS_TRIP_1 -	\
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_TICKS_OFF	(BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF	(BNX2_HC_RX_QUICK_CONS_TRIP_1 - \
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_TICKS_OFF	(BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
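The eight status-block register banks above (BNX2_HC_SB_CONFIG_1 through _8) repeat at a fixed stride, and the *_OFF macros give each register's position inside a bank relative to BNX2_HC_SB_CONFIG_1. A minimal sketch of how these derived macros could be used to address a given bank follows; the helper name and the sb index parameter are illustrative only and do not appear in this header.

#include <linux/types.h>

/* Illustrative only: compute the RX tick-coalescing register for MSI-X
 * status block 'sb' (0 selects the _1 bank, 7 the _8 bank), using the
 * stride and offset macros defined above. */
static inline u32 hc_rx_ticks_reg(unsigned int sb)
{
	u32 base = BNX2_HC_SB_CONFIG_1 + sb * BNX2_HC_SB_CONFIG_SIZE;

	return base + BNX2_HC_RX_TICKS_OFF;	/* 0x6a10, 0x6a34, ... */
}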
+
+
+/*
+ *  txp_reg definition
+ *  offset: 0x40000
+ */
+#define BNX2_TXP_CPU_MODE				0x00045000
+#define BNX2_TXP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_TXP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_TXP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_TXP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_TXP_CPU_STATE				0x00045004
+#define BNX2_TXP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_TXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_TXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_TXP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_TXP_CPU_STATE_BLOCKED_READ			 (1L<<31)
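The CPU_STATE word above reports why the TXP processor halted or stalled. Below is a small sketch of decoding such a word with these bit masks; how the raw value is read from the device is outside the sketch, and the function name is an illustrative assumption.

#include <linux/types.h>

/* Illustrative only: true if a raw BNX2_TXP_CPU_STATE value shows the
 * processor halted on one of the fault conditions defined above. */
static inline bool txp_cpu_fault_halted(u32 state)
{
	const u32 faults = BNX2_TXP_CPU_STATE_BAD_INST_HALTED |
			   BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED |
			   BNX2_TXP_CPU_STATE_BAD_PC_HALTED |
			   BNX2_TXP_CPU_STATE_ALIGN_HALTED |
			   BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED;

	return (state & faults) != 0;
}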
+
+#define BNX2_TXP_CPU_EVENT_MASK				0x00045008
+#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_TXP_CPU_PROGRAM_COUNTER			0x0004501c
+#define BNX2_TXP_CPU_INSTRUCTION			0x00045020
+#define BNX2_TXP_CPU_DATA_ACCESS			0x00045024
+#define BNX2_TXP_CPU_INTERRUPT_ENABLE			0x00045028
+#define BNX2_TXP_CPU_INTERRUPT_VECTOR			0x0004502c
+#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC			0x00045030
+#define BNX2_TXP_CPU_HW_BREAKPOINT			0x00045034
+#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK			0x00045038
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR			0x00045048
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_TXP_CPU_REG_FILE				0x00045200
+#define BNX2_TXP_TXPQ					0x000453c0
+#define BNX2_TXP_FTQ_CMD				0x000453f8
+#define BNX2_TXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
+#define BNX2_TXP_FTQ_CMD_WR_TOP				 (1L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TXP_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TXP_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TXP_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TXP_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TXP_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TXP_FTQ_CTL				0x000453fc
+#define BNX2_TXP_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TXP_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TXP_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
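BNX2_TXP_FTQ_CTL packs two 10-bit depth fields alongside the intervene bits. A brief sketch of pulling those fields out of a raw control word with the masks above; the shift counts are written literally because the header only defines the shifted masks, and the helper names are illustrative.

#include <linux/types.h>

/* Illustrative only: extract the current and maximum queue depth from a
 * raw BNX2_TXP_FTQ_CTL value using the field masks defined above. */
static inline unsigned int txp_ftq_cur_depth(u32 ctl)
{
	return (ctl & BNX2_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
}

static inline unsigned int txp_ftq_max_depth(u32 ctl)
{
	return (ctl & BNX2_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
}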
+
+#define BNX2_TXP_SCRATCH				0x00060000
+
+
+/*
+ *  tpat_reg definition
+ *  offset: 0x80000
+ */
+#define BNX2_TPAT_CPU_MODE				0x00085000
+#define BNX2_TPAT_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_TPAT_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_TPAT_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA		 (1L<<7)
+#define BNX2_TPAT_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_TPAT_CPU_STATE				0x00085004
+#define BNX2_TPAT_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED	 (1L<<5)
+#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED		 (1L<<7)
+#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_TPAT_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_TPAT_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_TPAT_CPU_STATE_BLOCKED_READ		 (1L<<31)
+
+#define BNX2_TPAT_CPU_EVENT_MASK			0x00085008
+#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK	 (1L<<0)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_TPAT_CPU_PROGRAM_COUNTER			0x0008501c
+#define BNX2_TPAT_CPU_INSTRUCTION			0x00085020
+#define BNX2_TPAT_CPU_DATA_ACCESS			0x00085024
+#define BNX2_TPAT_CPU_INTERRUPT_ENABLE			0x00085028
+#define BNX2_TPAT_CPU_INTERRUPT_VECTOR			0x0008502c
+#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC		0x00085030
+#define BNX2_TPAT_CPU_HW_BREAKPOINT			0x00085034
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK			0x00085038
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR			0x00085048
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP	 (0L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_REG_FILE				0x00085200
+#define BNX2_TPAT_TPATQ					0x000853c0
+#define BNX2_TPAT_FTQ_CMD				0x000853f8
+#define BNX2_TPAT_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TPAT_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TPAT_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TPAT_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TPAT_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TPAT_FTQ_CTL				0x000853fc
+#define BNX2_TPAT_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TPAT_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_TPAT_SCRATCH				0x000a0000
+
+
+/*
+ *  rxp_reg definition
+ *  offset: 0xc0000
+ */
+#define BNX2_RXP_CPU_MODE				0x000c5000
+#define BNX2_RXP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_RXP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_RXP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_RXP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_RXP_CPU_STATE				0x000c5004
+#define BNX2_RXP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_RXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_RXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_RXP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_RXP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_RXP_CPU_EVENT_MASK				0x000c5008
+#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_RXP_CPU_PROGRAM_COUNTER			0x000c501c
+#define BNX2_RXP_CPU_INSTRUCTION			0x000c5020
+#define BNX2_RXP_CPU_DATA_ACCESS			0x000c5024
+#define BNX2_RXP_CPU_INTERRUPT_ENABLE			0x000c5028
+#define BNX2_RXP_CPU_INTERRUPT_VECTOR			0x000c502c
+#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC			0x000c5030
+#define BNX2_RXP_CPU_HW_BREAKPOINT			0x000c5034
+#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK			0x000c5038
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR			0x000c5048
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_REG_FILE				0x000c5200
+#define BNX2_RXP_PFE_PFE_CTL				0x000c537c
+#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0			 (0L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1			 (1L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2			 (2L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3			 (3L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4			 (4L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5			 (5L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6			 (6L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7			 (7L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8			 (8L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9			 (9L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_COUNT			 (0xfL<<12)
+#define BNX2_RXP_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RXP_RXPCQ					0x000c5380
+#define BNX2_RXP_CFTQ_CMD				0x000c53b8
+#define BNX2_RXP_CFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RXP_CFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RXP_CFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_RXP_CFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RXP_CFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RXP_CFTQ_CTL				0x000c53bc
+#define BNX2_RXP_CFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RXP_CFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RXP_RXPQ					0x000c53c0
+#define BNX2_RXP_FTQ_CMD				0x000c53f8
+#define BNX2_RXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
+#define BNX2_RXP_FTQ_CMD_WR_TOP				 (1L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RXP_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RXP_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RXP_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_RXP_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_RXP_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RXP_FTQ_CTL				0x000c53fc
+#define BNX2_RXP_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RXP_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RXP_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RXP_SCRATCH				0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD			 0x000e0024
+#define BNX2_RXP_SCRATCH_RSS_TBL_SZ			 0x000e0038
+#define BNX2_RXP_SCRATCH_RSS_TBL			 0x000e003c
+#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES		 128
+
+
+/*
+ *  com_reg definition
+ *  offset: 0x100000
+ */
+#define BNX2_COM_CKSUM_ERROR_STATUS			0x00100000
+#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_COM_CPU_MODE				0x00105000
+#define BNX2_COM_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_COM_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_COM_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_COM_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_COM_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_COM_CPU_STATE				0x00105004
+#define BNX2_COM_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_COM_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_COM_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_COM_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_COM_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_COM_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_COM_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_COM_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_COM_CPU_EVENT_MASK				0x00105008
+#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_COM_CPU_PROGRAM_COUNTER			0x0010501c
+#define BNX2_COM_CPU_INSTRUCTION			0x00105020
+#define BNX2_COM_CPU_DATA_ACCESS			0x00105024
+#define BNX2_COM_CPU_INTERRUPT_ENABLE			0x00105028
+#define BNX2_COM_CPU_INTERRUPT_VECTOR			0x0010502c
+#define BNX2_COM_CPU_INTERRUPT_SAVED_PC			0x00105030
+#define BNX2_COM_CPU_HW_BREAKPOINT			0x00105034
+#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK			0x00105038
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR			0x00105048
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_REG_FILE				0x00105200
+#define BNX2_COM_COMTQ_PFE_PFE_CTL			0x001052bc
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT	 (1L<<0)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET		 (0x1ffL<<16)
+
+#define BNX2_COM_COMXQ					0x00105340
+#define BNX2_COM_COMXQ_FTQ_CMD				0x00105378
+#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET		 (1L<<25)
+#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMXQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMXQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMXQ_FTQ_CTL				0x0010537c
+#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE		 (1L<<0)
+#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
+#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
+
+#define BNX2_COM_COMTQ					0x00105380
+#define BNX2_COM_COMTQ_FTQ_CMD				0x001053b8
+#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET		 (1L<<25)
+#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMTQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMTQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMTQ_FTQ_CTL				0x001053bc
+#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE		 (1L<<0)
+#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
+#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
+
+#define BNX2_COM_COMQ					0x001053c0
+#define BNX2_COM_COMQ_FTQ_CMD				0x001053f8
+#define BNX2_COM_COMQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMQ_FTQ_CTL				0x001053fc
+#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_COM_SCRATCH				0x00120000
+
+#define BNX2_FW_RX_LOW_LATENCY				 0x00120058
+#define BNX2_FW_RX_DROP_COUNT				 0x00120084
+
+
+/*
+ *  cp_reg definition
+ *  offset: 0x180000
+ */
+#define BNX2_CP_CKSUM_ERROR_STATUS			0x00180000
+#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_CP_CPU_MODE				0x00185000
+#define BNX2_CP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_CP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_CP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_CP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_CP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_CP_CPU_STATE				0x00185004
+#define BNX2_CP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_CP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_CP_CPU_STATE_BAD_PC_HALTED			 (1L<<6)
+#define BNX2_CP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_CP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_CP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_CP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_CP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_CP_CPU_EVENT_MASK				0x00185008
+#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK		 (1L<<10)
+#define BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_CP_CPU_PROGRAM_COUNTER			0x0018501c
+#define BNX2_CP_CPU_INSTRUCTION				0x00185020
+#define BNX2_CP_CPU_DATA_ACCESS				0x00185024
+#define BNX2_CP_CPU_INTERRUPT_ENABLE			0x00185028
+#define BNX2_CP_CPU_INTERRUPT_VECTOR			0x0018502c
+#define BNX2_CP_CPU_INTERRUPT_SAVED_PC			0x00185030
+#define BNX2_CP_CPU_HW_BREAKPOINT			0x00185034
+#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK			0x00185038
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR			0x00185048
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_REG_FILE				0x00185200
+#define BNX2_CP_CPQ_PFE_PFE_CTL				0x001853bc
+#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_CP_CPQ					0x001853c0
+#define BNX2_CP_CPQ_FTQ_CMD				0x001853f8
+#define BNX2_CP_CPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_CP_CPQ_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_CP_CPQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_CP_CPQ_FTQ_CTL				0x001853fc
+#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_CP_SCRATCH					0x001a0000
+
+#define BNX2_FW_MAX_ISCSI_CONN				 0x001a0080
+
+
+/*
+ *  mcp_reg definition
+ *  offset: 0x140000
+ */
+#define BNX2_MCP_MCP_CONTROL				0x00140080
+#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL			 (1L<<30)
+#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE		 (1L<<31)
+
+#define BNX2_MCP_MCP_ATTENTION_STATUS			0x00140084
+#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL	 (1L<<29)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT	 (1L<<30)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL			0x00140088
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS			0x0014008c
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD	 (0x7ffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT				0x00140090
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT	 (0x3fffffffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC	 (1L<<30)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET	 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_RESET				0x00140094
+#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET		 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_CONTROL			0x00140098
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT	 (0xfffffffL<<0)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN		 (1L<<29)
+#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE	 (1L<<30)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_ACCESS_LOCK				0x0014009c
+#define BNX2_MCP_ACCESS_LOCK_LOCK			 (1L<<31)
+
+#define BNX2_MCP_TOE_ID					0x001400a0
+#define BNX2_MCP_TOE_ID_FUNCTION_ID			 (1L<<31)
+
+#define BNX2_MCP_MAILBOX_CFG				0x001400a4
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET		 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE		 (0xfffL<<20)
+
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC			0x001400a8
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET	 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE	 (0xfffL<<20)
+
+#define BNX2_MCP_MCP_DOORBELL				0x001400ac
+#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL		 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL			0x001400b0
+#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL	 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC		0x001400b4
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL	 (1L<<31)
+
+#define BNX2_MCP_CPU_MODE				0x00145000
+#define BNX2_MCP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_MCP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_MCP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_MCP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_MCP_CPU_STATE				0x00145004
+#define BNX2_MCP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_MCP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_MCP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_MCP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_MCP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_MCP_CPU_EVENT_MASK				0x00145008
+#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_MCP_CPU_PROGRAM_COUNTER			0x0014501c
+#define BNX2_MCP_CPU_INSTRUCTION			0x00145020
+#define BNX2_MCP_CPU_DATA_ACCESS			0x00145024
+#define BNX2_MCP_CPU_INTERRUPT_ENABLE			0x00145028
+#define BNX2_MCP_CPU_INTERRUPT_VECTOR			0x0014502c
+#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC			0x00145030
+#define BNX2_MCP_CPU_HW_BREAKPOINT			0x00145034
+#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK			0x00145038
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR			0x00145048
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_REG_FILE				0x00145200
+#define BNX2_MCP_MCPQ					0x001453c0
+#define BNX2_MCP_MCPQ_FTQ_CMD				0x001453f8
+#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_MCP_MCPQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_MCP_MCPQ_FTQ_CTL				0x001453fc
+#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_MCP_ROM					0x00150000
+#define BNX2_MCP_SCRATCH				0x00160000
+#define BNX2_MCP_STATE_P1				 0x0016f9c8
+#define BNX2_MCP_STATE_P0				 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708				 0x001699c8
+#define BNX2_MCP_STATE_P0_5708				 0x00169dc8
+
+#define BNX2_SHM_HDR_SIGNATURE				BNX2_MCP_SCRATCH
+#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK			 0xffff0000
+#define BNX2_SHM_HDR_SIGNATURE_SIG			 0x53530000
+#define BNX2_SHM_HDR_SIGNATURE_VER_MASK			 0x000000ff
+#define BNX2_SHM_HDR_SIGNATURE_VER_ONE			 0x00000001
+
+#define BNX2_SHM_HDR_ADDR_0				BNX2_MCP_SCRATCH + 4
+#define BNX2_SHM_HDR_ADDR_1				BNX2_MCP_SCRATCH + 8
+
+
+#define NUM_MC_HASH_REGISTERS   8
+
+
+/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0.  */
+#define PHY_BCM5706_PHY_ID                          0x00206160
+
+#define PHY_ID(id)                                  ((id) & 0xfffffff0)
+#define PHY_REV_ID(id)                              ((id) & 0xf)
+
+/* 5708 Serdes PHY registers */
+
+#define BCM5708S_BMCR_FORCE_2500		0x20
+
+#define BCM5708S_UP1				0xb
+
+#define BCM5708S_UP1_2G5			0x1
+
+#define BCM5708S_BLK_ADDR			0x1f
+
+#define BCM5708S_BLK_ADDR_DIG			0x0000
+#define BCM5708S_BLK_ADDR_DIG3			0x0002
+#define BCM5708S_BLK_ADDR_TX_MISC		0x0005
+
+/* Digital Block */
+#define BCM5708S_1000X_CTL1			0x10
+
+#define BCM5708S_1000X_CTL1_FIBER_MODE		0x0001
+#define BCM5708S_1000X_CTL1_AUTODET_EN		0x0010
+
+#define BCM5708S_1000X_CTL2			0x11
+
+#define BCM5708S_1000X_CTL2_PLLEL_DET_EN	0x0001
+
+#define BCM5708S_1000X_STAT1			0x14
+
+#define BCM5708S_1000X_STAT1_SGMII		0x0001
+#define BCM5708S_1000X_STAT1_LINK		0x0002
+#define BCM5708S_1000X_STAT1_FD			0x0004
+#define BCM5708S_1000X_STAT1_SPEED_MASK		0x0018
+#define BCM5708S_1000X_STAT1_SPEED_10		0x0000
+#define BCM5708S_1000X_STAT1_SPEED_100		0x0008
+#define BCM5708S_1000X_STAT1_SPEED_1G		0x0010
+#define BCM5708S_1000X_STAT1_SPEED_2G5		0x0018
+#define BCM5708S_1000X_STAT1_TX_PAUSE		0x0020
+#define BCM5708S_1000X_STAT1_RX_PAUSE		0x0040
+
+/* Digital3 Block */
+#define BCM5708S_DIG_3_0			0x10
+
+#define BCM5708S_DIG_3_0_USE_IEEE		0x0001
+
+/* Tx/Misc Block */
+#define BCM5708S_TX_ACTL1			0x15
+
+#define BCM5708S_TX_ACTL1_DRIVER_VCM		0x30
+
+#define BCM5708S_TX_ACTL3			0x17
+
+#define MII_BNX2_EXT_STATUS			0x11
+#define EXT_STATUS_MDIX				 (1 << 13)
+
+#define MII_BNX2_AUX_CTL			0x18
+#define AUX_CTL_MISC_CTL			 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED		  (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX		  (1 << 9)
+#define AUX_CTL_MISC_CTL_WR			  (1 << 15)
+
+#define MII_BNX2_DSP_RW_PORT			0x15
+#define MII_BNX2_DSP_ADDRESS			0x17
+#define MII_BNX2_DSP_EXPAND_REG			 0x0f00
+#define MII_EXPAND_REG1				  (MII_BNX2_DSP_EXPAND_REG | 1)
+#define MII_EXPAND_REG1_RUDI_C			   0x20
+#define MII_EXPAND_SERDES_CTL			  (MII_BNX2_DSP_EXPAND_REG | 3)
+
+#define MII_BNX2_MISC_SHADOW			0x1c
+#define MISC_SHDW_AN_DBG			 0x6800
+#define MISC_SHDW_AN_DBG_NOSYNC			  0x0002
+#define MISC_SHDW_AN_DBG_RUDI_INVALID		  0x0100
+#define MISC_SHDW_MODE_CTL			 0x7c00
+#define MISC_SHDW_MODE_CTL_SIG_DET		  0x0010
+
+#define MII_BNX2_BLK_ADDR			0x1f
+#define MII_BNX2_BLK_ADDR_IEEE0			 0x0000
+#define MII_BNX2_BLK_ADDR_GP_STATUS		 0x8120
+#define MII_BNX2_GP_TOP_AN_STATUS1		  0x1b
+#define MII_BNX2_GP_TOP_AN_SPEED_MSK		   0x3f00
+#define MII_BNX2_GP_TOP_AN_SPEED_10		   0x0000
+#define MII_BNX2_GP_TOP_AN_SPEED_100		   0x0100
+#define MII_BNX2_GP_TOP_AN_SPEED_1G		   0x0200
+#define MII_BNX2_GP_TOP_AN_SPEED_2_5G		   0x0300
+#define MII_BNX2_GP_TOP_AN_SPEED_1GKV		   0x0d00
+#define MII_BNX2_GP_TOP_AN_FD			   0x8
+#define MII_BNX2_BLK_ADDR_SERDES_DIG		 0x8300
+#define MII_BNX2_SERDES_DIG_1000XCTL1		  0x10
+#define MII_BNX2_SD_1000XCTL1_FIBER		   0x01
+#define MII_BNX2_SD_1000XCTL1_AUTODET		   0x10
+#define MII_BNX2_SERDES_DIG_MISC1		  0x18
+#define MII_BNX2_SD_MISC1_FORCE_MSK		   0xf
+#define MII_BNX2_SD_MISC1_FORCE_2_5G		   0x0
+#define MII_BNX2_SD_MISC1_FORCE			   0x10
+#define MII_BNX2_BLK_ADDR_OVER1G		 0x8320
+#define MII_BNX2_OVER1G_UP1			  0x19
+#define MII_BNX2_BLK_ADDR_BAM_NXTPG		 0x8350
+#define MII_BNX2_BAM_NXTPG_CTL			  0x10
+#define MII_BNX2_NXTPG_CTL_BAM			   0x1
+#define MII_BNX2_NXTPG_CTL_T2			   0x2
+#define MII_BNX2_BLK_ADDR_CL73_USERB0		 0x8370
+#define MII_BNX2_CL73_BAM_CTL1			  0x12
+#define MII_BNX2_CL73_BAM_EN			   0x8000
+#define MII_BNX2_CL73_BAM_STA_MGR_EN		   0x4000
+#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN		   0x2000
+#define MII_BNX2_BLK_ADDR_AER			 0xffd0
+#define MII_BNX2_AER_AER			  0x1e
+#define MII_BNX2_AER_AER_AN_MMD			   0x3800
+#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0		 0xffe0
+
+#define MIN_ETHERNET_PACKET_SIZE	60
+#define MAX_ETHERNET_PACKET_SIZE	1514
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE	9014
+
+#define BNX2_RX_COPY_THRESH		128
+
+#define BNX2_MISC_ENABLE_DEFAULT	0x17ffffff
+
+#define BNX2_START_UNICAST_ADDRESS_INDEX	4
+#define BNX2_END_UNICAST_ADDRESS_INDEX		7
+#define BNX2_MAX_UNICAST_ADDRESSES     	(BNX2_END_UNICAST_ADDRESS_INDEX - \
+					 BNX2_START_UNICAST_ADDRESS_INDEX + 1)
+
+#define DMA_READ_CHANS	5
+#define DMA_WRITE_CHANS	3
+
+/* Use CPU native page size up to 16K for the ring sizes.  */
+#if (PAGE_SHIFT > 14)
+#define BNX2_PAGE_BITS	14
+#else
+#define BNX2_PAGE_BITS	PAGE_SHIFT
+#endif
+#define BNX2_PAGE_SIZE	(1 << BNX2_PAGE_BITS)
+
+#define BNX2_TX_DESC_CNT  (BNX2_PAGE_SIZE / sizeof(struct bnx2_tx_bd))
+#define BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1)
+
+#define BNX2_MAX_RX_RINGS	8
+#define BNX2_MAX_RX_PG_RINGS	32
+#define BNX2_RX_DESC_CNT  (BNX2_PAGE_SIZE / sizeof(struct bnx2_rx_bd))
+#define BNX2_MAX_RX_DESC_CNT (BNX2_RX_DESC_CNT - 1)
+#define BNX2_MAX_TOTAL_RX_DESC_CNT (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_RINGS)
+#define BNX2_MAX_TOTAL_RX_PG_DESC_CNT	\
+	(BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_PG_RINGS)
+
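+/* The last BD of every ring page is reserved as a link to the next page
+ * (an assumption based on the ring layout implied by these macros), so the
+ * BNX2_NEXT_TX_BD()/BNX2_NEXT_RX_BD() macros below skip it by adding 2
+ * instead of 1 at the page boundary.  BNX2_RX_RING() maps a software index
+ * to a page index: each bnx2_rx_bd is 16 bytes, hence the shift by
+ * (BNX2_PAGE_BITS - 4).
+ */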
+#define BNX2_NEXT_TX_BD(x) (((x) & (BNX2_MAX_TX_DESC_CNT - 1)) ==	\
+		(BNX2_MAX_TX_DESC_CNT - 1)) ?				\
+	(x) + 2 : (x) + 1
+
+#define BNX2_TX_RING_IDX(x) ((x) & BNX2_MAX_TX_DESC_CNT)
+
+#define BNX2_NEXT_RX_BD(x) (((x) & (BNX2_MAX_RX_DESC_CNT - 1)) ==	\
+		(BNX2_MAX_RX_DESC_CNT - 1)) ?				\
+	(x) + 2 : (x) + 1
+
+#define BNX2_RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define BNX2_RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
+
+#define BNX2_RX_RING(x) (((x) & ~BNX2_MAX_RX_DESC_CNT) >> (BNX2_PAGE_BITS - 4))
+#define BNX2_RX_IDX(x) ((x) & BNX2_MAX_RX_DESC_CNT)
+
+/* Context size. */
+#define CTX_SHIFT                   7
+#define CTX_SIZE                    (1 << CTX_SHIFT)
+#define CTX_MASK                    (CTX_SIZE - 1)
+#define GET_CID_ADDR(_cid)          ((_cid) << CTX_SHIFT)
+#define GET_CID(_cid_addr)          ((_cid_addr) >> CTX_SHIFT)
+
+#define PHY_CTX_SHIFT               6
+#define PHY_CTX_SIZE                (1 << PHY_CTX_SHIFT)
+#define PHY_CTX_MASK                (PHY_CTX_SIZE - 1)
+#define GET_PCID_ADDR(_pcid)        ((_pcid) << PHY_CTX_SHIFT)
+#define GET_PCID(_pcid_addr)        ((_pcid_addr) >> PHY_CTX_SHIFT)
+
+#define MB_KERNEL_CTX_SHIFT         8
+#define MB_KERNEL_CTX_SIZE          (1 << MB_KERNEL_CTX_SHIFT)
+#define MB_KERNEL_CTX_MASK          (MB_KERNEL_CTX_SIZE - 1)
+#define MB_GET_CID_ADDR(_cid)       (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT))
+
+#define MAX_CID_CNT                 0x4000
+#define MAX_CID_ADDR                (GET_CID_ADDR(MAX_CID_CNT))
+#define INVALID_CID_ADDR            0xffffffff
+
+#define TX_CID		16
+#define TX_TSS_CID	32
+#define RX_CID		0
+#define RX_RSS_CID	4
+#define RX_MAX_RSS_RINGS	7
+#define RX_MAX_RINGS		(RX_MAX_RSS_RINGS + 1)
+#define TX_MAX_TSS_RINGS	7
+#define TX_MAX_RINGS		(TX_MAX_TSS_RINGS + 1)
+
+#define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
+#define MB_RX_CID_ADDR	MB_GET_CID_ADDR(RX_CID)
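+
+/* Illustrative arithmetic (follows directly from the macros above): with
+ * TX_CID == 16 and MB_KERNEL_CTX_SHIFT == 8, MB_TX_CID_ADDR works out to
+ * 0x10000 + (16 << 8) = 0x11000, and MB_RX_CID_ADDR (RX_CID == 0) is 0x10000.
+ */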
+
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains only a pointer to the kmalloc() data,
+ * and skbs are built only after the hardware has filled the frame.
+ */
+struct bnx2_sw_bd {
+	u8			*data;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses).
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+	return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
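+
+/* Assumed buffer layout behind the helper above (illustrative, not
+ * normative): the kmalloc()'d area is aligned to BNX2_RX_ALIGN, NET_SKB_PAD
+ * bytes of headroom follow, and the hardware-written struct l2_fhdr starts
+ * right after that headroom, with the received frame behind it.
+ */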
+
+
+struct bnx2_sw_pg {
+	struct page		*page;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnx2_sw_tx_bd {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	unsigned short		is_gso;
+	unsigned short		nr_frags;
+};
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnx2_sw_bd) * BNX2_RX_DESC_CNT)
+#define SW_RXPG_RING_SIZE (sizeof(struct bnx2_sw_pg) * BNX2_RX_DESC_CNT)
+#define RXBD_RING_SIZE (sizeof(struct bnx2_rx_bd) * BNX2_RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnx2_sw_tx_bd) * BNX2_TX_DESC_CNT)
+#define TXBD_RING_SIZE (sizeof(struct bnx2_tx_bd) * BNX2_TX_DESC_CNT)
+
+/* Buffered flash (Atmel: AT45DB011B) specific information */
+#define SEEPROM_PAGE_BITS			2
+#define SEEPROM_PHY_PAGE_SIZE			(1 << SEEPROM_PAGE_BITS)
+#define SEEPROM_BYTE_ADDR_MASK			(SEEPROM_PHY_PAGE_SIZE-1)
+#define SEEPROM_PAGE_SIZE			4
+#define SEEPROM_TOTAL_SIZE			65536
+
+#define BUFFERED_FLASH_PAGE_BITS		9
+#define BUFFERED_FLASH_PHY_PAGE_SIZE		(1 << BUFFERED_FLASH_PAGE_BITS)
+#define BUFFERED_FLASH_BYTE_ADDR_MASK		(BUFFERED_FLASH_PHY_PAGE_SIZE-1)
+#define BUFFERED_FLASH_PAGE_SIZE		264
+#define BUFFERED_FLASH_TOTAL_SIZE		0x21000
+
+#define SAIFUN_FLASH_PAGE_BITS			8
+#define SAIFUN_FLASH_PHY_PAGE_SIZE		(1 << SAIFUN_FLASH_PAGE_BITS)
+#define SAIFUN_FLASH_BYTE_ADDR_MASK		(SAIFUN_FLASH_PHY_PAGE_SIZE-1)
+#define SAIFUN_FLASH_PAGE_SIZE			256
+#define SAIFUN_FLASH_BASE_TOTAL_SIZE		65536
+
+#define ST_MICRO_FLASH_PAGE_BITS		8
+#define ST_MICRO_FLASH_PHY_PAGE_SIZE		(1 << ST_MICRO_FLASH_PAGE_BITS)
+#define ST_MICRO_FLASH_BYTE_ADDR_MASK		(ST_MICRO_FLASH_PHY_PAGE_SIZE-1)
+#define ST_MICRO_FLASH_PAGE_SIZE		256
+#define ST_MICRO_FLASH_BASE_TOTAL_SIZE		65536
+
+#define BCM5709_FLASH_PAGE_BITS			8
+#define BCM5709_FLASH_PHY_PAGE_SIZE		(1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK		(BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE			256
+
+#define NVRAM_TIMEOUT_COUNT			30000
+
+
+#define FLASH_STRAP_MASK			(BNX2_NVM_CFG1_FLASH_MODE   | \
+						 BNX2_NVM_CFG1_BUFFER_MODE  | \
+						 BNX2_NVM_CFG1_PROTECT_MODE | \
+						 BNX2_NVM_CFG1_FLASH_SIZE)
+
+#define FLASH_BACKUP_STRAP_MASK			(0xf << 26)
+
+struct flash_spec {
+	u32 strapping;
+	u32 config1;
+	u32 config2;
+	u32 config3;
+	u32 write1;
+	u32 flags;
+#define BNX2_NV_BUFFERED	0x00000001
+#define BNX2_NV_TRANSLATE	0x00000002
+#define BNX2_NV_WREN		0x00000004
+	u32 page_bits;
+	u32 page_size;
+	u32 addr_mask;
+	u32 total_size;
+	u8  *name;
+};
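+
+/* Hypothetical initializer, for illustration only (the real flash table and
+ * its strapping/config values live in the driver .c file):
+ *
+ *	static const struct flash_spec example_flash = {
+ *		.flags      = BNX2_NV_WREN,
+ *		.page_bits  = SAIFUN_FLASH_PAGE_BITS,
+ *		.page_size  = SAIFUN_FLASH_PAGE_SIZE,
+ *		.addr_mask  = SAIFUN_FLASH_BYTE_ADDR_MASK,
+ *		.total_size = SAIFUN_FLASH_BASE_TOTAL_SIZE,
+ *		.name       = "Non-buffered flash",
+ *	};
+ */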
+
+#define BNX2_MAX_MSIX_HW_VEC	9
+#define BNX2_MAX_MSIX_VEC	9
+#ifdef BCM_CNIC
+#define BNX2_MIN_MSIX_VEC	2
+#else
+#define BNX2_MIN_MSIX_VEC	1
+#endif
+
+
+struct bnx2_irq {
+	irq_handler_t	handler;
+	unsigned int	vector;
+	u8		requested;
+	char		name[IFNAMSIZ + 2];
+};
+
+struct bnx2_tx_ring_info {
+	u32			tx_prod_bseq;
+	u16			tx_prod;
+	u32			tx_bidx_addr;
+	u32			tx_bseq_addr;
+
+	struct bnx2_tx_bd	*tx_desc_ring;
+	struct bnx2_sw_tx_bd	*tx_buf_ring;
+
+	u16			tx_cons;
+	u16			hw_tx_cons;
+
+	dma_addr_t		tx_desc_mapping;
+};
+
+struct bnx2_rx_ring_info {
+	u32			rx_prod_bseq;
+	u16			rx_prod;
+	u16			rx_cons;
+
+	u32			rx_bidx_addr;
+	u32			rx_bseq_addr;
+	u32			rx_pg_bidx_addr;
+
+	u16			rx_pg_prod;
+	u16			rx_pg_cons;
+
+	struct bnx2_sw_bd	*rx_buf_ring;
+	struct bnx2_rx_bd	*rx_desc_ring[BNX2_MAX_RX_RINGS];
+	struct bnx2_sw_pg	*rx_pg_ring;
+	struct bnx2_rx_bd	*rx_pg_desc_ring[BNX2_MAX_RX_PG_RINGS];
+
+	dma_addr_t		rx_desc_mapping[BNX2_MAX_RX_RINGS];
+	dma_addr_t		rx_pg_desc_mapping[BNX2_MAX_RX_PG_RINGS];
+};
+
+struct bnx2_napi {
+	struct napi_struct	napi		____cacheline_aligned;
+	struct bnx2		*bp;
+	union {
+		struct status_block		*msi;
+		struct status_block_msix	*msix;
+	} status_blk;
+	u16			*hw_tx_cons_ptr;
+	u16			*hw_rx_cons_ptr;
+	u32 			last_status_idx;
+	u32			int_num;
+
+#ifdef BCM_CNIC
+	u32			cnic_tag;
+	int			cnic_present;
+#endif
+
+	struct bnx2_rx_ring_info	rx_ring;
+	struct bnx2_tx_ring_info	tx_ring;
+};
+
+struct bnx2 {
+	/* Fields used in the tx and intr/napi performance paths are grouped */
+	/* together in the beginning of the structure. */
+	void __iomem		*regview;
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	atomic_t		intr_sem;
+
+	u32			flags;
+#define BNX2_FLAG_PCIX			0x00000001
+#define BNX2_FLAG_PCI_32BIT		0x00000002
+#define BNX2_FLAG_MSIX_CAP		0x00000004
+#define BNX2_FLAG_NO_WOL		0x00000008
+#define BNX2_FLAG_USING_MSI		0x00000020
+#define BNX2_FLAG_ASF_ENABLE		0x00000040
+#define BNX2_FLAG_MSI_CAP		0x00000080
+#define BNX2_FLAG_ONE_SHOT_MSI		0x00000100
+#define BNX2_FLAG_PCIE			0x00000200
+#define BNX2_FLAG_USING_MSIX		0x00000400
+#define BNX2_FLAG_USING_MSI_OR_MSIX	(BNX2_FLAG_USING_MSI | \
+					 BNX2_FLAG_USING_MSIX)
+#define BNX2_FLAG_JUMBO_BROKEN		0x00000800
+#define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
+#define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
+
+	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
+
+	u32			rx_buf_use_size;	/* useable size */
+	u32			rx_buf_size;		/* with alignment */
+	u32			rx_copy_thresh;
+	u32			rx_jumbo_thresh;
+	u32			rx_max_ring_idx;
+	u32			rx_max_pg_ring_idx;
+
+	/* TX constants */
+	int		tx_ring_size;
+	u32		tx_wake_thresh;
+
+#ifdef BCM_CNIC
+	struct cnic_ops	__rcu	*cnic_ops;
+	void			*cnic_data;
+#endif
+
+	/* End of fields used in the performance code paths. */
+
+	unsigned int		current_interval;
+#define BNX2_TIMER_INTERVAL		HZ
+#define BNX2_SERDES_AN_TIMEOUT		(HZ / 3)
+#define BNX2_SERDES_FORCED_TIMEOUT	(HZ / 10)
+
+	struct			timer_list timer;
+	struct work_struct	reset_task;
+
+	/* Used to synchronize phy accesses. */
+	spinlock_t		phy_lock;
+	spinlock_t		indirect_lock;
+
+	u32			phy_flags;
+#define BNX2_PHY_FLAG_SERDES			0x00000001
+#define BNX2_PHY_FLAG_CRC_FIX			0x00000002
+#define BNX2_PHY_FLAG_PARALLEL_DETECT		0x00000004
+#define BNX2_PHY_FLAG_2_5G_CAPABLE		0x00000008
+#define BNX2_PHY_FLAG_INT_MODE_MASK		0x00000300
+#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING	0x00000100
+#define BNX2_PHY_FLAG_INT_MODE_LINK_READY	0x00000200
+#define BNX2_PHY_FLAG_DIS_EARLY_DAC		0x00000400
+#define BNX2_PHY_FLAG_REMOTE_PHY_CAP		0x00000800
+#define BNX2_PHY_FLAG_FORCED_DOWN		0x00001000
+#define BNX2_PHY_FLAG_NO_PARALLEL		0x00002000
+#define BNX2_PHY_FLAG_MDIX			0x00004000
+
+	u32			mii_bmcr;
+	u32			mii_bmsr;
+	u32			mii_bmsr1;
+	u32			mii_adv;
+	u32			mii_lpa;
+	u32			mii_up1;
+
+	u32			chip_id;
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define BNX2_CHIP(bp)			(((bp)->chip_id) & 0xffff0000)
+#define BNX2_CHIP_5706			0x57060000
+#define BNX2_CHIP_5708			0x57080000
+#define BNX2_CHIP_5709			0x57090000
+
+#define BNX2_CHIP_REV(bp)		(((bp)->chip_id) & 0x0000f000)
+#define BNX2_CHIP_REV_Ax		0x00000000
+#define BNX2_CHIP_REV_Bx		0x00001000
+#define BNX2_CHIP_REV_Cx		0x00002000
+
+#define BNX2_CHIP_METAL(bp)		(((bp)->chip_id) & 0x00000ff0)
+#define BNX2_CHIP_BOND(bp)		(((bp)->chip_id) & 0x0000000f)
+
+#define BNX2_CHIP_ID(bp)		(((bp)->chip_id) & 0xfffffff0)
+#define BNX2_CHIP_ID_5706_A0		0x57060000
+#define BNX2_CHIP_ID_5706_A1		0x57060010
+#define BNX2_CHIP_ID_5706_A2		0x57060020
+#define BNX2_CHIP_ID_5708_A0		0x57080000
+#define BNX2_CHIP_ID_5708_B0		0x57081000
+#define BNX2_CHIP_ID_5708_B1		0x57081010
+#define BNX2_CHIP_ID_5709_A0		0x57090000
+#define BNX2_CHIP_ID_5709_A1		0x57090010
+
+/* A serdes chip will have the first bit of the bond id set. */
+#define BNX2_CHIP_BOND_SERDES_BIT		0x01
+
+	u32			phy_addr;
+	u32			phy_id;
+
+	u16			bus_speed_mhz;
+	u8			wol;
+
+	u8			pad;
+
+	u16			fw_wr_seq;
+	u16			fw_drv_pulse_wr_seq;
+	u32			fw_last_msg;
+
+	int			rx_max_ring;
+	int			rx_ring_size;
+
+	int			rx_max_pg_ring;
+	int			rx_pg_ring_size;
+
+	u16			tx_quick_cons_trip;
+	u16			tx_quick_cons_trip_int;
+	u16			rx_quick_cons_trip;
+	u16			rx_quick_cons_trip_int;
+	u16			comp_prod_trip;
+	u16			comp_prod_trip_int;
+	u16			tx_ticks;
+	u16			tx_ticks_int;
+	u16			com_ticks;
+	u16			com_ticks_int;
+	u16			cmd_ticks;
+	u16			cmd_ticks_int;
+	u16			rx_ticks;
+	u16			rx_ticks_int;
+
+	u32			stats_ticks;
+
+	dma_addr_t		status_blk_mapping;
+
+	void *status_blk;
+	struct statistics_block	*stats_blk;
+	struct statistics_block	*temp_stats_blk;
+	dma_addr_t		stats_blk_mapping;
+
+	int			ctx_pages;
+	void			*ctx_blk[4];
+	dma_addr_t		ctx_blk_mapping[4];
+
+	u32			hc_cmd;
+	u32			rx_mode;
+
+	u16			req_line_speed;
+	u8			req_duplex;
+
+	u8			phy_port;
+	u8			link_up;
+
+	u16			line_speed;
+	u8			duplex;
+	u8			flow_ctrl;	/* actual flow ctrl settings */
+						/* may be different from     */
+						/* req_flow_ctrl if autoneg  */
+	u32			advertising;
+
+	u8			req_flow_ctrl;	/* flow ctrl advertisement */
+						/* settings or forced      */
+						/* settings                */
+	u8			autoneg;
+#define AUTONEG_SPEED		1
+#define AUTONEG_FLOW_CTRL	2
+
+	u8			loopback;
+#define MAC_LOOPBACK		1
+#define PHY_LOOPBACK		2
+
+	u8			serdes_an_pending;
+
+	u8			mac_addr[8];
+
+	u32			shmem_base;
+
+	char			fw_version[32];
+
+	int			pm_cap;
+	int			pcix_cap;
+
+	const struct flash_spec	*flash_info;
+	u32			flash_size;
+
+	int			status_stats_size;
+
+	struct bnx2_irq		irq_tbl[BNX2_MAX_MSIX_VEC];
+	int			irq_nvecs;
+
+	u8			func;
+
+	u8			num_tx_rings;
+	u8			num_rx_rings;
+
+	int			num_req_tx_rings;
+	int			num_req_rx_rings;
+
+	u32 			leds_save;
+	u32			idle_chk_status_idx;
+
+#ifdef BCM_CNIC
+	struct mutex		cnic_lock;
+	struct cnic_eth_dev	cnic_eth_dev;
+	struct cnic_eth_dev	*(*cnic_probe)(struct net_device *);
+#endif
+
+	const struct firmware	*mips_firmware;
+	const struct firmware	*rv2p_firmware;
+};
+
+#define BNX2_RD(bp, offset)					\
+	readl(bp->regview + offset)
+
+#define BNX2_WR(bp, offset, val)					\
+	writel(val, bp->regview + offset)
+
+#define BNX2_WR16(bp, offset, val)				\
+	writew(val, bp->regview + offset)
+
+struct cpu_reg {
+	u32 mode;
+	u32 mode_value_halt;
+	u32 mode_value_sstep;
+
+	u32 state;
+	u32 state_value_clear;
+
+	u32 gpr0;
+	u32 evmask;
+	u32 pc;
+	u32 inst;
+	u32 bp;
+
+	u32 spad_base;
+
+	u32 mips_view_base;
+};
+
+struct bnx2_fw_file_section {
+	__be32 addr;
+	__be32 len;
+	__be32 offset;
+};
+
+struct bnx2_mips_fw_file_entry {
+	__be32 start_addr;
+	struct bnx2_fw_file_section text;
+	struct bnx2_fw_file_section data;
+	struct bnx2_fw_file_section rodata;
+};
+
+struct bnx2_rv2p_fw_file_entry {
+	struct bnx2_fw_file_section rv2p;
+	__be32 fixup[8];
+};
+
+struct bnx2_mips_fw_file {
+	struct bnx2_mips_fw_file_entry com;
+	struct bnx2_mips_fw_file_entry cp;
+	struct bnx2_mips_fw_file_entry rxp;
+	struct bnx2_mips_fw_file_entry tpat;
+	struct bnx2_mips_fw_file_entry txp;
+};
+
+struct bnx2_rv2p_fw_file {
+	struct bnx2_rv2p_fw_file_entry proc1;
+	struct bnx2_rv2p_fw_file_entry proc2;
+};
+
+#define RV2P_P1_FIXUP_PAGE_SIZE_IDX		0
+#define RV2P_BD_PAGE_SIZE_MSK			0xffff
+#define RV2P_BD_PAGE_SIZE			((BNX2_PAGE_SIZE / 16) - 1)
+
+#define RV2P_PROC1                              0
+#define RV2P_PROC2                              1
+
+
+/* This value (in milliseconds) determines how often the driver issues
+ * the PULSE message code.  The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define BNX2_DRV_PULSE_PERIOD_MS                 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the wait has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define BNX2_FW_ACK_TIME_OUT_MS                  1000
+
+
+#define BNX2_DRV_RESET_SIGNATURE		0x00000000
+#define BNX2_DRV_RESET_SIGNATURE_MAGIC		 0x4841564b /* HAVK */
+//#define DRV_RESET_SIGNATURE_MAGIC		 0x47495352 /* RSIG */
+
+#define BNX2_DRV_MB				0x00000004
+#define BNX2_DRV_MSG_CODE			 0xff000000
+#define BNX2_DRV_MSG_CODE_RESET			 0x01000000
+#define BNX2_DRV_MSG_CODE_UNLOAD		 0x02000000
+#define BNX2_DRV_MSG_CODE_SHUTDOWN		 0x03000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_WOL		 0x04000000
+#define BNX2_DRV_MSG_CODE_FW_TIMEOUT		 0x05000000
+#define BNX2_DRV_MSG_CODE_PULSE			 0x06000000
+#define BNX2_DRV_MSG_CODE_DIAG			 0x07000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL	 0x09000000
+#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN		 0x0b000000
+#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE	 0x0d000000
+#define BNX2_DRV_MSG_CODE_CMD_SET_LINK		 0x10000000
+
+#define BNX2_DRV_MSG_DATA			 0x00ff0000
+#define BNX2_DRV_MSG_DATA_WAIT0			 0x00010000
+#define BNX2_DRV_MSG_DATA_WAIT1			 0x00020000
+#define BNX2_DRV_MSG_DATA_WAIT2			 0x00030000
+#define BNX2_DRV_MSG_DATA_WAIT3			 0x00040000
+
+#define BNX2_DRV_MSG_SEQ			 0x0000ffff
+
+#define BNX2_FW_MB				0x00000008
+#define BNX2_FW_MSG_ACK				 0x0000ffff
+#define BNX2_FW_MSG_STATUS_MASK			 0x00ff0000
+#define BNX2_FW_MSG_STATUS_OK			 0x00000000
+#define BNX2_FW_MSG_STATUS_FAILURE		 0x00ff0000
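+
+/* Presumed command/acknowledge handshake (a sketch inferred from the fields
+ * above, not a statement of the driver's exact code): the driver writes a
+ * BNX2_DRV_MSG_CODE_* value OR'd with an incrementing BNX2_DRV_MSG_SEQ
+ * sequence number to BNX2_DRV_MB, then polls BNX2_FW_MB until the
+ * BNX2_FW_MSG_ACK field echoes that sequence number or BNX2_FW_ACK_TIME_OUT_MS
+ * elapses; BNX2_FW_MSG_STATUS_MASK then carries the completion status.
+ */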
+
+#define BNX2_LINK_STATUS			0x0000000c
+#define BNX2_LINK_STATUS_INIT_VALUE		 0xffffffff
+#define BNX2_LINK_STATUS_LINK_UP		 0x1
+#define BNX2_LINK_STATUS_LINK_DOWN		 0x0
+#define BNX2_LINK_STATUS_SPEED_MASK		 0x1e
+#define BNX2_LINK_STATUS_AN_INCOMPLETE		 (0<<1)
+#define BNX2_LINK_STATUS_10HALF			 (1<<1)
+#define BNX2_LINK_STATUS_10FULL			 (2<<1)
+#define BNX2_LINK_STATUS_100HALF		 (3<<1)
+#define BNX2_LINK_STATUS_100BASE_T4		 (4<<1)
+#define BNX2_LINK_STATUS_100FULL		 (5<<1)
+#define BNX2_LINK_STATUS_1000HALF		 (6<<1)
+#define BNX2_LINK_STATUS_1000FULL		 (7<<1)
+#define BNX2_LINK_STATUS_2500HALF		 (8<<1)
+#define BNX2_LINK_STATUS_2500FULL		 (9<<1)
+#define BNX2_LINK_STATUS_AN_ENABLED		 (1<<5)
+#define BNX2_LINK_STATUS_AN_COMPLETE		 (1<<6)
+#define BNX2_LINK_STATUS_PARALLEL_DET		 (1<<7)
+#define BNX2_LINK_STATUS_RESERVED		 (1<<8)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL	 (1<<9)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF	 (1<<10)
+#define BNX2_LINK_STATUS_PARTNER_AD_100BT4	 (1<<11)
+#define BNX2_LINK_STATUS_PARTNER_AD_100FULL	 (1<<12)
+#define BNX2_LINK_STATUS_PARTNER_AD_100HALF	 (1<<13)
+#define BNX2_LINK_STATUS_PARTNER_AD_10FULL	 (1<<14)
+#define BNX2_LINK_STATUS_PARTNER_AD_10HALF	 (1<<15)
+#define BNX2_LINK_STATUS_TX_FC_ENABLED		 (1<<16)
+#define BNX2_LINK_STATUS_RX_FC_ENABLED		 (1<<17)
+#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP	 (1<<18)
+#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP	 (1<<19)
+#define BNX2_LINK_STATUS_SERDES_LINK		 (1<<20)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL	 (1<<21)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF	 (1<<22)
+#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED	 (1<<31)
+
+#define BNX2_DRV_PULSE_MB			0x00000010
+#define BNX2_DRV_PULSE_SEQ_MASK			 0x00007fff
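+
+/* Presumed use (sketch): roughly every BNX2_DRV_PULSE_PERIOD_MS the driver
+ * writes an incrementing sequence number, masked by BNX2_DRV_PULSE_SEQ_MASK,
+ * into BNX2_DRV_PULSE_MB so the firmware can tell that an OS-present driver
+ * is still alive.
+ */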
+
+/* Indicate to the firmware not to go into
+ * OS-absent mode when it is not getting the driver pulse.
+ * This is used for debugging. */
+#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE	 0x00080000
+
+#define BNX2_DRV_MB_ARG0			0x00000014
+#define BNX2_NETLINK_SET_LINK_SPEED_10HALF	 (1<<0)
+#define BNX2_NETLINK_SET_LINK_SPEED_10FULL	 (1<<1)
+#define BNX2_NETLINK_SET_LINK_SPEED_10		 \
+	(BNX2_NETLINK_SET_LINK_SPEED_10HALF |	 \
+	 BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_100HALF	 (1<<2)
+#define BNX2_NETLINK_SET_LINK_SPEED_100FULL	 (1<<3)
+#define BNX2_NETLINK_SET_LINK_SPEED_100		 \
+	(BNX2_NETLINK_SET_LINK_SPEED_100HALF |	 \
+	 BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF	 (1<<4)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL	 (1<<5)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF	 (1<<6)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL	 (1<<7)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF	 (1<<8)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL	 (1<<9)
+#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG	 (1<<10)
+#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE	 (1<<11)
+#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE	 (1<<12)
+#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE	 (1<<13)
+#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED	 (1<<14)
+#define BNX2_NETLINK_SET_LINK_PHY_RESET		 (1<<15)
+
+#define BNX2_DEV_INFO_SIGNATURE			0x00000020
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC		 0x44564900
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK	 0xffffff00
+#define BNX2_DEV_INFO_FEATURE_CFG_VALID		 0x01
+#define BNX2_DEV_INFO_SECONDARY_PORT		 0x80
+#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE		 0x40
+
+#define BNX2_SHARED_HW_CFG_PART_NUM		0x00000024
+
+#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED	0x00000034
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK	 0xff000000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK	 0xff0000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK	 0xff00
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK	 0xff
+
+#define BNX2_SHARED_HW_CFG_POWER_CONSUMED	0x00000038
+#define BNX2_SHARED_HW_CFG_CONFIG		0x0000003c
+#define BNX2_SHARED_HW_CFG_DESIGN_NIC		 0
+#define BNX2_SHARED_HW_CFG_DESIGN_LOM		 0x1
+#define BNX2_SHARED_HW_CFG_PHY_COPPER		 0
+#define BNX2_SHARED_HW_CFG_PHY_FIBER		 0x2
+#define BNX2_SHARED_HW_CFG_PHY_2_5G		 0x20
+#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE	 0x40
+#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS	 8
+#define BNX2_SHARED_HW_CFG_LED_MODE_MASK	 0x300
+#define BNX2_SHARED_HW_CFG_LED_MODE_MAC		 0
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1	 0x100
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2	 0x200
+#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX	 0x8000
+
+#define BNX2_SHARED_HW_CFG_CONFIG2		0x00000040
+#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK	 0x00fff000
+
+#define BNX2_DEV_INFO_BC_REV			0x0000004c
+
+#define BNX2_PORT_HW_CFG_MAC_UPPER		0x00000050
+#define BNX2_PORT_HW_CFG_UPPERMAC_MASK		 0xffff
+
+#define BNX2_PORT_HW_CFG_MAC_LOWER		0x00000054
+#define BNX2_PORT_HW_CFG_CONFIG			0x00000058
+#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK	 0x0000ffff
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK	 0x001f0000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN	 0x00000000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G	 0x00030000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G	 0x00040000
+
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER	0x00000068
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER	0x0000006c
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER	0x00000070
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER	0x00000074
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER	0x00000078
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER	0x0000007c
+
+#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2	0x000000b4
+
+#define BNX2_DEV_INFO_FORMAT_REV		0x000000c4
+#define BNX2_DEV_INFO_FORMAT_REV_MASK		 0xff000000
+#define BNX2_DEV_INFO_FORMAT_REV_ID		 ('A' << 24)
+
+#define BNX2_SHARED_FEATURE			0x000000c8
+#define BNX2_SHARED_FEATURE_MASK		 0xffffffff
+
+#define BNX2_PORT_FEATURE			0x000000d8
+#define BNX2_PORT2_FEATURE			0x00000014c
+#define BNX2_PORT_FEATURE_WOL_ENABLED		 0x01000000
+#define BNX2_PORT_FEATURE_MBA_ENABLED		 0x02000000
+#define BNX2_PORT_FEATURE_ASF_ENABLED		 0x04000000
+#define BNX2_PORT_FEATURE_IMD_ENABLED		 0x08000000
+#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK	 0xf
+#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED	 0x0
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64K		 0x1
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128K	 0x2
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256K	 0x3
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512K	 0x4
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1M		 0x5
+#define BNX2_PORT_FEATURE_BAR1_SIZE_2M		 0x6
+#define BNX2_PORT_FEATURE_BAR1_SIZE_4M		 0x7
+#define BNX2_PORT_FEATURE_BAR1_SIZE_8M		 0x8
+#define BNX2_PORT_FEATURE_BAR1_SIZE_16M		 0x9
+#define BNX2_PORT_FEATURE_BAR1_SIZE_32M		 0xa
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64M		 0xb
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128M	 0xc
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256M	 0xd
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512M	 0xe
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1G		 0xf
+
+#define BNX2_PORT_FEATURE_WOL			0xdc
+#define BNX2_PORT2_FEATURE_WOL			0x150
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS	 4
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK	 0x30
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE	 0
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC	 0x10
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI	 0x20
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI	 0x30
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK	 0xf
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG	 0
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF	 1
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL	 2
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF	 5
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL	 6
+#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000	 0x40
+#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400
+#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP	 0x800
+
+#define BNX2_PORT_FEATURE_MBA			0xe0
+#define BNX2_PORT2_FEATURE_MBA			0x154
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS	 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK	 0x3
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE	 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL	 1
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP	 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS	 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK	 0x3c
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG	 0
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF	 0x4
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL	 0x8
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF	 0xc
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL	 0x10
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF	 0x14
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL	 0x18
+#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE	 0x40
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S	 0
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B	 0x80
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS	 8
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK	 0xff00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED	 0
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K	 0x100
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K	 0x200
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K	 0x300
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K	 0x400
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K	 0x500
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K	 0x600
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K	 0x700
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K	 0x800
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K	 0x900
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K	 0xa00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M	 0xb00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M	 0xc00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M	 0xd00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M	 0xe00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M	 0xf00
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS	 16
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK	 0xf0000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS	 20
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK	 0x300000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO	 0
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS	 0x100000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H	 0x200000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H	 0x300000
+
+#define BNX2_PORT_FEATURE_IMD			0xe4
+#define BNX2_PORT2_FEATURE_IMD			0x158
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT	 0
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE	 1
+
+#define BNX2_PORT_FEATURE_VLAN			0xe8
+#define BNX2_PORT2_FEATURE_VLAN			0x15c
+#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK	 0xffff
+#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE	 0x10000
+
+#define BNX2_MFW_VER_PTR			0x00000014c
+
+#define BNX2_BC_STATE_RESET_TYPE		0x000001c0
+#define BNX2_BC_STATE_RESET_TYPE_SIG		 0x00005254
+#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK	 0x0000ffff
+#define BNX2_BC_STATE_RESET_TYPE_NONE	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00010000)
+#define BNX2_BC_STATE_RESET_TYPE_PCI	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00020000)
+#define BNX2_BC_STATE_RESET_TYPE_VAUX	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00030000)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK	 DRV_MSG_CODE
+#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					    DRV_MSG_CODE_RESET)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					     DRV_MSG_CODE_UNLOAD)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					       DRV_MSG_CODE_SHUTDOWN)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  DRV_MSG_CODE_WOL)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					   DRV_MSG_CODE_DIAG)
+#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					     (msg))
+
+#define BNX2_BC_RESET_TYPE			0x000001c0
+
+#define BNX2_BC_STATE				0x000001c4
+#define BNX2_BC_STATE_ERR_MASK			 0x0000ff00
+#define BNX2_BC_STATE_SIGN			 0x42530000
+#define BNX2_BC_STATE_SIGN_MASK			 0xffff0000
+#define BNX2_BC_STATE_BC1_START			 (BNX2_BC_STATE_SIGN | 0x1)
+#define BNX2_BC_STATE_GET_NVM_CFG1		 (BNX2_BC_STATE_SIGN | 0x2)
+#define BNX2_BC_STATE_PROG_BAR			 (BNX2_BC_STATE_SIGN | 0x3)
+#define BNX2_BC_STATE_INIT_VID			 (BNX2_BC_STATE_SIGN | 0x4)
+#define BNX2_BC_STATE_GET_NVM_CFG2		 (BNX2_BC_STATE_SIGN | 0x5)
+#define BNX2_BC_STATE_APPLY_WKARND		 (BNX2_BC_STATE_SIGN | 0x6)
+#define BNX2_BC_STATE_LOAD_BC2			 (BNX2_BC_STATE_SIGN | 0x7)
+#define BNX2_BC_STATE_GOING_BC2			 (BNX2_BC_STATE_SIGN | 0x8)
+#define BNX2_BC_STATE_GOING_DIAG		 (BNX2_BC_STATE_SIGN | 0x9)
+#define BNX2_BC_STATE_RT_FINAL_INIT		 (BNX2_BC_STATE_SIGN | 0x81)
+#define BNX2_BC_STATE_RT_WKARND			 (BNX2_BC_STATE_SIGN | 0x82)
+#define BNX2_BC_STATE_RT_DRV_PULSE		 (BNX2_BC_STATE_SIGN | 0x83)
+#define BNX2_BC_STATE_RT_FIOEVTS		 (BNX2_BC_STATE_SIGN | 0x84)
+#define BNX2_BC_STATE_RT_DRV_CMD		 (BNX2_BC_STATE_SIGN | 0x85)
+#define BNX2_BC_STATE_RT_LOW_POWER		 (BNX2_BC_STATE_SIGN | 0x86)
+#define BNX2_BC_STATE_RT_SET_WOL		 (BNX2_BC_STATE_SIGN | 0x87)
+#define BNX2_BC_STATE_RT_OTHER_FW		 (BNX2_BC_STATE_SIGN | 0x88)
+#define BNX2_BC_STATE_RT_GOING_D3		 (BNX2_BC_STATE_SIGN | 0x89)
+#define BNX2_BC_STATE_ERR_BAD_VERSION		 (BNX2_BC_STATE_SIGN | 0x0100)
+#define BNX2_BC_STATE_ERR_BAD_BC2_CRC		 (BNX2_BC_STATE_SIGN | 0x0200)
+#define BNX2_BC_STATE_ERR_BC1_LOOP		 (BNX2_BC_STATE_SIGN | 0x0300)
+#define BNX2_BC_STATE_ERR_UNKNOWN_CMD		 (BNX2_BC_STATE_SIGN | 0x0400)
+#define BNX2_BC_STATE_ERR_DRV_DEAD		 (BNX2_BC_STATE_SIGN | 0x0500)
+#define BNX2_BC_STATE_ERR_NO_RXP		 (BNX2_BC_STATE_SIGN | 0x0600)
+#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF		 (BNX2_BC_STATE_SIGN | 0x0700)
+
+#define BNX2_BC_STATE_CONDITION			0x000001c8
+#define BNX2_CONDITION_MFW_RUN_UNKNOWN		 0x00000000
+#define BNX2_CONDITION_MFW_RUN_IPMI		 0x00002000
+#define BNX2_CONDITION_MFW_RUN_UMP		 0x00004000
+#define BNX2_CONDITION_MFW_RUN_NCSI		 0x00006000
+#define BNX2_CONDITION_MFW_RUN_NONE		 0x0000e000
+#define BNX2_CONDITION_MFW_RUN_MASK		 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK		 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL		 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP		 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP		 0x00010000
+
+#define BNX2_BC_STATE_DEBUG_CMD			0x1dc
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE	 0x42440000
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK	 0xffff0000
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK	 0xffff
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE	 0xffff
+
+#define BNX2_FW_EVT_CODE_MB			0x354
+#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
+#define BNX2_FW_EVT_CODE_LINK_EVENT		 0x00000001
+
+#define BNX2_DRV_ACK_CAP_MB			0x364
+#define BNX2_DRV_ACK_CAP_SIGNATURE		 0x35450000
+#define BNX2_CAPABILITY_SIGNATURE_MASK		 0xFFFF0000
+
+#define BNX2_FW_CAP_MB				0x368
+#define BNX2_FW_CAP_SIGNATURE			 0xaa550000
+#define BNX2_FW_ACK_DRV_SIGNATURE		 0x52500000
+#define BNX2_FW_CAP_SIGNATURE_MASK		 0xffff0000
+#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE		 0x00000001
+#define BNX2_FW_CAP_REMOTE_PHY_PRESENT		 0x00000002
+#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN		 0x00000008
+#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN		 0x00000010
+#define BNX2_FW_CAP_CAN_KEEP_VLAN	(BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \
+					 BNX2_FW_CAP_MFW_CAN_KEEP_VLAN)
+
+#define BNX2_RPHY_SIGNATURE			0x36c
+#define BNX2_RPHY_LOAD_SIGNATURE		 0x5a5a5a5a
+
+#define BNX2_RPHY_FLAGS				0x370
+#define BNX2_RPHY_SERDES_LINK			0x374
+#define BNX2_RPHY_COPPER_LINK			0x378
+
+#define BNX2_ISCSI_INITIATOR			0x3dc
+#define BNX2_ISCSI_INITIATOR_EN			 0x00080000
+
+#define BNX2_ISCSI_MAX_CONN			0x3e4
+#define BNX2_ISCSI_MAX_CONN_MASK		 0xffff0000
+#define BNX2_ISCSI_MAX_CONN_SHIFT		 16
+
+#define HOST_VIEW_SHMEM_BASE			0x167c00
+
+#define DP_SHMEM_LINE(bp, offset)					\
+	netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n",	\
+		   offset,						\
+		   bnx2_shmem_rd(bp, offset),				\
+		   bnx2_shmem_rd(bp, offset + 4),			\
+		   bnx2_shmem_rd(bp, offset + 8),			\
+		   bnx2_shmem_rd(bp, offset + 12))
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h
new file mode 100644
index 0000000..b0f2cca
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2_fw.h
@@ -0,0 +1,89 @@
+/* bnx2_fw.h: QLogic bnx2 network driver.
+ *
+ * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+/* Initialized Values for the Completion Processor. */
+static const struct cpu_reg cpu_reg_com = {
+	.mode = BNX2_COM_CPU_MODE,
+	.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
+	.state = BNX2_COM_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_COM_CPU_REG_FILE,
+	.evmask = BNX2_COM_CPU_EVENT_MASK,
+	.pc = BNX2_COM_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_COM_CPU_INSTRUCTION,
+	.bp = BNX2_COM_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_COM_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the Command Processor. */
+static const struct cpu_reg cpu_reg_cp = {
+	.mode = BNX2_CP_CPU_MODE,
+	.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
+	.state = BNX2_CP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_CP_CPU_REG_FILE,
+	.evmask = BNX2_CP_CPU_EVENT_MASK,
+	.pc = BNX2_CP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_CP_CPU_INSTRUCTION,
+	.bp = BNX2_CP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_CP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the RX Processor. */
+static const struct cpu_reg cpu_reg_rxp = {
+	.mode = BNX2_RXP_CPU_MODE,
+	.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
+	.state = BNX2_RXP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_RXP_CPU_REG_FILE,
+	.evmask = BNX2_RXP_CPU_EVENT_MASK,
+	.pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_RXP_CPU_INSTRUCTION,
+	.bp = BNX2_RXP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_RXP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Patch-up Processor. */
+static const struct cpu_reg cpu_reg_tpat = {
+	.mode = BNX2_TPAT_CPU_MODE,
+	.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
+	.state = BNX2_TPAT_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_TPAT_CPU_REG_FILE,
+	.evmask = BNX2_TPAT_CPU_EVENT_MASK,
+	.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_TPAT_CPU_INSTRUCTION,
+	.bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_TPAT_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Processor. */
+static const struct cpu_reg cpu_reg_txp = {
+	.mode = BNX2_TXP_CPU_MODE,
+	.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
+	.state = BNX2_TXP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_TXP_CPU_REG_FILE,
+	.evmask = BNX2_TXP_CPU_EVENT_MASK,
+	.pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_TXP_CPU_INSTRUCTION,
+	.bp = BNX2_TXP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_TXP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
new file mode 100644
index 0000000..116762d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Broadcom 10-Gigabit ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
new file mode 100644
index 0000000..b5e64b0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -0,0 +1,2624 @@
+/* bnx2x.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_H
+#define BNX2X_H
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/pci_regs.h>
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+
+/* compilation time flags */
+
+/* define this to make the driver freeze on error to allow getting debug info
+ * (you will need to reboot afterwards) */
+/* #define BNX2X_STOP_ON_ERROR */
+
+#define DRV_MODULE_VERSION      "1.712.30-0"
+#define DRV_MODULE_RELDATE      "2014/02/10"
+#define BNX2X_BC_VER            0x040200
+
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
+
+#include "bnx2x_hsi.h"
+
+#include "../cnic_if.h"
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp)		((bp)->min_msix_vec_cnt)
+
+#include <linux/mdio.h>
+
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
+#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+	BNX2X_INT_MODE_MSIX,
+	BNX2X_INT_MODE_INTX,
+	BNX2X_INT_MODE_MSI
+};
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME		"bnx2x"
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF			0x0
+#define BNX2X_MSG_MCP			0x0010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS			0x0020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM			0x0040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE			0x0080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP			0x0100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP			0x0200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_IOV			0x0800000
+#define BNX2X_MSG_PTP			0x1000000
+#define BNX2X_MSG_IDLE			0x2000000 /* used for idle check */
+#define BNX2X_MSG_ETHTOOL		0x4000000
+#define BNX2X_MSG_DCB			0x8000000
+
+/* regular debug print */
+#define DP_INNER(fmt, ...)					\
+	pr_notice("[%s:%d(%s)]" fmt,				\
+		  __func__, __LINE__,				\
+		  bp->dev ? (bp->dev->name) : "?",		\
+		  ##__VA_ARGS__);
+
+#define DP(__mask, fmt, ...)					\
+do {								\
+	if (unlikely(bp->msg_enable & (__mask)))		\
+		DP_INNER(fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define DP_AND(__mask, fmt, ...)				\
+do {								\
+	if (unlikely((bp->msg_enable & (__mask)) == __mask))	\
+		DP_INNER(fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define DP_CONT(__mask, fmt, ...)				\
+do {								\
+	if (unlikely(bp->msg_enable & (__mask)))		\
+		pr_cont(fmt, ##__VA_ARGS__);			\
+} while (0)
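+
+/* Typical use (illustrative): DP(BNX2X_MSG_SP, "ramrod on cid %d\n", cid)
+ * prints only when BNX2X_MSG_SP is set in bp->msg_enable, whereas DP_AND()
+ * requires every bit of its mask to be set and DP_CONT() continues a line
+ * already started by DP().
+ */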
+
+/* errors debug print */
+#define BNX2X_DBG_ERR(fmt, ...)					\
+do {								\
+	if (unlikely(netif_msg_probe(bp)))			\
+		pr_err("[%s:%d(%s)]" fmt,			\
+		       __func__, __LINE__,			\
+		       bp->dev ? (bp->dev->name) : "?",		\
+		       ##__VA_ARGS__);				\
+} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(fmt, ...)					\
+do {								\
+	pr_err("[%s:%d(%s)]" fmt,				\
+	       __func__, __LINE__,				\
+	       bp->dev ? (bp->dev->name) : "?",			\
+	       ##__VA_ARGS__);					\
+} while (0)
+
+#define BNX2X_ERROR(fmt, ...)					\
+	pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(fmt, ...)				 \
+do {								 \
+	if (unlikely(netif_msg_probe(bp)))			 \
+		dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__);	 \
+} while (0)
+
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
+#ifdef BNX2X_STOP_ON_ERROR
+#define bnx2x_panic()				\
+do {						\
+	bp->panic = 1;				\
+	BNX2X_ERR("driver assert\n");		\
+	bnx2x_panic_dump(bp, true);		\
+} while (0)
+#else
+#define bnx2x_panic()				\
+do {						\
+	bp->panic = 1;				\
+	BNX2X_ERR("driver assert\n");		\
+	bnx2x_panic_dump(bp, false);		\
+} while (0)
+#endif
+
+#define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
+
+#define U64_LO(x)			((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x)			((u32)(((u64)(x)) >> 32))
+#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
+
+#define REG_ADDR(bp, offset)		((bp->regview) + (offset))
+
+#define REG_RD(bp, offset)		readl(REG_ADDR(bp, offset))
+#define REG_RD8(bp, offset)		readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset)		readw(REG_ADDR(bp, offset))
+
+#define REG_WR(bp, offset, val)		writel((u32)val, REG_ADDR(bp, offset))
+#define REG_WR8(bp, offset, val)	writeb((u8)val, REG_ADDR(bp, offset))
+#define REG_WR16(bp, offset, val)	writew((u16)val, REG_ADDR(bp, offset))
+
+#define REG_RD_IND(bp, offset)		bnx2x_reg_rd_ind(bp, offset)
+#define REG_WR_IND(bp, offset, val)	bnx2x_reg_wr_ind(bp, offset, val)
+
+#define REG_RD_DMAE(bp, offset, valp, len32) \
+	do { \
+		bnx2x_read_dmae(bp, offset, len32);\
+		memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
+	} while (0)
+
+#define REG_WR_DMAE(bp, offset, valp, len32) \
+	do { \
+		memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
+		bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+				 offset, len32); \
+	} while (0)
+
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+	REG_WR_DMAE(bp, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
+	do { \
+		memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
+		bnx2x_write_big_buf_wb(bp, addr, len32); \
+	} while (0)
+
+#define SHMEM_ADDR(bp, field)		(bp->common.shmem_base + \
+					 offsetof(struct shmem_region, field))
+#define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
+#define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+#define SHMEM2_ADDR(bp, field)		(bp->common.shmem2_base + \
+					 offsetof(struct shmem2_region, field))
+#define SHMEM2_RD(bp, field)		REG_RD(bp, SHMEM2_ADDR(bp, field))
+#define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field)		(bp->common.mf_cfg_base + \
+					 offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field)		(bp->common.mf2_cfg_base + \
+					 offsetof(struct mf2_cfg, field))
+
+#define MF_CFG_RD(bp, field)		REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val)	REG_WR(bp,\
+					       MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field)		REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field)		((bp)->common.shmem2_base &&	\
+					 (SHMEM2_RD((bp), size) >	\
+					 offsetof(struct shmem2_region, field)))
+
+#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)
+
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc  */
+#define HC_SP_INDEX_ETH_DEF_CONS		3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS			7
+
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS		6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS		4
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1
+
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
+/**
+ *  CIDs and CLIDs:
+ *  The CLIDs below are for func 0; the CLID for other
+ *  functions is calculated by the formula:
+ *
+ *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+enum {
+	BNX2X_ISCSI_ETH_CL_ID_IDX,
+	BNX2X_FCOE_ETH_CL_ID_IDX,
+	BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
+
+/* Use a value high enough to be above all the PFs, with a least significant
+ * nibble of 8, so that when cnic needs to come up with a CID for UIO to use
+ * to calculate the doorbell address according to the old doorbell
+ * configuration scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can
+ * come up with a valid number.  We must avoid cid 8 for iSCSI, since with
+ * this method the designated UIO cid would come out 0, which has special
+ * handling that doesn't suit us.  Therefore we round up (ceiling) to the
+ * closest cid whose least significant nibble is 8, and if that is 8 we move
+ * forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
+					 (bp)->max_cos)
+/* amount of cids traversed by UIO's DPM addition to doorbell */
+#define UIO_DPM				8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp)			(roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+					 UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp)		((UIO_ROUNDUP(bp) + UIO_DPM) % \
+					 (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp)		(UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp)		((UIO_DPM * 2) * \
+					 (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp)	(UIO_DPM_ALIGN(bp) + \
+					 (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted  - need this value for cid allocation */
+#define UIO_CID_PAD(bp)			(BNX2X_CNIC_START_ETH_CID(bp) - \
+					 BNX2X_1st_NON_L2_ETH_CID(bp))
+	/* iSCSI L2 */
+#define	BNX2X_ISCSI_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp))
+	/* FCoE L2 */
+#define	BNX2X_FCOE_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp) + 1)
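+
+/* Worked example of the DPM rounding above (illustrative values only):
+ * if BNX2X_1st_NON_L2_ETH_CID is 12, UIO_ROUNDUP gives 16, UIO_CID_OFFSET
+ * gives (16 + 8) % 16 = 8 and UIO_DPM_ALIGN gives 24, so BNX2X_ISCSI_ETH_CID
+ * is 0x18 and BNX2X_FCOE_ETH_CID is 0x19.
+ * If the first non-L2 cid is 6, UIO_DPM_ALIGN comes out as exactly 8, so
+ * UIO_DPM_CID0_OFFSET adds 16 and BNX2X_CNIC_START_ETH_CID is again 0x18.
+ */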
+
+#define CNIC_SUPPORT(bp)		((bp)->cnic_support)
+#define CNIC_ENABLED(bp)		((bp)->cnic_enabled)
+#define CNIC_LOADED(bp)			((bp)->cnic_loaded)
+#define FCOE_INIT(bp)			((bp)->fcoe_init)
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define SM_RX_ID			0
+#define SM_TX_ID			1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX		1
+#define FIRST_TX_COS_INDEX		0
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid, bp)		((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+				(cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+			((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if it exists) is at location max cos * num of RSS
+ * txdata for FWD (if it exists) is one location after FCoE
+ * txdata for OOO (if it exists) is one location after FWD
+ */
+enum {
+	FCOE_TXQ_IDX_OFFSET,
+	FWD_TXQ_IDX_OFFSET,
+	OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+
+/* fast path */
+/*
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
+ */
+struct sw_rx_bd {
+	u8		*data;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+	struct sk_buff	*skb;
+	u16		first_bd;
+	u8		flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD		(1<<0)
+#define BNX2X_HAS_SECOND_PBD		(1<<1)
+};
+
+struct sw_rx_page {
+	struct page	*page;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	unsigned int	offset;
+};
+
+union db_prod {
+	struct doorbell_set_prod data;
+	u32		raw;
+};
+
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \
+					ETH_MAX_AGGREGATION_QUEUES_E1 :\
+					ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT		16
+#define DROPLESS_FC_HEADROOM	100
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT		12
+#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT	0
+#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SHIFT		12
+#define SGE_PAGE_SIZE		(1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK		(~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr)	(((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
+#define SGE_PAGES		(SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE		min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+					    SGE_PAGES), 0xffff)
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES	2
+#define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
+#define NEXT_PAGE_SGE_DESC_CNT	2
+#define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
+/* RX_SGE_CNT is promised to be a power of 2 */
+#define RX_SGE_MASK		(RX_SGE_CNT - 1)
+#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE		(NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
+				  (MAX_RX_SGE_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+					(x) + 1)
+#define RX_SGE(x)		((x) & MAX_RX_SGE)
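+
+/* Illustrative wrap-around: assuming 8-byte SGE entries (512 per 4K page, so
+ * MAX_RX_SGE_CNT = 510), NEXT_SGE_IDX(508) is simply 509, while
+ * NEXT_SGE_IDX(509) is 512, skipping the two "next page" descriptors in
+ * slots 510 and 511.
+ */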
+
+/*
+ * The number of required SGEs is the sum of two terms:
+ * 1. The number of possibly open aggregations (the next packet for
+ *    these aggregations will probably consume an SGE immediately)
+ * 2. The rest of the BRB blocks divided by 2 (a block will consume a new
+ *    SGE only after placement on a BD for a new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \
+					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+						MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \
+				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
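+
+/* Worked example with assumed values (BRB_SIZE = 512, MAX_AGG_QS = 64,
+ * MAX_RX_SGE_CNT = 510): NUM_SGE_REQ = 64 + (512 - 64) / 2 = 288,
+ * NUM_SGE_PG_REQ = 1, so SGE_TH_LO = 288 + 1 * 2 = 290 and
+ * SGE_TH_HI = 290 + 100 = 390.
+ */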
+
+/* Manipulate a bit vector defined as an array of u64 */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ		64
+#define BIT_VEC64_ELEM_SHIFT		6
+#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+	do { \
+		el = ((el) | ((u64)0x1 << (bit))); \
+	} while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+	do { \
+		el = ((el) & (~((u64)0x1 << (bit)))); \
+	} while (0)
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			   (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			     (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+	((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/* Creates a bitmask of all ones in the less significant bits.
+   idx - index of the most significant bit in the created mask */
+#define BIT_VEC64_ONES_MASK(idx) \
+		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))
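+
+/* Illustrative usage with the fastpath sge_mask array defined below:
+ * bit index 130 lands in element 130 >> 6 = 2, at bit 130 & 63 = 2, e.g.
+ *
+ *	BIT_VEC64_SET_BIT(fp->sge_mask, 130);
+ *	if (BIT_VEC64_TEST_BIT(fp->sge_mask, 130))
+ *		BIT_VEC64_CLEAR_BIT(fp->sge_mask, 130);
+ */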
+
+/*******************************************************/
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN			(NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+union host_hc_status_block {
+	/* pointer to fp status block e1x */
+	struct host_hc_status_block_e1x *e1x_sb;
+	/* pointer to fp status block e2 */
+	struct host_hc_status_block_e2  *e2_sb;
+};
+
+struct bnx2x_agg_info {
+	/*
+	 * The first aggregation buffer is a data buffer; the rest are pages.
+	 * We will preallocate the data buffer for each aggregation when
+	 * we open the interface and will replace the BD at the consumer
+	 * with this one when we receive the TPA_START CQE in order to
+	 * keep the Rx BD ring consistent.
+	 */
+	struct sw_rx_bd		first_buf;
+	u8			tpa_state;
+#define BNX2X_TPA_START			1
+#define BNX2X_TPA_STOP			2
+#define BNX2X_TPA_ERROR			3
+	u8			placement_offset;
+	u16			parsing_flags;
+	u16			vlan_tag;
+	u16			len_on_bd;
+	u32			rxhash;
+	enum pkt_hash_types	rxhash_type;
+	u16			gro_size;
+	u16			full_page;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+	struct sw_tx_bd		*tx_buf_ring;
+
+	union eth_tx_bd_types	*tx_desc_ring;
+	dma_addr_t		tx_desc_mapping;
+
+	u32			cid;
+
+	union db_prod		tx_db;
+
+	u16			tx_pkt_prod;
+	u16			tx_pkt_cons;
+	u16			tx_bd_prod;
+	u16			tx_bd_cons;
+
+	unsigned long		tx_pkt;
+
+	__le16			*tx_cons_sb;
+
+	int			txq_index;
+	struct bnx2x_fastpath	*parent_fp;
+	int			tx_ring_size;
+};
+
+enum bnx2x_tpa_mode_t {
+	TPA_MODE_DISABLED,
+	TPA_MODE_LRO,
+	TPA_MODE_GRO
+};
+
+struct bnx2x_alloc_pool {
+	struct page	*page;
+	unsigned int	offset;
+};
+
+struct bnx2x_fastpath {
+	struct bnx2x		*bp; /* parent */
+
+	struct napi_struct	napi;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned long		busy_poll_state;
+#endif
+
+	union host_hc_status_block	status_blk;
+	/* chip independent shortcuts into sb structure */
+	__le16			*sb_index_values;
+	__le16			*sb_running_index;
+	/* chip independent shortcut into rx_prods_offset memory */
+	u32			ustorm_rx_prods_offset;
+
+	u32			rx_buf_size;
+	u32			rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
+	dma_addr_t		status_blk_mapping;
+
+	enum bnx2x_tpa_mode_t	mode;
+
+	u8			max_cos; /* actual number of active tx coses */
+	struct bnx2x_fp_txdata	*txdata_ptr[BNX2X_MULTI_TX_COS];
+
+	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
+	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
+
+	struct eth_rx_bd	*rx_desc_ring;
+	dma_addr_t		rx_desc_mapping;
+
+	union eth_rx_cqe	*rx_comp_ring;
+	dma_addr_t		rx_comp_mapping;
+
+	/* SGE ring */
+	struct eth_rx_sge	*rx_sge_ring;
+	dma_addr_t		rx_sge_mapping;
+
+	u64			sge_mask[RX_SGE_MASK_LEN];
+
+	u32			cid;
+
+	__le16			fp_hc_idx;
+
+	u8			index;		/* number in fp array */
+	u8			rx_queue;	/* index for skb_record */
+	u8			cl_id;		/* eth client id */
+	u8			cl_qzone_id;
+	u8			fw_sb_id;	/* status block number in FW */
+	u8			igu_sb_id;	/* status block number in HW */
+
+	u16			rx_bd_prod;
+	u16			rx_bd_cons;
+	u16			rx_comp_prod;
+	u16			rx_comp_cons;
+	u16			rx_sge_prod;
+	/* The last maximal completed SGE */
+	u16			last_max_sge;
+	__le16			*rx_cons_sb;
+	unsigned long		rx_pkt,
+				rx_calls;
+
+	/* TPA related */
+	struct bnx2x_agg_info	*tpa_info;
+#ifdef BNX2X_STOP_ON_ERROR
+	u64			tpa_queue_used;
+#endif
+	/* The size is calculated using the following:
+	     sizeof name field from netdev structure +
+	     4 ('-Xx-' string) +
+	     4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
+	char			name[FP_NAME_SIZE];
+
+	struct bnx2x_alloc_pool	page_pool;
+};
+
+#define bnx2x_fp(bp, nr, var)	((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp)	((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+enum bnx2x_fp_state {
+	BNX2X_STATE_FP_NAPI	= BIT(0), /* NAPI handler owns the queue */
+
+	BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+	BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+	BNX2X_STATE_FP_POLL_BIT = 2,
+	BNX2X_STATE_FP_POLL     = BIT(2), /* busy_poll owns the queue */
+
+	BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+{
+	WRITE_ONCE(fp->busy_poll_state, 0);
+}
+
+/* called from the device poll routine to get ownership of a FP */
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+	unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+	while (1) {
+		switch (old) {
+		case BNX2X_STATE_FP_POLL:
+			/* make sure bnx2x_fp_lock_poll() wont starve us */
+			set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+				&fp->busy_poll_state);
+			/* fallthrough */
+		case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+		if (unlikely(prev != old)) {
+			old = prev;
+			continue;
+		}
+		return true;
+	}
+}
+
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+	smp_wmb();
+	fp->busy_poll_state = 0;
+}
+
+/* called from bnx2x_low_latency_poll() */
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+	return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
+}
+
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+	smp_mb__before_atomic();
+	clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
+}
+
+/* true if a socket is polling */
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+	return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
+}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+	return !bnx2x_fp_ll_polling(fp);
+
+}
+#else
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
+
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+	return false;
+}
+
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+	return false;
+}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
+#define	FCOE_IDX_OFFSET		0
+
+#define FCOE_IDX(bp)		(BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+				 FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp)	(&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var)	(bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp)	(&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var)	(bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var)	(bnx2x_fcoe_fp(bp)-> \
+						txdata_ptr[FIRST_TX_COS_INDEX] \
+						->var)
+
+#define IS_ETH_FP(fp)		((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp)		((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
+
+/* MC hsi */
+#define MAX_FETCH_BD		13	/* HW max BDs per packet */
+#define RX_COPY_THRESH		92
+
+#define NUM_TX_RINGS		16
+#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
+#define NEXT_PAGE_TX_DESC_CNT	1
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
+#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD		(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
+				  (MAX_TX_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+					(x) + 1)
+#define TX_BD(x)		((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
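+
+/* Illustrative wrap-around: assuming 16-byte TX BD entries (256 per 4K page,
+ * so MAX_TX_DESC_CNT = 255), NEXT_TX_IDX(253) is 254, while NEXT_TX_IDX(254)
+ * is 256, skipping the single "next page" BD in slot 255.
+ */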
+
+/* number of NEXT_PAGE descriptors that may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds)	\
+				(((bds) + MAX_TX_DESC_CNT - 1) / \
+				 MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD		- describes the packet
+ * START_BD (split)	- includes the unpaged data segment for GSO
+ * PARSING_BD		- for TSO and CSUM data
+ * PARSING_BD2		- for encapsulation data
+ * Frag BDs		- describe pages for frags
+ */
+#define BDS_PER_TX_PKT		4
+#define MAX_BDS_PER_TX_PKT	(MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT	(MAX_BDS_PER_TX_PKT + \
+				 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
+
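+/* Worked example with assumed values: with MAX_SKB_FRAGS = 17 and 255 usable
+ * BDs per page, MAX_BDS_PER_TX_PKT = 17 + 4 = 21, NEXT_CNT_PER_TX_PKT(21) = 1
+ * and MAX_DESC_PER_TX_PKT = 22.
+ */
+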
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS		8
+#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define NEXT_PAGE_RX_DESC_CNT	2
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
+#define RX_DESC_MASK		(RX_DESC_CNT - 1)
+#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD		(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+
+/* dropless fc calculations for BDs
+ *
+ * Number of BDs should be the same as the number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ		BRB_SIZE(bp)
+#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+					      MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp)		(NUM_BD_REQ + \
+				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+				 FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
+
+#define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
+					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+					ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA		(max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA	(max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+								MIN_RX_AVAIL))
+
+#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
+				  (MAX_RX_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+					(x) + 1)
+#define RX_BD(x)		((x) & MAX_RX_BD)
+
+/*
+ * As long as a CQE is X times bigger than a BD entry, we have to allocate X
+ * times more pages for the CQ ring in order to keep it balanced with the BD
+ * ring
+ */
+#define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
+#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define NEXT_PAGE_RCQ_DESC_CNT	1
+#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
+#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
+				  (MAX_RCQ_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+					(x) + 1)
+#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
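+
+/* For example, assuming a 32-byte CQE and an 8-byte RX BD, CQE_BD_REL is 4,
+ * so NUM_RCQ_RINGS = 8 * 4 = 32 RCQ pages are used to match the 8 RX BD
+ * pages.
+ */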
+
+/* dropless fc calculations for RCQs
+ *
+ * Number of RCQs should be the same as the number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ		BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+					      MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \
+				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+				 FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+/* This is needed for determining last_max */
+#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))
+
+#define BNX2X_SWCID_SHIFT	17
+#define BNX2X_SWCID_MASK	((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+/* used on a CID received from the HW */
+#define SW_CID(x)			(le32_to_cpu(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x)			(le32_to_cpu(x) >> \
+					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr_hi), \
+						 le32_to_cpu((bd)->addr_lo))
+#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
+
+#define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
+#define BNX2X_DB_SHIFT			3	/* 8 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
+#define DOORBELL(bp, cid, val) \
+	do { \
+		writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
+	} while (0)
+
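+/* A minimal usage sketch (illustrative): ring the doorbell of a tx queue
+ * after updating its producer, e.g.
+ *
+ *	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ */
+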
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
+				 skb->csum_offset)
+#define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
+					  skb->csum_offset))
+
+#define pbd_tcp_flags(tcp_hdr)	(ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
+
+#define XMIT_PLAIN		0
+#define XMIT_CSUM_V4		(1 << 0)
+#define XMIT_CSUM_V6		(1 << 1)
+#define XMIT_CSUM_TCP		(1 << 2)
+#define XMIT_GSO_V4		(1 << 3)
+#define XMIT_GSO_V6		(1 << 4)
+#define XMIT_CSUM_ENC_V4	(1 << 5)
+#define XMIT_CSUM_ENC_V6	(1 << 6)
+#define XMIT_GSO_ENC_V4		(1 << 7)
+#define XMIT_GSO_ENC_V6		(1 << 8)
+
+#define XMIT_CSUM_ENC		(XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC		(XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+
+#define XMIT_CSUM		(XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO		(XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
+
+/* stuff added to make the code fit 80Col */
+#define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+				(((le16_to_cpu(flags) & \
+				   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
+				 == PRS_FLAG_OVERETH_IPV4)
+#define BNX2X_RX_SUM_FIX(cqe) \
+	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
+
+#define FP_USB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_ETH_RX_CQ_CONS		1
+
+#define HC_INDEX_OOO_TX_CQ_CONS		4
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+#define BNX2X_RX_SB_INDEX \
+	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
+
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+	(&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
+
+/* end of fast path */
+
+/* common */
+
+struct bnx2x_common {
+
+	u32			chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp)			(bp->common.chip_id & 0xfffffff0)
+
+#define CHIP_NUM(bp)			(bp->common.chip_id >> 16)
+#define CHIP_NUM_57710			0x164e
+#define CHIP_NUM_57711			0x164f
+#define CHIP_NUM_57711E			0x1650
+#define CHIP_NUM_57712			0x1662
+#define CHIP_NUM_57712_MF		0x1663
+#define CHIP_NUM_57712_VF		0x166f
+#define CHIP_NUM_57713			0x1651
+#define CHIP_NUM_57713E			0x1652
+#define CHIP_NUM_57800			0x168a
+#define CHIP_NUM_57800_MF		0x16a5
+#define CHIP_NUM_57800_VF		0x16a9
+#define CHIP_NUM_57810			0x168e
+#define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57810_VF		0x16af
+#define CHIP_NUM_57811			0x163d
+#define CHIP_NUM_57811_MF		0x163e
+#define CHIP_NUM_57811_VF		0x163f
+#define CHIP_NUM_57840_OBSOLETE		0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE	0x16ab
+#define CHIP_NUM_57840_4_10		0x16a1
+#define CHIP_NUM_57840_2_20		0x16a2
+#define CHIP_NUM_57840_MF		0x16a4
+#define CHIP_NUM_57840_VF		0x16ad
+#define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
+#define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_VF)
+#define CHIP_IS_57712_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_VF)
+#define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_VF)
+#define CHIP_IS_57811(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_VF)
+#define CHIP_IS_57840(bp)		\
+		((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+		 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+		 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp)	((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+				 (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_VF)
+#define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
+					 CHIP_IS_57711E(bp))
+#define CHIP_IS_57811xx(bp)		(CHIP_IS_57811(bp) || \
+					 CHIP_IS_57811_MF(bp) || \
+					 CHIP_IS_57811_VF(bp))
+#define CHIP_IS_E2(bp)			(CHIP_IS_57712(bp) || \
+					 CHIP_IS_57712_MF(bp) || \
+					 CHIP_IS_57712_VF(bp))
+#define CHIP_IS_E3(bp)			(CHIP_IS_57800(bp) || \
+					 CHIP_IS_57800_MF(bp) || \
+					 CHIP_IS_57800_VF(bp) || \
+					 CHIP_IS_57810(bp) || \
+					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57810_VF(bp) || \
+					 CHIP_IS_57811xx(bp) || \
+					 CHIP_IS_57840(bp) || \
+					 CHIP_IS_57840_MF(bp) || \
+					 CHIP_IS_57840_VF(bp))
+#define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define USES_WARPCORE(bp)		(CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET			(!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT			12
+#define CHIP_REV_MASK			(0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp)		(bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax			(0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx			(0x1 << CHIP_REV_SHIFT)
+/* assume maximum 5 revisions */
+#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV_VAL(bp) > 0x00005000)
+/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
+#define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
+					 !(CHIP_REV_VAL(bp) & 0x00001000))
+/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
+#define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
+					 (CHIP_REV_VAL(bp) & 0x00001000))
+
+#define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
+					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
+
+#define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
+#define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
+#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+					   (CHIP_REV_SHIFT + 1)) \
+						<< CHIP_REV_SHIFT)
+#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
+						CHIP_REV_SIM(bp) :\
+						CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure the Parser /
+ * Searcher for nic-only mode or for offload mode. Offload mode is configured if
+ * either the chip is E1x (where the MIC_MODE register is not applicable), or if
+ * cnic has already registered for this port (which means that the user wants
+ * storage services).
+ * 2. During cnic-related load, to know whether offload mode is already
+ * configured in the HW or still needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only on ports on which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp)		(!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
+
+	int			flash_size;
+#define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT		30000
+#define BNX2X_NVRAM_PAGE_SIZE			256
+
+	u32			shmem_base;
+	u32			shmem2_base;
+	u32			mf_cfg_base;
+	u32			mf2_cfg_base;
+
+	u32			hw_config;
+
+	u32			bc_ver;
+
+	u8			int_block;
+#define INT_BLOCK_HC			0
+#define INT_BLOCK_IGU			1
+#define INT_BLOCK_MODE_NORMAL		0
+#define INT_BLOCK_MODE_BW_COMP		2
+#define CHIP_INT_MODE_IS_NBC(bp)		\
+			(!CHIP_IS_E1x(bp) &&	\
+			!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+	u8			chip_port_mode;
+#define CHIP_4_PORT_MODE			0x0
+#define CHIP_2_PORT_MODE			0x1
+#define CHIP_PORT_MODE_NONE			0x2
+#define CHIP_MODE(bp)			(bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+	u32			boot_mode;
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+#define MAX_IGU_ATTN_ACK_TO       100
+/* end of common */
+
+/* port */
+
+struct bnx2x_port {
+	u32			pmf;
+
+	u32			link_config[LINK_CONFIG_SIZE];
+
+	u32			supported[LINK_CONFIG_SIZE];
+
+	u32			advertising[LINK_CONFIG_SIZE];
+
+	u32			phy_addr;
+
+	/* used to synchronize phy accesses */
+	struct mutex		phy_mutex;
+
+	u32			port_stx;
+
+	struct nig_stats	old_nig_stats;
+};
+
+/* end of port */
+
+#define STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* slow path */
+#define BNX2X_MAX_NUM_OF_VFS	64
+#define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */
+#define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
+#define BNX2X_VF_CIDS		(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID	BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES	64
+#define BNX2X_VF_ID_INVALID	0xFF
+
+/* the number of VF CIDs multiplied by the number of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE	512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
+ * status block represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require an FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ *    regular L2 queues is Y = X - 1
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1) / MF_factor
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ *    is Y + 1
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ *    slow-path interrupts) or Y + 2 if CNIC is supported (one additional
+ *    FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X or X + 1 if the FCoE
+ *    L2 queue is supported. The cid for the FCoE L2 queue is always X.
+ */
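+
+/* Illustrative budget, assuming a device exposing X = 16 FP-SBs with CNIC
+ * supported and no MF factor: Y = 15 regular L2 queues, 16 L2 queues once the
+ * FCoE queue is added, Y + 2 = 17 MSIX vectors (slow-path + CNIC), and 17 HW
+ * contexts with the FCoE cid being 16.
+ */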
+
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x		16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2
+
+union cdu_context {
+	struct eth_context eth;
+	char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW	2
+#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#define CNIC_ISCSI_CID_MAX	256
+#define CNIC_FCOE_CID_MAX	2048
+#define CNIC_CID_MAX		(CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+
+#define QM_ILT_PAGE_SZ_HW	0
+#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND		1024
+
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW	0
+#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+#define TM_CONN_NUM		(BNX2X_FIRST_VF_CID + \
+				 BNX2X_VF_CIDS + \
+				 CNIC_ISCSI_CID_MAX)
+#define TM_ILT_SZ		(8 * TM_CONN_NUM)
+#define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW	0
+#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS		10
+#define SRC_CONN_NUM		(1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ		(sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ		SRC_ILT_SZ
+#define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+#define MAX_DMAE_C		8
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+	union {
+		struct mac_configuration_cmd		e1x;
+		struct eth_classify_rules_ramrod_data	e2;
+	} mac_rdata;
+
+	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_rdata;
+
+	union {
+		struct tstorm_eth_mac_filter_config	e1x;
+		struct eth_filter_rules_ramrod_data	e2;
+	} rx_mode_rdata;
+
+	union {
+		struct mac_configuration_cmd		e1;
+		struct eth_multicast_rules_ramrod_data  e2;
+	} mcast_rdata;
+
+	struct eth_rss_update_ramrod_data	rss_rdata;
+
+	/* Queue State related ramrods are always sent under rtnl_lock */
+	union {
+		struct client_init_ramrod_data  init_data;
+		struct client_update_ramrod_data update_data;
+		struct tpa_update_ramrod_data tpa_data;
+	} q_rdata;
+
+	union {
+		struct function_start_data	func_start;
+		/* pfc configuration for DCBX ramrod */
+		struct flow_control_configuration pfc_config;
+	} func_rdata;
+
+	/* afex ramrod can not be a part of func_rdata union because these
+	 * events might arrive in parallel to other events from func_rdata.
+	 * Therefore, if they would have been defined in the same union,
+	 * data can get corrupted.
+	 */
+	union {
+		struct afex_vif_list_ramrod_data	viflist_data;
+		struct function_update_data		func_update;
+	} func_afex_rdata;
+
+	/* used by dmae command executer */
+	struct dmae_command		dmae[MAX_DMAE_C];
+
+	u32				stats_comp;
+	union mac_stats			mac_stats;
+	struct nig_stats		nig_stats;
+	struct host_port_stats		port_stats;
+	struct host_func_stats		func_stats;
+
+	u32				wb_comp;
+	u32				wb_data[4];
+
+	union drv_info_to_mcp		drv_info_to_mcp;
+};
+
+#define bnx2x_sp(bp, var)		(&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var) \
+		(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS		8
+
+struct attn_route {
+	u32 sig[5];
+};
+
+struct iro {
+	u32 base;
+	u16 m1;
+	u16 m2;
+	u16 m3;
+	u16 size;
+};
+
+struct hw_context {
+	union cdu_context *vcxt;
+	dma_addr_t cxt_mapping;
+	size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
+struct bnx2x_vfdb;
+
+enum bnx2x_recovery_state {
+	BNX2X_RECOVERY_DONE,
+	BNX2X_RECOVERY_INIT,
+	BNX2X_RECOVERY_WAIT,
+	BNX2X_RECOVERY_FAILED,
+	BNX2X_RECOVERY_NIC_LOADING
+};
+
+/*
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be a power of 2
+ */
+#define NUM_EQ_PAGES		1
+#define EQ_DESC_CNT_PAGE	(BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE	(EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC		(EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK		(NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL		(EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x)		((((x) & EQ_DESC_MAX_PAGE) == \
+				  (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x)		((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_EQ_CONS])
+
+/* This is the data that will be used to create a link report message.
+ * We will keep the data used for the last link report in order
+ * to prevent reporting the same link parameters twice.
+ */
+struct bnx2x_link_report_data {
+	u16 line_speed;			/* Effective line speed */
+	unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+	BNX2X_LINK_REPORT_FD,		/* Full DUPLEX */
+	BNX2X_LINK_REPORT_LINK_DOWN,
+	BNX2X_LINK_REPORT_RX_FC_ON,
+	BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
+enum {
+	BNX2X_PORT_QUERY_IDX,
+	BNX2X_PF_QUERY_IDX,
+	BNX2X_FCOE_QUERY_IDX,
+	BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+	struct stats_query_header hdr;
+	struct stats_query_entry query[FP_SB_MAX_E1x+
+		BNX2X_FIRST_QUEUE_QUERY_IDX];
+};
+
+struct bnx2x_fw_stats_data {
+	struct stats_counter		storm_counters;
+	struct per_port_stats		port;
+	struct per_pf_stats		pf;
+	struct fcoe_statistics_params	fcoe;
+	struct per_queue_stats		queue_stats[1];
+};
+
+/* Public slow path states */
+enum sp_rtnl_flag {
+	BNX2X_SP_RTNL_SETUP_TC,
+	BNX2X_SP_RTNL_TX_TIMEOUT,
+	BNX2X_SP_RTNL_FAN_FAILURE,
+	BNX2X_SP_RTNL_AFEX_F_UPDATE,
+	BNX2X_SP_RTNL_ENABLE_SRIOV,
+	BNX2X_SP_RTNL_VFPF_MCAST,
+	BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+	BNX2X_SP_RTNL_RX_MODE,
+	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+	BNX2X_SP_RTNL_TX_STOP,
+	BNX2X_SP_RTNL_GET_DRV_VERSION,
+	BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+	BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+};
+
+enum bnx2x_iov_flag {
+	BNX2X_IOV_HANDLE_VF_MSG,
+	BNX2X_IOV_HANDLE_FLR,
+};
+
+struct bnx2x_prev_path_list {
+	struct list_head list;
+	u8 bus;
+	u8 slot;
+	u8 path;
+	u8 aer;
+	u8 undi;
+};
+
+struct bnx2x_sp_objs {
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj q_obj;
+
+	/* VLANs object */
+	struct bnx2x_vlan_mac_obj vlan_obj;
+};
+
+struct bnx2x_fp_stats {
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
+	struct bnx2x_eth_q_stats eth_q_stats;
+	struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
+enum {
+	SUB_MF_MODE_UNKNOWN = 0,
+	SUB_MF_MODE_UFP,
+	SUB_MF_MODE_NPAR1_DOT_5,
+	SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+	struct list_head link;
+	u16 vid;
+	bool hw;
+};
+
+struct bnx2x {
+	/* Fields used in the tx and intr/napi performance paths
+	 * are grouped together in the beginning of the structure
+	 */
+	struct bnx2x_fastpath	*fp;
+	struct bnx2x_sp_objs	*sp_objs;
+	struct bnx2x_fp_stats	*fp_stats;
+	struct bnx2x_fp_txdata	*bnx2x_txq;
+	void __iomem		*regview;
+	void __iomem		*doorbells;
+	u16			db_size;
+
+	u8			pf_num;	/* absolute PF number */
+	u8			pfid;	/* per-path PF number */
+	int			base_fw_ndsb; /**/
+#define BP_PATH(bp)			(CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp)			(bp->pfid & 1)
+#define BP_FUNC(bp)			(bp->pfid)
+#define BP_ABS_FUNC(bp)			(bp->pf_num)
+#define BP_VN(bp)			((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp)		(CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp)			(BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn)		(BP_PORT(bp) +\
+	  (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
+#define BP_FW_MB_IDX(bp)		BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+
+#ifdef CONFIG_BNX2X_SRIOV
+	/* protects vf2pf mailbox from simultaneous access */
+	struct mutex		vf2pf_mutex;
+	/* vf pf channel mailbox contains request and response buffers */
+	struct bnx2x_vf_mbx_msg	*vf2pf_mbox;
+	dma_addr_t		vf2pf_mbox_mapping;
+
+	/* we set aside a copy of the acquire response */
+	struct pfvf_acquire_resp_tlv acquire_resp;
+
+	/* bulletin board for messages from pf to vf */
+	union pf_vf_bulletin   *pf2vf_bulletin;
+	dma_addr_t		pf2vf_bulletin_mapping;
+
+	union pf_vf_bulletin		shadow_bulletin;
+	struct pf_vf_bulletin_content	old_bulletin;
+
+	u16 requested_nr_virtfn;
+#endif /* CONFIG_BNX2X_SRIOV */
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	const struct iro	*iro_arr;
+#define IRO (bp->iro_arr)
+
+	enum bnx2x_recovery_state recovery_state;
+	int			is_leader;
+	struct msix_entry	*msix_table;
+
+	int			tx_ring_size;
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE		60
+#define ETH_MAX_PACKET_SIZE		1500
+#define ETH_MAX_JUMBO_PACKET_SIZE	9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+#define ETH_MAX_TPA_HEADER_SIZE		72
+
+	/* The max supported alignment is 256 (a shift of 8);
+	 * a minimal alignment shift of 6 is optimal for 57xxx HW performance
+	 */
+#define BNX2X_RX_ALIGN_SHIFT		max(6, min(8, L1_CACHE_SHIFT))
+
+	/* The FW uses 2 cache lines of alignment for the packet start and size.
+	 *
+	 * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END					\
+	max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT,			\
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
+
+	struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID			16
+#define DEF_SB_ID			HC_SP_SB_ID
+	__le16			def_idx;
+	__le16			def_att_idx;
+	u32			attn_state;
+	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+	/* slow path ring */
+	struct eth_spe		*spq;
+	dma_addr_t		spq_mapping;
+	u16			spq_prod_idx;
+	struct eth_spe		*spq_prod_bd;
+	struct eth_spe		*spq_last_bd;
+	__le16			*dsb_sp_prod;
+	atomic_t		cq_spq_left; /* ETH_XXX ramrods credit */
+	/* used to synchronize spq accesses */
+	spinlock_t		spq_lock;
+
+	/* event queue */
+	union event_ring_elem	*eq_ring;
+	dma_addr_t		eq_mapping;
+	u16			eq_prod;
+	u16			eq_cons;
+	__le16			*eq_cons_sb;
+	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
+
+	/* Counter for marking that there is a STAT_QUERY ramrod pending */
+	u16			stats_pending;
+	/*  Counter for completed statistics ramrods */
+	u16			stats_comp;
+
+	/* End of fields used in the performance code paths */
+
+	int			panic;
+	int			msg_enable;
+
+	u32			flags;
+#define PCIX_FLAG			(1 << 0)
+#define PCI_32BIT_FLAG			(1 << 1)
+#define ONE_PORT_FLAG			(1 << 2)
+#define NO_WOL_FLAG			(1 << 3)
+#define USING_MSIX_FLAG			(1 << 5)
+#define USING_MSI_FLAG			(1 << 6)
+#define DISABLE_MSI_FLAG		(1 << 7)
+#define NO_MCP_FLAG			(1 << 9)
+#define MF_FUNC_DIS			(1 << 11)
+#define OWN_CNIC_IRQ			(1 << 12)
+#define NO_ISCSI_OOO_FLAG		(1 << 13)
+#define NO_ISCSI_FLAG			(1 << 14)
+#define NO_FCOE_FLAG			(1 << 15)
+#define BC_SUPPORTS_PFC_STATS		(1 << 17)
+#define TX_SWITCHING			(1 << 18)
+#define BC_SUPPORTS_FCOE_FEATURES	(1 << 19)
+#define USING_SINGLE_MSIX_FLAG		(1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
+#define IS_VF_FLAG			(1 << 22)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 23)
+#define HAS_PHYS_PORT_ID		(1 << 24)
+#define AER_ENABLED			(1 << 25)
+#define PTP_SUPPORTED			(1 << 26)
+#define TX_TIMESTAMPING_EN		(1 << 27)
+
+#define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp)			((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp)			(!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp)			false
+#define IS_PF(bp)			true
+#endif
+
+#define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
+#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
+
+	u8			cnic_support;
+	bool			cnic_enabled;
+	bool			cnic_loaded;
+	struct cnic_eth_dev	*(*cnic_probe)(struct net_device *);
+
+	/* Flag that indicates that we can start looking for FCoE L2 queue
+	 * completions in the default status block.
+	 */
+	bool			fcoe_init;
+
+	int			mrrs;
+
+	struct delayed_work	sp_task;
+	struct delayed_work	iov_task;
+
+	atomic_t		interrupt_occurred;
+	struct delayed_work	sp_rtnl_task;
+
+	struct delayed_work	period_task;
+	struct timer_list	timer;
+	int			current_interval;
+
+	u16			fw_seq;
+	u16			fw_drv_pulse_wr_seq;
+	u32			func_stx;
+
+	struct link_params	link_params;
+	struct link_vars	link_vars;
+	u32			link_cnt;
+	struct bnx2x_link_report_data last_reported_link;
+
+	struct mdio_if_info	mdio;
+
+	struct bnx2x_common	common;
+	struct bnx2x_port	port;
+
+	struct cmng_init	cmng;
+
+	u32			mf_config[E1HVN_MAX];
+	u32			mf_ext_config;
+	u32			path_has_ovlan; /* E3 */
+	u16			mf_ov;
+	u8			mf_mode;
+#define IS_MF(bp)		(bp->mf_mode != 0)
+#define IS_MF_SI(bp)		(bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp)		(bp->mf_mode == MULTI_FUNCTION_SD)
+#define IS_MF_AFEX(bp)		(bp->mf_mode == MULTI_FUNCTION_AFEX)
+	u8			mf_sub_mode;
+#define IS_MF_UFP(bp)		(IS_MF_SD(bp) && \
+				 bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp)		(IS_MF_SD(bp) && \
+				 bp->mf_sub_mode == SUB_MF_MODE_BD)
+
+	u8			wol;
+
+	int			rx_ring_size;
+
+	u16			tx_quick_cons_trip_int;
+	u16			tx_quick_cons_trip;
+	u16			tx_ticks_int;
+	u16			tx_ticks;
+
+	u16			rx_quick_cons_trip_int;
+	u16			rx_quick_cons_trip;
+	u16			rx_ticks_int;
+	u16			rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT		(0xff*BNX2X_BTR)
+
+	u32			lin_cnt;
+
+	u16			state;
+#define BNX2X_STATE_CLOSED		0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD	0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT	0x2000
+#define BNX2X_STATE_OPEN		0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+
+#define BNX2X_STATE_DIAG		0xe000
+#define BNX2X_STATE_ERROR		0xf000
+
+#define BNX2X_MAX_PRIORITY		8
+	int			num_queues;
+	uint			num_ethernet_queues;
+	uint			num_cnic_queues;
+	int			disable_tpa;
+
+	u32			rx_mode;
+#define BNX2X_RX_MODE_NONE		0
+#define BNX2X_RX_MODE_NORMAL		1
+#define BNX2X_RX_MODE_ALLMULTI		2
+#define BNX2X_RX_MODE_PROMISC		3
+#define BNX2X_MAX_MULTICAST		64
+
+	u8			igu_dsb_id;
+	u8			igu_base_sb;
+	u8			igu_sb_cnt;
+	u8			min_msix_vec_cnt;
+
+	u32			igu_base_addr;
+	dma_addr_t		def_status_blk_mapping;
+
+	struct bnx2x_slowpath	*slowpath;
+	dma_addr_t		slowpath_mapping;
+
+	/* Mechanism protecting the drv_info_to_mcp */
+	struct mutex		drv_info_mutex;
+	bool			drv_info_mng_owner;
+
+	/* Total number of FW statistics requests */
+	u8			fw_stats_num;
+
+	/*
+	 * This is a memory buffer that will contain both statistics
+	 * ramrod request and data.
+	 */
+	void			*fw_stats;
+	dma_addr_t		fw_stats_mapping;
+
+	/*
+	 * FW statistics request shortcut (points at the
+	 * beginning of fw_stats buffer).
+	 */
+	struct bnx2x_fw_stats_req	*fw_stats_req;
+	dma_addr_t			fw_stats_req_mapping;
+	int				fw_stats_req_sz;
+
+	/*
+	 * FW statistics data shortcut (points at the beginning of
+	 * fw_stats buffer + fw_stats_req_sz).
+	 */
+	struct bnx2x_fw_stats_data	*fw_stats_data;
+	dma_addr_t			fw_stats_data_mapping;
+	int				fw_stats_data_sz;
+
+	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
+	 * context size we need 8 ILT entries.
+	 */
+#define ILT_MAX_L2_LINES	32
+	struct hw_context	context[ILT_MAX_L2_LINES];
+
+	struct bnx2x_ilt	*ilt;
+#define BP_ILT(bp)		((bp)->ilt)
+#define ILT_MAX_LINES		256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
+ * to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
+
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
+ */
+
+#define BNX2X_L2_CID_COUNT(bp)	(BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+				+ CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
+#define BNX2X_L2_MAX_CID(bp)	(BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+				+ CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
+#define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+					ILT_PAGE_CIDS))
+
+	int			qm_cid_count;
+
+	bool			dropless_fc;
+
+	void			*t2;
+	dma_addr_t		t2_mapping;
+	struct cnic_ops	__rcu	*cnic_ops;
+	void			*cnic_data;
+	u32			cnic_tag;
+	struct cnic_eth_dev	cnic_eth_dev;
+	union host_hc_status_block cnic_sb;
+	dma_addr_t		cnic_sb_mapping;
+	struct eth_spe		*cnic_kwq;
+	struct eth_spe		*cnic_kwq_prod;
+	struct eth_spe		*cnic_kwq_cons;
+	struct eth_spe		*cnic_kwq_last;
+	u16			cnic_kwq_pending;
+	u16			cnic_spq_pending;
+	u8			fip_mac[ETH_ALEN];
+	struct mutex		cnic_mutex;
+	struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+	/* Start index of the "special" (CNIC related) L2 clients */
+	u8				cnic_base_cl_id;
+
+	int			dmae_ready;
+	/* used to synchronize dmae accesses */
+	spinlock_t		dmae_lock;
+
+	/* used to protect the FW mail box */
+	struct mutex		fw_mb_mutex;
+
+	/* used to synchronize stats collecting */
+	int			stats_state;
+
+	/* used to synchronize statistics handling between concurrent threads */
+	struct semaphore	stats_lock;
+
+	/* used by dmae command loader */
+	struct dmae_command	stats_dmae;
+	int			executer_idx;
+
+	u16			stats_counter;
+	struct bnx2x_eth_stats	eth_stats;
+	struct host_func_stats		func_stats;
+	struct bnx2x_eth_stats_old	eth_stats_old;
+	struct bnx2x_net_stats_old	net_stats_old;
+	struct bnx2x_fw_port_stats_old	fw_stats_old;
+	bool			stats_init;
+
+	struct z_stream_s	*strm;
+	void			*gunzip_buf;
+	dma_addr_t		gunzip_mapping;
+	int			gunzip_outlen;
+#define FW_BUF_SIZE			0x8000
+#define GUNZIP_BUF(bp)			(bp->gunzip_buf)
+#define GUNZIP_PHYS(bp)			(bp->gunzip_mapping)
+#define GUNZIP_OUTLEN(bp)		(bp->gunzip_outlen)
+
+	struct raw_op		*init_ops;
+	/* Init blocks offsets inside init_ops */
+	u16			*init_ops_offsets;
+	/* Data blob - has 32 bit granularity */
+	u32			*init_data;
+	u32			init_mode_flags;
+#define INIT_MODE_FLAGS(bp)	(bp->init_mode_flags)
+	/* Zipped PRAM blobs - raw data */
+	const u8		*tsem_int_table_data;
+	const u8		*tsem_pram_data;
+	const u8		*usem_int_table_data;
+	const u8		*usem_pram_data;
+	const u8		*xsem_int_table_data;
+	const u8		*xsem_pram_data;
+	const u8		*csem_int_table_data;
+	const u8		*csem_pram_data;
+#define INIT_OPS(bp)			(bp->init_ops)
+#define INIT_OPS_OFFSETS(bp)		(bp->init_ops_offsets)
+#define INIT_DATA(bp)			(bp->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(bp)	(bp->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(bp)		(bp->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(bp)	(bp->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(bp)		(bp->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(bp)	(bp->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(bp)		(bp->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(bp)	(bp->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(bp)		(bp->csem_pram_data)
+
+#define PHY_FW_VER_LEN			20
+	char			fw_ver[32];
+	const struct firmware	*firmware;
+
+	struct bnx2x_vfdb	*vfdb;
+#define IS_SRIOV(bp)		((bp)->vfdb)
+
+	/* DCB support on/off */
+	u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF			0
+#define BNX2X_DCB_STATE_ON			1
+
+	/* DCBX engine mode */
+	int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF			0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF		1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON		2
+#define BNX2X_DCBX_ENABLED_INVALID		(-1)
+
+	bool dcbx_mode_uset;
+
+	struct bnx2x_config_dcbx_params		dcbx_config_params;
+	struct bnx2x_dcbx_port_params		dcbx_port_params;
+	int					dcb_version;
+
+	/* CAM credit pools */
+	struct bnx2x_credit_pool_obj		vlans_pool;
+
+	struct bnx2x_credit_pool_obj		macs_pool;
+
+	/* RX_MODE object */
+	struct bnx2x_rx_mode_obj		rx_mode_obj;
+
+	/* MCAST object */
+	struct bnx2x_mcast_obj			mcast_obj;
+
+	/* RSS configuration object */
+	struct bnx2x_rss_config_obj		rss_conf_obj;
+
+	/* Function State controlling object */
+	struct bnx2x_func_sp_obj		func_obj;
+
+	unsigned long				sp_state;
+
+	/* operation indication for the sp_rtnl task */
+	unsigned long				sp_rtnl_state;
+
+	/* Indication of the IOV tasks */
+	unsigned long				iov_task_state;
+
+	/* DCBX Negotiation results */
+	struct dcbx_features			dcbx_local_feat;
+	u32					dcbx_error;
+
+#ifdef BCM_DCBNL
+	struct dcbx_features			dcbx_remote_feat;
+	u32					dcbx_remote_flags;
+#endif
+	/* AFEX: store default vlan used */
+	int					afex_def_vlan_tag;
+	enum mf_cfg_afex_vlan_mode		afex_vlan_mode;
+	u32					pending_max;
+
+	/* multiple tx classes of service */
+	u8					max_cos;
+
+	/* priority to cos mapping */
+	u8					prio_to_cos[8];
+
+	int fp_array_size;
+	u32 dump_preset_idx;
+
+	u8					phys_port_id[ETH_ALEN];
+
+	/* PTP related context */
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_info;
+	struct work_struct ptp_task;
+	struct cyclecounter cyclecounter;
+	struct timecounter timecounter;
+	bool timecounter_init_done;
+	struct sk_buff *ptp_tx_skb;
+	unsigned long ptp_tx_start;
+	bool hwtstamp_ioctl_called;
+	u16 tx_type;
+	u16 rx_filter;
+
+	struct bnx2x_link_report_data		vf_link_vars;
+	struct list_head vlan_reg;
+	u16 vlan_cnt;
+	u16 vlan_credit;
+	u16 vxlan_dst_port;
+	u8 vxlan_dst_port_count;
+	bool accept_any_vlan;
+};
+
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp)	(BNX2X_NUM_QUEUES(bp) - \
+					 (bp)->num_cnic_queues)
+#define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
+
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
+
+#define BNX2X_MAX_QUEUES(bp)	BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp)	(BNX2X_NUM_ETH_QUEUES(bp) > 1) */
+
+#define RSS_IPV4_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+struct bnx2x_func_init_params {
+	/* dma */
+	bool		spq_active;
+	dma_addr_t	spq_map;
+	u16		spq_prod;
+
+	u16		func_id;	/* abs fid */
+	u16		pf_id;
+};
+
+#define for_each_cnic_queue(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_eth_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+	for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var)			\
+	for ((var) = 0;						\
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) :	\
+		      BNX2X_NUM_ETH_QUEUES(bp));		\
+	     (var)++)						\
+		if (skip_rx_queue(bp, var))			\
+			continue;				\
+		else
+
+#define for_each_rx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_rx_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_rx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_rx_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var)			\
+	for ((var) = 0;						\
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) :	\
+		      BNX2X_NUM_ETH_QUEUES(bp));		\
+	     (var)++)						\
+		if (skip_tx_queue(bp, var))			\
+			continue;				\
+		else
+
+#define for_each_tx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_tx_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_tx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_tx_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_nondefault_queue(bp, var) \
+	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+	for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
+/* skip rx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp:			driver handle
+ * @mac:		MAC to configure
+ * @obj:		MAC object handle
+ * @set:		if 'true' add a new MAC, otherwise - delete
+ * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags);
+
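+/* A minimal call sketch (RAMROD_COMP_WAIT and BNX2X_ETH_MAC are assumed
+ * flag/type names, shown for illustration only):
+ *
+ *	unsigned long ramrod_flags = 0;
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ *			       true, BNX2X_ETH_MAC, &ramrod_flags);
+ */
+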
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags);
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp:			driver handle
+ * @mac_obj:		MAC object handle
+ * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp:	if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp);
+
+/* Init Function API  */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+		    u8 vf_valid, int fw_sb_id, int igu_sb_id);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
+
+/* dmae */
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+		      u32 len32);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+		      bool with_comp, u8 comp_type);
+
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+			       u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+			       u32 *comp);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+				    char *msg, u32 poll_cnt);
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int cmd_type);
+void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+
+bool bnx2x_port_after_undi(struct bnx2x *bp);
+
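+/* Poll register 'reg' until it reads 'expected' or roughly 'ms' milliseconds
+ * have elapsed, sleeping 'wait' milliseconds between reads. Returns the last
+ * value read; callers compare it against 'expected' to detect a timeout.
+ */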
+static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
+			   int wait)
+{
+	u32 val;
+
+	do {
+		val = REG_RD(bp, reg);
+		if (val == expected)
+			break;
+		ms -= wait;
+		msleep(wait);
+
+	} while (ms > 0);
+
+	return val;
+}
+
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+			    bool is_pf);
+
+#define BNX2X_ILT_ZALLOC(x, y, size)					\
+	x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define ILOG2(x)	(ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES	(3072)
+/* In the 57710/11 we use the whole table since we have 8 functions.
+ * In the 57712 we have only 4 functions, but use the same size per function,
+ * so only half of the table is in use.
+ */
+#define ILT_PER_FUNC		(ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
+/*
+ * The physical address is shifted right by 12 bits and a '1' valid bit is
+ * set at the 53rd bit of the result. Since this is a wide register(TM),
+ * we split it into two 32 bit writes.
+ */
+#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
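+/* Worked example (illustrative): for a physical address of 0x800000000 (32GB),
+ * ONCHIP_ADDR1() yields 0x00800000 (the address shifted right by 12 bits) and
+ * ONCHIP_ADDR2() yields 0x00100000 (just the valid bit, since bits 44..63 of
+ * the address are zero).
+ */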
+
+/* load/unload mode */
+#define LOAD_NORMAL			0
+#define LOAD_OPEN			1
+#define LOAD_DIAG			2
+#define LOAD_LOOPBACK_EXT		3
+#define UNLOAD_NORMAL			0
+#define UNLOAD_CLOSE			1
+#define UNLOAD_RECOVERY			2
+
+/* DMAE command defines */
+#define DMAE_TIMEOUT			-1
+#define DMAE_PCI_ERROR			-2	/* E2 and onward */
+#define DMAE_NOT_RDY			-3
+#define DMAE_PCI_ERR_FLAG		0x80000000
+
+#define DMAE_SRC_PCI			0
+#define DMAE_SRC_GRC			1
+
+#define DMAE_DST_NONE			0
+#define DMAE_DST_PCI			1
+#define DMAE_DST_GRC			2
+
+#define DMAE_COMP_PCI			0
+#define DMAE_COMP_GRC			1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR		0
+#define DMAE_COM_SET_ERR		1
+
+#define DMAE_CMD_SRC_PCI		(DMAE_SRC_PCI << \
+						DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC		(DMAE_SRC_GRC << \
+						DMAE_COMMAND_SRC_SHIFT)
+
+#define DMAE_CMD_DST_PCI		(DMAE_DST_PCI << \
+						DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC		(DMAE_DST_GRC << \
+						DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI		(DMAE_COMP_PCI << \
+						DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC		(DMAE_COMP_GRC << \
+						DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE		DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP	(0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP	(1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP	(2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP	(3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0			0
+#define DMAE_CMD_PORT_1			DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET		DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET		DMAE_COMMAND_DST_RESET
+#define DMAE_CMD_E1HVN_SHIFT		DMAE_COMMAND_E1HVN_SHIFT
+
+#define DMAE_SRC_PF			0
+#define DMAE_SRC_VF			1
+
+#define DMAE_DST_PF			0
+#define DMAE_DST_VF			1
+
+#define DMAE_C_SRC			0
+#define DMAE_C_DST			1
+
+#define DMAE_LEN32_RD_MAX		0x80
+#define DMAE_LEN32_WR_MAX(bp)		(CHIP_IS_E1(bp) ? 0x400 : 0x2000)
+
+#define DMAE_COMP_VAL			0x60d0d0ae /* E2 and on - upper bit
+						    * indicates error
+						    */
+
+#define MAX_DMAE_C_PER_PORT		8
+#define INIT_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+					 BP_VN(bp))
+#define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+					 E1HVN_MAX)
+
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH		0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT		20
+#define PCICFG_LINK_SPEED		0xf0000
+#define PCICFG_LINK_SPEED_SHIFT		16
+
+#define BNX2X_NUM_TESTS_SF		7
+#define BNX2X_NUM_TESTS_MF		3
+#define BNX2X_NUM_TESTS(bp)		(IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+					     IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
+
+#define BNX2X_PHY_LOOPBACK		0
+#define BNX2X_MAC_LOOPBACK		1
+#define BNX2X_EXT_LOOPBACK		2
+#define BNX2X_PHY_LOOPBACK_FAILED	1
+#define BNX2X_MAC_LOOPBACK_FAILED	2
+#define BNX2X_EXT_LOOPBACK_FAILED	3
+#define BNX2X_LOOPBACK_FAILED		(BNX2X_MAC_LOOPBACK_FAILED | \
+					 BNX2X_PHY_LOOPBACK_FAILED)
+
+#define STROM_ASSERT_ARRAY_SIZE		50
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
+					 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
+					 (x))
+
+#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
+
+#define BNX2X_BTR			4
+#define MAX_SPQ_PENDING			8
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE					100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC			400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES					160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES						100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH				32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF	((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
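+/* With the defaults above this evaluates to
+ * (32768 + 160000) * 8 * 100 = 154214400
+ */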
+/* Memory of the fairness algorithm - 2 cycles */
+#define FAIR_MEM					2
+
+#define ATTN_NIG_FOR_FUNC		(1L << 8)
+#define ATTN_SW_TIMER_4_FUNC		(1L << 9)
+#define GPIO_2_FUNC			(1L << 10)
+#define GPIO_3_FUNC			(1L << 11)
+#define GPIO_4_FUNC			(1L << 12)
+#define ATTN_GENERAL_ATTN_1		(1L << 13)
+#define ATTN_GENERAL_ATTN_2		(1L << 14)
+#define ATTN_GENERAL_ATTN_3		(1L << 15)
+#define ATTN_GENERAL_ATTN_4		(1L << 13)
+#define ATTN_GENERAL_ATTN_5		(1L << 14)
+#define ATTN_GENERAL_ATTN_6		(1L << 15)
+
+#define ATTN_HARD_WIRED_MASK		0xff00
+#define ATTENTION_ID			4
+
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \
+				 IS_MF_FCOE_AFEX(bp))
+
+/* stuff added to make the code fit 80Col */
+
+#define BNX2X_PMF_LINK_ASSERT \
+	GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+#define BNX2X_MC_ASSERT_BITS \
+	(GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+	GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT	GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV		(GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define HW_INTERRUT_ASSERT_SET_0 \
+				(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_0	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_1 \
+				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+			     AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_2 \
+				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+				 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_2	(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
+		(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
+			      AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
+#define MULTI_MASK			0x7f
+
+#define DEF_USB_FUNC_OFF	offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF	offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF	offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF	offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+			offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+			offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+			offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+			offsetof(struct tstorm_def_status_block, segment)
+
+#define BNX2X_SP_DSB_INDEX \
+		(&bp->def_status_blk->sp_sb.\
+					index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
+#define CAM_IS_INVALID(x) \
+	(GET_FLAG(x.flags, \
+	MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+	(T_ETH_MAC_COMMAND_INVALIDATE))
+
+/* Number of u32 elements in MC hash array */
+#define MC_HASH_SIZE			8
+#define MC_HASH_OFFSET(bp, i)		(BAR_TSTRORM_INTMEM + \
+	TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
+
+#ifndef PXP2_REG_PXP2_INT_STS
+#define PXP2_REG_PXP2_INT_STS		PXP2_REG_PXP2_INT_STS_0
+#endif
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define BNX2X_VPD_LEN			128
+#define VENDOR_ID_LEN			4
+
+#define VF_ACQUIRE_THRESH		3
+#define VF_ACQUIRE_MAC_FILTERS		1
+#define VF_ACQUIRE_MC_FILTERS		10
+#define VF_ACQUIRE_VLAN_FILTERS		2 /* VLAN0 + 'real' VLAN */
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+			    (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE			0
+#define CMNG_FNS_MINMAX			1
+
+#define HC_SEG_ACCESS_DEF		0   /*Driver decision 0-3*/
+#define HC_SEG_ACCESS_ATTN		4
+#define HC_SEG_ACCESS_NORM		0   /*Driver decision 0-1*/
+
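+/* Per-channel DMAE GO registers: writing 1 to dmae_reg_go_c[idx] kicks off
+ * the DMAE command loaded for channel 'idx' (see bnx2x_post_dmae()).
+ */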
+static const u32 dmae_reg_go_c[] = {
+	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+#define BNX2X_MF_SD_PROTOCOL(bp) \
+	((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
+	(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \
+	(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
+#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
+#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp))
+
+#define IS_MF_ISCSI_ONLY(bp)    (IS_MF_ISCSI_SD(bp) ||  IS_MF_ISCSI_SI(bp))
+
+#define BNX2X_MF_EXT_PROTOCOL_MASK					\
+				(MACP_FUNC_CFG_FLAGS_ETHERNET |		\
+				 MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD |	\
+				 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_MF_EXT_PROT(bp)	((bp)->mf_ext_config &			\
+				 BNX2X_MF_EXT_PROTOCOL_MASK)
+
+#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)				\
+		(BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)				\
+		(BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp)				\
+		(BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp)						\
+		(IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))
+
+#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)				\
+				(IS_MF_SD(bp) &&			\
+				 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) ||	\
+				  BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)				\
+				(IS_MF_SI(bp) &&			\
+				 (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) ||	\
+				  BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_STORAGE_PERSONALITY_ONLY(bp)				\
+			(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) ||	\
+			 IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
+
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
+
+#define SET_FLAG(value, mask, flag) \
+	do {\
+		(value) &= ~(mask);\
+		(value) |= ((flag) << (mask##_SHIFT));\
+	} while (0)
+
+#define GET_FLAG(value, mask) \
+	(((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+	(((value) & (fname##_MASK)) >> (fname##_SHIFT))
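+
+/* Usage sketch (FOO_MASK and FOO_MASK_SHIFT are hypothetical names): given
+ * #define FOO_MASK 0x30 and #define FOO_MASK_SHIFT 4,
+ * SET_FLAG(val, FOO_MASK, 2) clears bits 5:4 of 'val' and stores 2 there,
+ * and GET_FLAG(val, FOO_MASK) reads those bits back as 2.
+ */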
+
+enum {
+	SWITCH_UPDATE,
+	AFEX_UPDATE,
+};
+
+#define NUM_MACS	8
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
+#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
new file mode 100644
index 0000000..e5911cc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,5150 @@
+/* bnx2x_cmn.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/crash_dump.h>
+#include <net/tcp.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/busy_poll.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_rx_queue_cnic(bp, i) {
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
+		napi_hash_add(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_eth_queue(bp, i) {
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
+		napi_hash_add(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+	/* Reduce memory usage in kdump environment by using only one queue */
+	if (is_kdump_kernel())
+		nq = 1;
+
+	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+	return nq;
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @from:	source FP index
+ * @to:		destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy'ing the entire
+ * source onto the target. Txdata pointers and related
+ * content are updated accordingly.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+	struct bnx2x_fastpath *from_fp = &bp->fp[from];
+	struct bnx2x_fastpath *to_fp = &bp->fp[to];
+	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+	int old_max_eth_txqs, new_max_eth_txqs;
+	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
+
+	/* Copy the NAPI object as it has been already initialized */
+	from_fp->napi = to_fp->napi;
+
+	/* Move bnx2x_fastpath contents */
+	memcpy(to_fp, from_fp, sizeof(*to_fp));
+	to_fp->index = to;
+
+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
+	/* move sp_objs contents as well, as their indices match fp ones */
+	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+	/* move fp_stats contents as well, as their indices match fp ones */
+	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+	/* Update txdata pointers in fp and move txdata content accordingly:
+	 * Each fp consumes 'max_cos' txdata structures, so the index should be
+	 * decremented by max_cos x delta.
+	 */
+
+	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+				(bp)->max_cos;
+	if (from == FCOE_IDX(bp)) {
+		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+	}
+
+	memcpy(&bp->bnx2x_txq[new_txdata_index],
+	       &bp->bnx2x_txq[old_txdata_index],
+	       sizeof(struct bnx2x_fp_txdata));
+	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
+}
+
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp:        driver handle
+ * @buf:       character buffer to fill with the fw name
+ * @buf_len:   length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+	if (IS_PF(bp)) {
+		u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+		phy_fw_ver[0] = '\0';
+		bnx2x_get_ext_phy_fw_version(&bp->link_params,
+					     phy_fw_ver, PHY_FW_VER_LEN);
+		strlcpy(buf, bp->fw_ver, buf_len);
+		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+			 "bc %d.%d.%d%s%s",
+			 (bp->common.bc_ver & 0xff0000) >> 16,
+			 (bp->common.bc_ver & 0xff00) >> 8,
+			 (bp->common.bc_ver & 0xff),
+			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+	} else {
+		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+	}
+}
+
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp:	driver handle
+ * @delta:	number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* Queue pointer cannot be re-set on a per-fp basis, as moving a pointer
+	 * backward along the array could cause memory to be overwritten
+	 */
+	for (cos = 1; cos < bp->max_cos; cos++) {
+		for (i = 0; i < old_eth_num - delta; i++) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			int new_idx = cos * (old_eth_num - delta) + i;
+
+			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+			       sizeof(struct bnx2x_fp_txdata));
+			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+		}
+	}
+}
+
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+			     u16 idx, unsigned int *pkts_compl,
+			     unsigned int *bytes_compl)
+{
+	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_bd *tx_data_bd;
+	struct sk_buff *skb = tx_buf->skb;
+	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+	int nbd;
+	u16 split_bd_len = 0;
+
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
+	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
+	   txdata->txq_index, idx, tx_buf, skb);
+
+	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+
+	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+		BNX2X_ERR("BAD nbd!\n");
+		bnx2x_panic();
+	}
+#endif
+	new_cons = nbd + tx_buf->first_bd;
+
+	/* Get the next bd */
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	/* Skip a parse bd... */
+	--nbd;
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+		/* Skip second parse bd... */
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* unmap first bd */
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+			 DMA_TO_DEVICE);
+
+	/* now free frags */
+	while (nbd > 0) {
+
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+		if (--nbd)
+			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* release skb */
+	WARN_ON(!skb);
+	if (likely(skb)) {
+		(*pkts_compl)++;
+		(*bytes_compl) += skb->len;
+		dev_kfree_skb_any(skb);
+	}
+
+	tx_buf->first_bd = 0;
+	tx_buf->skb = NULL;
+
+	return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+{
+	struct netdev_queue *txq;
+	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -1;
+#endif
+
+	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+	sw_cons = txdata->tx_pkt_cons;
+
+	while (sw_cons != hw_cons) {
+		u16 pkt_cons;
+
+		pkt_cons = TX_BD(sw_cons);
+
+		DP(NETIF_MSG_TX_DONE,
+		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
+		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
+
+		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+					    &pkts_compl, &bytes_compl);
+
+		sw_cons++;
+	}
+
+	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
+	txdata->tx_pkt_cons = sw_cons;
+	txdata->tx_bd_cons = bd_cons;
+
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that
+	 * start_xmit() will miss it and cause the queue to be stopped
+	 * forever.
+	 * On the other hand we need an rmb() here to ensure the proper
+	 * ordering of bit testing in the following
+	 * netif_tx_queue_stopped(txq) call.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq))) {
+		/* Taking tx_lock() is needed to prevent re-enabling the queue
+		 * while it's empty. This could have happened if rx_action() gets
+		 * suspended in bnx2x_tx_int() after the condition before
+		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+		 *
+		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
+		 * sends some packets consuming the whole queue again->
+		 * stops the queue
+		 */
+
+		__netif_tx_lock(txq, smp_processor_id());
+
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
+		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
+			netif_tx_wake_queue(txq);
+
+		__netif_tx_unlock(txq);
+	}
+	return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+					     u16 idx)
+{
+	u16 last_max = fp->last_max_sge;
+
+	if (SUB_S16(idx, last_max) > 0)
+		fp->last_max_sge = idx;
+}
+
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+					 u16 sge_len,
+					 struct eth_end_agg_rx_cqe *cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 last_max, last_elem, first_elem;
+	u16 delta = 0;
+	u16 i;
+
+	if (!sge_len)
+		return;
+
+	/* First mark all used pages */
+	for (i = 0; i < sge_len; i++)
+		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
+			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
+
+	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+	/* Here we assume that the last SGE index is the biggest */
+	prefetch((void *)(fp->sge_mask));
+	bnx2x_update_last_max_sge(fp,
+		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+	last_max = RX_SGE(fp->last_max_sge);
+	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
+
+	/* If ring is not full */
+	if (last_elem + 1 != first_elem)
+		last_elem++;
+
+	/* Now update the prod */
+	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+		if (likely(fp->sge_mask[i]))
+			break;
+
+		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+		delta += BIT_VEC64_ELEM_SZ;
+	}
+
+	if (delta > 0) {
+		fp->rx_sge_prod += delta;
+		/* clear page-end entries */
+		bnx2x_clear_sge_mask_next_elems(fp);
+	}
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
+	   fp->last_max_sge, fp->rx_sge_prod);
+}
+
+/* Get Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+			    const struct eth_fast_path_rx_cqe *cqe,
+			    enum pkt_hash_types *rxhash_type)
+{
+	/* Get Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+		enum eth_rss_hash_type htype;
+
+		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+				(htype == TCP_IPV6_HASH_TYPE)) ?
+			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
+		return le32_to_cpu(cqe->rss_hash_result);
+	}
+	*rxhash_type = PKT_HASH_TYPE_NONE;
+	return 0;
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+			    u16 cons, u16 prod,
+			    struct eth_fast_path_rx_cqe *cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+	dma_addr_t mapping;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+
+	/* print error if current state != stop */
+	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
+		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+	/* Try to map an empty data buffer from the aggregation info  */
+	mapping = dma_map_single(&bp->pdev->dev,
+				 first_buf->data + NET_SKB_PAD,
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+	/*
+	 *  ...if it fails - move the skb from the consumer to the producer
+	 *  and set the current aggregation state as ERROR to drop it
+	 *  when TPA_STOP arrives.
+	 */
+
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		/* Move the BD from the consumer to the producer */
+		bnx2x_reuse_rx_data(fp, cons, prod);
+		tpa_info->tpa_state = BNX2X_TPA_ERROR;
+		return;
+	}
+
+	/* move empty data from pool to prod */
+	prod_rx_buf->data = first_buf->data;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	/* point prod_bd to new data */
+	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	/* move partial skb from cons to pool (don't unmap yet) */
+	*first_buf = *cons_rx_buf;
+
+	/* mark bin state as START */
+	tpa_info->parsing_flags =
+		le16_to_cpu(cqe->pars_flags.flags);
+	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+	tpa_info->tpa_state = BNX2X_TPA_START;
+	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+	tpa_info->placement_offset = cqe->placement_offset;
+	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
+	if (fp->mode == TPA_MODE_GRO) {
+		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
+		tpa_info->gro_size = gro_size;
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	fp->tpa_queue_used |= (1 << queue);
+	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+	   fp->tpa_queue_used);
+#endif
+}
+
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *		nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN	12
+/**
+ * bnx2x_set_gro_params - compute GRO values
+ *
+ * @skb:		packet skb
+ * @parsing_flags:	parsing flags from the START CQE
+ * @len_on_bd:		total length of the first packet for the
+ *			aggregation.
+ * @pkt_len:		length of all segments
+ * @num_of_coalesced_segs: number of coalesced segments
+ *
+ * Computes an approximate MSS value for this aggregation using its first
+ * packet, as well as the number of aggregated segments and the gso_type.
+ */
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+				 u16 len_on_bd, unsigned int pkt_len,
+				 u16 num_of_coalesced_segs)
+{
+	/* TPA aggregation won't have either IP options or TCP options
+	 * other than timestamp or IPv6 extension headers.
+	 */
+	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+	    PRS_FLAG_OVERETH_IPV6) {
+		hdrs_len += sizeof(struct ipv6hdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+	} else {
+		hdrs_len += sizeof(struct iphdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	}
+
+	/* Check if there was a TCP timestamp; if there was, it will
+	 * always be 12 bytes long: nop nop kind length echo val.
+	 *
+	 * Otherwise FW would close the aggregation.
+	 */
+	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+		hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+	 * to skb_shinfo(skb)->gso_segs
+	 */
+	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
+}
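+
+/* Worked example for the gso_size computation above (illustrative): for an
+ * IPv4 aggregation without a TCP timestamp option, hdrs_len = 14 (ETH_HLEN) +
+ * 20 (iphdr) + 20 (tcphdr) = 54, so a first packet with len_on_bd = 1514
+ * yields gso_size = 1460.
+ */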
+
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			      u16 index, gfp_t gfp_mask)
+{
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	struct bnx2x_alloc_pool *pool = &fp->page_pool;
+	dma_addr_t mapping;
+
+	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
+
+		/* put page reference used by the memory pool, since we
+		 * won't be using this page as the mempool anymore.
+		 */
+		if (pool->page)
+			put_page(pool->page);
+
+		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+		if (unlikely(!pool->page)) {
+			BNX2X_ERR("Can't alloc sge\n");
+			return -ENOMEM;
+		}
+
+		pool->offset = 0;
+	}
+
+	mapping = dma_map_page(&bp->pdev->dev, pool->page,
+			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		BNX2X_ERR("Can't map sge\n");
+		return -ENOMEM;
+	}
+
+	get_page(pool->page);
+	sw_buf->page = pool->page;
+	sw_buf->offset = pool->offset;
+
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	pool->offset += SGE_PAGE_SIZE;
+
+	return 0;
+}
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       struct bnx2x_agg_info *tpa_info,
+			       u16 pages,
+			       struct sk_buff *skb,
+			       struct eth_end_agg_rx_cqe *cqe,
+			       u16 cqe_idx)
+{
+	struct sw_rx_page *rx_pg, old_rx_pg;
+	u32 i, frag_len, frag_size;
+	int err, j, frag_id = 0;
+	u16 len_on_bd = tpa_info->len_on_bd;
+	u16 full_page = 0, gro_size = 0;
+
+	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
+
+	if (fp->mode == TPA_MODE_GRO) {
+		gro_size = tpa_info->gro_size;
+		full_page = tpa_info->full_page;
+	}
+
+	/* This is needed in order to enable forwarding support */
+	if (frag_size)
+		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+				     le16_to_cpu(cqe->pkt_len),
+				     le16_to_cpu(cqe->num_of_coalesced_segs));
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
+		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+			  pages, cqe_idx);
+		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
+		bnx2x_panic();
+		return -EINVAL;
+	}
+#endif
+
+	/* Run through the SGL and compose the fragmented skb */
+	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
+
+		/* FW gives the indices of the SGE as if the ring is an array
+		   (meaning that "next" element will consume 2 indices) */
+		if (fp->mode == TPA_MODE_GRO)
+			frag_len = min_t(u32, frag_size, (u32)full_page);
+		else /* LRO */
+			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
+
+		rx_pg = &fp->rx_page_ring[sge_idx];
+		old_rx_pg = *rx_pg;
+
+		/* If we fail to allocate a substitute page, we simply stop
+		   where we are and drop the whole packet */
+		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
+		if (unlikely(err)) {
+			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+			return err;
+		}
+
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+		/* Add one frag and update the appropriate fields in the skb */
+		if (fp->mode == TPA_MODE_LRO)
+			skb_fill_page_desc(skb, j, old_rx_pg.page,
+					   old_rx_pg.offset, frag_len);
+		else { /* GRO */
+			int rem;
+			int offset = 0;
+			for (rem = frag_len; rem > 0; rem -= gro_size) {
+				int len = rem > gro_size ? gro_size : rem;
+				skb_fill_page_desc(skb, frag_id++,
+						   old_rx_pg.page,
+						   old_rx_pg.offset + offset,
+						   len);
+				if (offset)
+					get_page(old_rx_pg.page);
+				offset += len;
+			}
+		}
+
+		skb->data_len += frag_len;
+		skb->truesize += SGE_PAGES;
+		skb->len += frag_len;
+
+		frag_size -= frag_len;
+	}
+
+	return 0;
+}
+
+static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+{
+	if (fp->rx_frag_size)
+		skb_free_frag(data);
+	else
+		kfree(data);
+}
+
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
+{
+	if (fp->rx_frag_size) {
+		/* GFP_KERNEL allocations are used only during initialization */
+		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
+			return (void *)__get_free_page(gfp_mask);
+
+		return netdev_alloc_frag(fp->rx_frag_size);
+	}
+
+	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
+}
+
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct tcphdr *th;
+
+	skb_set_transport_header(skb, sizeof(struct iphdr));
+	th = tcp_hdr(skb);
+
+	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+				  iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th;
+
+	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+	th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+}
+
+static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
+{
+	skb_set_network_header(skb, 0);
+	gro_func(bp, skb);
+	tcp_gro_complete(skb);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+	if (skb_shinfo(skb)->gso_size) {
+		switch (be16_to_cpu(skb->protocol)) {
+		case ETH_P_IP:
+			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
+			break;
+		case ETH_P_IPV6:
+			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
+			break;
+		default:
+			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+				  be16_to_cpu(skb->protocol));
+		}
+	}
+#endif
+	skb_record_rx_queue(skb, fp->rx_queue);
+	napi_gro_receive(&fp->napi, skb);
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			   struct bnx2x_agg_info *tpa_info,
+			   u16 pages,
+			   struct eth_end_agg_rx_cqe *cqe,
+			   u16 cqe_idx)
+{
+	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+	u8 pad = tpa_info->placement_offset;
+	u16 len = tpa_info->len_on_bd;
+	struct sk_buff *skb = NULL;
+	u8 *new_data, *data = rx_buf->data;
+	u8 old_tpa_state = tpa_info->tpa_state;
+
+	tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+	/* If there was an error during the handling of the TPA_START,
+	 * drop this aggregation.
+	 */
+	if (old_tpa_state == BNX2X_TPA_ERROR)
+		goto drop;
+
+	/* Try to allocate the new data */
+	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
+	/* Unmap skb in the pool anyway, as we are going to change
+	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
+	   fails. */
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
+	if (likely(new_data))
+		skb = build_skb(data, fp->rx_frag_size);
+
+	if (likely(skb)) {
+#ifdef BNX2X_STOP_ON_ERROR
+		if (pad + len > fp->rx_buf_size) {
+			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
+				  pad, len, fp->rx_buf_size);
+			bnx2x_panic();
+			return;
+		}
+#endif
+
+		skb_reserve(skb, pad + NET_SKB_PAD);
+		skb_put(skb, len);
+		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+					 skb, cqe, cqe_idx)) {
+			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
+			bnx2x_gro_receive(bp, fp, skb);
+		} else {
+			DP(NETIF_MSG_RX_STATUS,
+			   "Failed to allocate new pages - dropping packet!\n");
+			dev_kfree_skb_any(skb);
+		}
+
+		/* put new data in bin */
+		rx_buf->data = new_data;
+
+		return;
+	}
+	if (new_data)
+		bnx2x_frag_free(fp, new_data);
+drop:
+	/* drop the packet and keep the buffer in the bin */
+	DP(NETIF_MSG_RX_STATUS,
+	   "Failed to allocate or map a new skb - dropping packet!\n");
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
+}
+
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       u16 index, gfp_t gfp_mask)
+{
+	u8 *data;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	data = bnx2x_frag_alloc(fp, gfp_mask);
+	if (unlikely(data == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		bnx2x_frag_free(fp, data);
+		BNX2X_ERR("Can't map rx data\n");
+		return -ENOMEM;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+				 struct bnx2x_fastpath *fp,
+				 struct bnx2x_eth_q_stats *qstats)
+{
+	/* Do nothing if no L4 csum validation was done.
+	 * We do not check whether IP csum was validated. For IPv4 we assume
+	 * that if the card got as far as validating the L4 csum, it also
+	 * validated the IP csum. IPv6 has no IP csum.
+	 */
+	if (cqe->fast_path_cqe.status_flags &
+	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
+		return;
+
+	/* If L4 validation was done, check if an error was found. */
+
+	if (cqe->fast_path_cqe.type_error_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+		qstats->hw_csum_err++;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+	u16 sw_comp_cons, sw_comp_prod;
+	int rx_pkt = 0;
+	union eth_rx_cqe *cqe;
+	struct eth_fast_path_rx_cqe *cqe_fp;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return 0;
+#endif
+	if (budget <= 0)
+		return rx_pkt;
+
+	bd_cons = fp->rx_bd_cons;
+	bd_prod = fp->rx_bd_prod;
+	bd_prod_fw = bd_prod;
+	sw_comp_cons = fp->rx_comp_cons;
+	sw_comp_prod = fp->rx_comp_prod;
+
+	comp_ring_cons = RCQ_BD(sw_comp_cons);
+	cqe = &fp->rx_comp_ring[comp_ring_cons];
+	cqe_fp = &cqe->fast_path_cqe;
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
+
+	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
+		struct sw_rx_bd *rx_buf = NULL;
+		struct sk_buff *skb;
+		u8 cqe_fp_flags;
+		enum eth_rx_cqe_type cqe_fp_type;
+		u16 len, pad, queue;
+		u8 *data;
+		u32 rxhash;
+		enum pkt_hash_types rxhash_type;
+
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic))
+			return 0;
+#endif
+
+		bd_prod = RX_BD(bd_prod);
+		bd_cons = RX_BD(bd_cons);
+
+		/* A rmb() is required to ensure that the CQE is not read
+		 * before it is written by the adapter DMA.  PCI ordering
+		 * rules will make sure the other fields are written before
+		 * the marker at the end of struct eth_fast_path_rx_cqe
+		 * but without rmb() a weakly ordered processor can process
+		 * stale data.  Without the barrier TPA state-machine might
+		 * enter inconsistent state and kernel stack might be
+		 * provided with incorrect packet description - these lead
+		 * to various kernel crashes.
+		 */
+		rmb();
+
+		cqe_fp_flags = cqe_fp->type_error_flags;
+		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+		DP(NETIF_MSG_RX_STATUS,
+		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
+		   CQE_TYPE(cqe_fp_flags),
+		   cqe_fp_flags, cqe_fp->status_flags,
+		   le32_to_cpu(cqe_fp->rss_hash_result),
+		   le16_to_cpu(cqe_fp->vlan_tag),
+		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
+
+		/* is this a slowpath msg? */
+		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
+			bnx2x_sp_event(fp, cqe);
+			goto next_cqe;
+		}
+
+		rx_buf = &fp->rx_buf_ring[bd_cons];
+		data = rx_buf->data;
+
+		if (!CQE_TYPE_FAST(cqe_fp_type)) {
+			struct bnx2x_agg_info *tpa_info;
+			u16 frag_size, pages;
+#ifdef BNX2X_STOP_ON_ERROR
+			/* sanity check */
+			if (fp->mode == TPA_MODE_DISABLED &&
+			    (CQE_TYPE_START(cqe_fp_type) ||
+			     CQE_TYPE_STOP(cqe_fp_type)))
+				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
+					  CQE_TYPE(cqe_fp_type));
+#endif
+
+			if (CQE_TYPE_START(cqe_fp_type)) {
+				u16 queue = cqe_fp->queue_index;
+				DP(NETIF_MSG_RX_STATUS,
+				   "calling tpa_start on queue %d\n",
+				   queue);
+
+				bnx2x_tpa_start(fp, queue,
+						bd_cons, bd_prod,
+						cqe_fp);
+
+				goto next_rx;
+			}
+			queue = cqe->end_agg_cqe.queue_index;
+			tpa_info = &fp->tpa_info[queue];
+			DP(NETIF_MSG_RX_STATUS,
+			   "calling tpa_stop on queue %d\n",
+			   queue);
+
+			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+				    tpa_info->len_on_bd;
+
+			if (fp->mode == TPA_MODE_GRO)
+				pages = (frag_size + tpa_info->full_page - 1) /
+					 tpa_info->full_page;
+			else
+				pages = SGE_PAGE_ALIGN(frag_size) >>
+					SGE_PAGE_SHIFT;
+
+			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+				       &cqe->end_agg_cqe, comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			if (bp->panic)
+				return 0;
+#endif
+
+			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+			goto next_cqe;
+		}
+		/* non TPA */
+		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
+		pad = cqe_fp->placement_offset;
+		dma_sync_single_for_cpu(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+					pad + RX_COPY_THRESH,
+					DMA_FROM_DEVICE);
+		pad += NET_SKB_PAD;
+		prefetch(data + pad); /* speedup eth_type_trans() */
+		/* is this an error packet? */
+		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+			   "ERROR  flags %x  rx packet %u\n",
+			   cqe_fp_flags, sw_comp_cons);
+			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
+			goto reuse_rx;
+		}
+
+		/* Since we don't have a jumbo ring
+		 * copy small packets if mtu > 1500
+		 */
+		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+		    (len <= RX_COPY_THRESH)) {
+			skb = napi_alloc_skb(&fp->napi, len);
+			if (skb == NULL) {
+				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+				   "ERROR  packet dropped because of alloc failure\n");
+				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+				goto reuse_rx;
+			}
+			memcpy(skb->data, data + pad, len);
+			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+		} else {
+			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+						       GFP_ATOMIC) == 0)) {
+				dma_unmap_single(&bp->pdev->dev,
+						 dma_unmap_addr(rx_buf, mapping),
+						 fp->rx_buf_size,
+						 DMA_FROM_DEVICE);
+				skb = build_skb(data, fp->rx_frag_size);
+				if (unlikely(!skb)) {
+					bnx2x_frag_free(fp, data);
+					bnx2x_fp_qstats(bp, fp)->
+							rx_skb_alloc_failed++;
+					goto next_rx;
+				}
+				skb_reserve(skb, pad);
+			} else {
+				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+				   "ERROR  packet dropped because of alloc failure\n");
+				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+reuse_rx:
+				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+				goto next_rx;
+			}
+		}
+
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, bp->dev);
+
+		/* Set Toeplitz hash for a non-LRO skb */
+		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+		skb_set_hash(skb, rxhash, rxhash_type);
+
+		skb_checksum_none_assert(skb);
+
+		if (bp->dev->features & NETIF_F_RXCSUM)
+			bnx2x_csum_validate(skb, cqe, fp,
+					    bnx2x_fp_qstats(bp, fp));
+
+		skb_record_rx_queue(skb, fp->rx_queue);
+
+		/* Check if this packet was timestamped */
+		if (unlikely(cqe->fast_path_cqe.type_error_flags &
+			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+			bnx2x_set_rx_ts(bp, skb);
+
+		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+		    PARSING_FLAGS_VLAN)
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       le16_to_cpu(cqe_fp->vlan_tag));
+
+		skb_mark_napi_id(skb, &fp->napi);
+
+		if (bnx2x_fp_ll_polling(fp))
+			netif_receive_skb(skb);
+		else
+			napi_gro_receive(&fp->napi, skb);
+next_rx:
+		rx_buf->data = NULL;
+
+		bd_cons = NEXT_RX_IDX(bd_cons);
+		bd_prod = NEXT_RX_IDX(bd_prod);
+		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+		rx_pkt++;
+next_cqe:
+		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+		/* mark CQE as free */
+		BNX2X_SEED_CQE(cqe_fp);
+
+		if (rx_pkt == budget)
+			break;
+
+		comp_ring_cons = RCQ_BD(sw_comp_cons);
+		cqe = &fp->rx_comp_ring[comp_ring_cons];
+		cqe_fp = &cqe->fast_path_cqe;
+	} /* while */
+
+	fp->rx_bd_cons = bd_cons;
+	fp->rx_bd_prod = bd_prod_fw;
+	fp->rx_comp_cons = sw_comp_cons;
+	fp->rx_comp_prod = sw_comp_prod;
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+			     fp->rx_sge_prod);
+
+	fp->rx_pkt += rx_pkt;
+	fp->rx_calls++;
+
+	return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+	struct bnx2x_fastpath *fp = fp_cookie;
+	struct bnx2x *bp = fp->bp;
+	u8 cos;
+
+	DP(NETIF_MSG_INTR,
+	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
+	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
+
+	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	/* Handle Rx and Tx according to MSI-X vector */
+	for_each_cos_in_tx_queue(fp, cos)
+		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
+
+	prefetch(&fp->sb_running_index[SM_RX_ID]);
+	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
+
+	return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+	mutex_lock(&bp->port.phy_mutex);
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+	mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+	u16 line_speed = bp->link_vars.line_speed;
+	if (IS_MF(bp)) {
+		u16 maxCfg = bnx2x_extract_max_cfg(bp,
+						   bp->mf_config[BP_VN(bp)]);
+
+		/* Calculate the current MAX line speed limit for the MF
+		 * devices
+		 */
+		if (IS_MF_PERCENT_BW(bp))
+			line_speed = (line_speed * maxCfg) / 100;
+		else { /* SD mode */
+			u16 vn_max_rate = maxCfg * 100;
+
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		}
+	}
+
+	return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp:		driver handle
+ * @data:	link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static void bnx2x_fill_report_data(struct bnx2x *bp,
+				   struct bnx2x_link_report_data *data)
+{
+	memset(data, 0, sizeof(*data));
+
+	if (IS_PF(bp)) {
+		/* Fill the report data: effective line speed */
+		data->line_speed = bnx2x_get_mf_speed(bp);
+
+		/* Link is down */
+		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+				  &data->link_report_flags);
+
+		if (!BNX2X_NUM_ETH_QUEUES(bp))
+			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+				  &data->link_report_flags);
+
+		/* Full DUPLEX */
+		if (bp->link_vars.duplex == DUPLEX_FULL)
+			__set_bit(BNX2X_LINK_REPORT_FD,
+				  &data->link_report_flags);
+
+		/* Rx Flow Control is ON */
+		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+				  &data->link_report_flags);
+
+		/* Tx Flow Control is ON */
+		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+				  &data->link_report_flags);
+	} else { /* VF */
+		*data = bp->vf_link_vars;
+	}
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp:		driver handle
+ *
+ * Calls __bnx2x_link_report() under the same locking scheme as the
+ * link/PHY state managing code to ensure consistent link reporting.
+ */
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+	bnx2x_acquire_phy_lock(bp);
+	__bnx2x_link_report(bp);
+	bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp:		driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+	struct bnx2x_link_report_data cur_data;
+
+	/* reread mf_cfg */
+	if (IS_PF(bp) && !CHIP_IS_E1(bp))
+		bnx2x_read_mf_cfg(bp);
+
+	/* Read the current link report info */
+	bnx2x_fill_report_data(bp, &cur_data);
+
+	/* Don't report link down or exactly the same link status twice */
+	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		      &bp->last_reported_link.link_report_flags) &&
+	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		      &cur_data.link_report_flags)))
+		return;
+
+	bp->link_cnt++;
+
+	/* We are going to report new link parameters now -
+	 * remember the current data for the next time.
+	 */
+	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+	/* propagate status to VFs */
+	if (IS_PF(bp))
+		bnx2x_iov_link_update(bp);
+
+	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		     &cur_data.link_report_flags)) {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+		return;
+	} else {
+		const char *duplex;
+		const char *flow;
+
+		netif_carrier_on(bp->dev);
+
+		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+				       &cur_data.link_report_flags))
+			duplex = "full";
+		else
+			duplex = "half";
+
+		/* Handle the FC flags at the end so that only these flags may
+		 * still be set. This way we can easily check whether any FC
+		 * is enabled at all.
+		 */
+		if (cur_data.link_report_flags) {
+			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+				     &cur_data.link_report_flags)) {
+				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+				     &cur_data.link_report_flags))
+					flow = "ON - receive & transmit";
+				else
+					flow = "ON - receive";
+			} else {
+				flow = "ON - transmit";
+			}
+		} else {
+			flow = "none";
+		}
+		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+			    cur_data.line_speed, duplex, flow);
+	}
+}
+
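+/* Chain the Rx SGE pages into a ring: the "next page" element of each page
+ * is pointed at the start of the following page, with the last page wrapping
+ * back to the first one.
+ */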
+static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		struct eth_rx_sge *sge;
+
+		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+		sge->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+		sge->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+	}
+}
+
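+/* Free the first buffers of the TPA aggregation bins [0, last): unmap the
+ * DMA mapping only for bins still in the START state and return each buffer
+ * to the frag allocator.
+ */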
+static void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		u8 *data = first_buf->data;
+
+		if (data == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(first_buf, mapping),
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
+		bnx2x_frag_free(fp, data);
+		first_buf->data = NULL;
+	}
+}
+
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * This will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized.
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+	}
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	u16 ring_prod;
+	int i, j;
+
+	/* Allocate TPA resources */
+	for_each_eth_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
+		if (fp->mode != TPA_MODE_DISABLED) {
+			/* Fill the per-aggregation pool */
+			for (i = 0; i < MAX_AGG_QS(bp); i++) {
+				struct bnx2x_agg_info *tpa_info =
+					&fp->tpa_info[i];
+				struct sw_rx_bd *first_buf =
+					&tpa_info->first_buf;
+
+				first_buf->data =
+					bnx2x_frag_alloc(fp, GFP_KERNEL);
+				if (!first_buf->data) {
+					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
+						  j);
+					bnx2x_free_tpa_pool(bp, fp, i);
+					fp->mode = TPA_MODE_DISABLED;
+					break;
+				}
+				dma_unmap_addr_set(first_buf, mapping, 0);
+				tpa_info->tpa_state = BNX2X_TPA_STOP;
+			}
+
+			/* "next page" elements initialization */
+			bnx2x_set_next_page_sgl(fp);
+
+			/* set SGEs bit mask */
+			bnx2x_init_sge_ring_bit_mask(fp);
+
+			/* Allocate SGEs and initialize the ring elements */
+			for (i = 0, ring_prod = 0;
+			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+						       GFP_KERNEL) < 0) {
+					BNX2X_ERR("was only able to allocate %d rx sges\n",
+						  i);
+					BNX2X_ERR("disabling TPA for queue[%d]\n",
+						  j);
+					/* Cleanup already allocated elements */
+					bnx2x_free_rx_sge_range(bp, fp,
+								ring_prod);
+					bnx2x_free_tpa_pool(bp, fp,
+							    MAX_AGG_QS(bp));
+					fp->mode = TPA_MODE_DISABLED;
+					ring_prod = 0;
+					break;
+				}
+				ring_prod = NEXT_SGE_IDX(ring_prod);
+			}
+
+			fp->rx_sge_prod = ring_prod;
+		}
+	}
+
+	for_each_eth_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * This will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized.
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+
+		if (j != 0)
+			continue;
+
+		if (CHIP_IS_E1(bp)) {
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+			       U64_LO(fp->rx_comp_mapping));
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+			       U64_HI(fp->rx_comp_mapping));
+		}
+	}
+}
+
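+/* Free all Tx packets still pending on every CoS ring of the fastpath and
+ * reset the corresponding netdev Tx queue (BQL) accounting.
+ */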
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
+{
+	u8 cos;
+	struct bnx2x *bp = fp->bp;
+
+	for_each_cos_in_tx_queue(fp, cos) {
+		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+		unsigned pkts_compl = 0, bytes_compl = 0;
+
+		u16 sw_prod = txdata->tx_pkt_prod;
+		u16 sw_cons = txdata->tx_pkt_cons;
+
+		while (sw_cons != sw_prod) {
+			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+					  &pkts_compl, &bytes_compl);
+			sw_cons++;
+		}
+
+		netdev_tx_reset_queue(
+			netdev_get_tx_queue(bp->dev,
+					    txdata->txq_index));
+	}
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue_cnic(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+	}
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+	}
+}
+
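+/* Unmap and free every Rx buffer currently posted on the fastpath BD ring. */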
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	int i;
+
+	/* ring wasn't allocated */
+	if (fp->rx_buf_ring == NULL)
+		return;
+
+	for (i = 0; i < NUM_RX_BD; i++) {
+		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+		u8 *data = rx_buf->data;
+
+		if (data == NULL)
+			continue;
+		dma_unmap_single(&bp->pdev->dev,
+				 dma_unmap_addr(rx_buf, mapping),
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+
+		rx_buf->data = NULL;
+		bnx2x_frag_free(fp, data);
+	}
+}
+
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		bnx2x_free_rx_bds(&bp->fp[j]);
+	}
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_eth_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		bnx2x_free_rx_bds(fp);
+
+		if (fp->mode != TPA_MODE_DISABLED)
+			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
+	}
+}
+
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs_cnic(bp);
+	bnx2x_free_rx_skbs_cnic(bp);
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs(bp);
+	bnx2x_free_rx_skbs(bp);
+}
+
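+/* Update the MAX bandwidth value in this function's MF configuration and
+ * notify the MCP only if the value has actually changed.
+ */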
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+	/* load old values */
+	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+		/* leave all but MAX value */
+		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+		/* set new MAX value */
+		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+				& FUNC_MF_CFG_MAX_BW_MASK;
+
+		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+	}
+}
+
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp:		driver handle
+ * @nvecs:	number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
+{
+	int i, offset = 0;
+
+	if (nvecs == offset)
+		return;
+
+	/* VFs don't have a default SB */
+	if (IS_PF(bp)) {
+		free_irq(bp->msix_table[offset].vector, bp->dev);
+		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+		   bp->msix_table[offset].vector);
+		offset++;
+	}
+
+	if (CNIC_SUPPORT(bp)) {
+		if (nvecs == offset)
+			return;
+		offset++;
+	}
+
+	for_each_eth_queue(bp, i) {
+		if (nvecs == offset)
+			return;
+		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
+		   i, bp->msix_table[offset].vector);
+
+		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
+	}
+}
+
+void bnx2x_free_irq(struct bnx2x *bp)
+{
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+		/* vfs don't have a default status block */
+		if (IS_PF(bp))
+			nvecs++;
+
+		bnx2x_free_msix_irqs(bp, nvecs);
+	} else {
+		free_irq(bp->dev->irq, bp->dev);
+	}
+}
+
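+/* Build the MSI-X table (slowpath SB for a PF, an optional CNIC vector and
+ * one vector per ETH queue) and enable MSI-X, reducing the number of queues
+ * or falling back to a single vector when fewer vectors are available.
+ */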
+int bnx2x_enable_msix(struct bnx2x *bp)
+{
+	int msix_vec = 0, i, rc;
+
+	/* VFs don't have a default status block */
+	if (IS_PF(bp)) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+			       bp->msix_table[0].entry);
+		msix_vec++;
+	}
+
+	/* Cnic requires an msix vector for itself */
+	if (CNIC_SUPPORT(bp)) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+			       msix_vec, bp->msix_table[msix_vec].entry);
+		msix_vec++;
+	}
+
+	/* We need separate vectors for ETH queues only (not FCoE) */
+	for_each_eth_queue(bp, i) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
+			       msix_vec, msix_vec, i);
+		msix_vec++;
+	}
+
+	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+	   msix_vec);
+
+	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
+	/*
+	 * reconfigure number of tx/rx queues according to available
+	 * MSI-X vectors
+	 */
+	if (rc == -ENOSPC) {
+		/* Get by with single vector */
+		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+		if (rc < 0) {
+			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+				       rc);
+			goto no_msix;
+		}
+
+		BNX2X_DEV_INFO("Using single MSI-X vector\n");
+		bp->flags |= USING_SINGLE_MSIX_FLAG;
+
+		BNX2X_DEV_INFO("set number of queues to 1\n");
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+	} else if (rc < 0) {
+		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+		goto no_msix;
+	} else if (rc < msix_vec) {
+		/* how many fewer vectors will we have? */
+		int diff = msix_vec - rc;
+
+		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+		/*
+		 * decrease number of queues by number of unallocated entries
+		 */
+		bp->num_ethernet_queues -= diff;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+		BNX2X_DEV_INFO("New queue configuration set: %d\n",
+			       bp->num_queues);
+	}
+
+	bp->flags |= USING_MSIX_FLAG;
+
+	return 0;
+
+no_msix:
+	/* fall back to INTx if there is not enough memory */
+	if (rc == -ENOMEM)
+		bp->flags |= DISABLE_MSI_FLAG;
+
+	return rc;
+}
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+	int i, rc, offset = 0;
+
+	/* no default status block for vf */
+	if (IS_PF(bp)) {
+		rc = request_irq(bp->msix_table[offset++].vector,
+				 bnx2x_msix_sp_int, 0,
+				 bp->dev->name, bp->dev);
+		if (rc) {
+			BNX2X_ERR("request sp irq failed\n");
+			return -EBUSY;
+		}
+	}
+
+	if (CNIC_SUPPORT(bp))
+		offset++;
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 bp->dev->name, i);
+
+		rc = request_irq(bp->msix_table[offset].vector,
+				 bnx2x_msix_fp_int, 0, fp->name, fp);
+		if (rc) {
+			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
+			      bp->msix_table[offset].vector, rc);
+			bnx2x_free_msix_irqs(bp, offset);
+			return -EBUSY;
+		}
+
+		offset++;
+	}
+
+	i = BNX2X_NUM_ETH_QUEUES(bp);
+	if (IS_PF(bp)) {
+		offset = 1 + CNIC_SUPPORT(bp);
+		netdev_info(bp->dev,
+			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
+			    bp->msix_table[0].vector,
+			    0, bp->msix_table[offset].vector,
+			    i - 1, bp->msix_table[offset + i - 1].vector);
+	} else {
+		offset = CNIC_SUPPORT(bp);
+		netdev_info(bp->dev,
+			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
+			    0, bp->msix_table[offset].vector,
+			    i - 1, bp->msix_table[offset + i - 1].vector);
+	}
+	return 0;
+}
+
+int bnx2x_enable_msi(struct bnx2x *bp)
+{
+	int rc;
+
+	rc = pci_enable_msi(bp->pdev);
+	if (rc) {
+		BNX2X_DEV_INFO("MSI is not attainable\n");
+		return -1;
+	}
+	bp->flags |= USING_MSI_FLAG;
+
+	return 0;
+}
+
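+/* Request a single interrupt line: the first MSI-X vector when MSI-X is
+ * enabled, otherwise the PCI device IRQ (MSI or shared legacy INTx).
+ */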
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+	unsigned long flags;
+	unsigned int irq;
+
+	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	if (bp->flags & USING_MSIX_FLAG)
+		irq = bp->msix_table[0].vector;
+	else
+		irq = bp->pdev->irq;
+
+	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
+}
+
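+/* Request interrupts according to the active interrupt mode: per-queue
+ * MSI-X, or a single MSI-X/MSI/INTx vector.
+ */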
+static int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+	int rc = 0;
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc)
+			return rc;
+	} else {
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+			return rc;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI IRQ %d\n",
+				    bp->dev->irq);
+		}
+		if (bp->flags & USING_MSIX_FLAG) {
+			bp->dev->irq = bp->msix_table[0].vector;
+			netdev_info(bp->dev, "using MSIX IRQ %d\n",
+				    bp->dev->irq);
+		}
+	}
+
+	return 0;
+}
+
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i) {
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
+		napi_enable(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
+		napi_enable(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i) {
+		napi_disable(&bnx2x_fp(bp, i, napi));
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
+	}
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		napi_disable(&bnx2x_fp(bp, i, napi));
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
+	}
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_enable(bp);
+		if (CNIC_LOADED(bp))
+			bnx2x_napi_enable_cnic(bp);
+		bnx2x_int_enable(bp);
+		if (bp->state == BNX2X_STATE_OPEN)
+			netif_tx_wake_all_queues(bp->dev);
+	}
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+	bnx2x_int_disable_sync(bp, disable_hw);
+	bnx2x_napi_disable(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_napi_disable_cnic(bp);
+}
+
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
+		struct ethhdr *hdr = (struct ethhdr *)skb->data;
+		u16 ether_type = ntohs(hdr->h_proto);
+
+		/* Skip VLAN tag if present */
+		if (ether_type == ETH_P_8021Q) {
+			struct vlan_ethhdr *vhdr =
+				(struct vlan_ethhdr *)skb->data;
+
+			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+		}
+
+		/* If ethertype is FCoE or FIP - use FCoE ring */
+		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+			return bnx2x_fcoe_tx(bp, txq_index);
+	}
+
+	/* select a non-FCoE queue */
+	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+}
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+	/* RSS queues */
+	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
+
+	/* override in STORAGE SD modes */
+	if (IS_MF_STORAGE_ONLY(bp))
+		bp->num_ethernet_queues = 1;
+
+	/* Add special queues */
+	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
+}
+
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp:		Driver handle
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings according to the value of
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then there
+ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31, ...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
+{
+	int rc, tx, rx;
+
+	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+	rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* account for the FCoE queue */
+	if (include_cnic && !NO_FCOE(bp)) {
+		rx++;
+		tx++;
+	}
+
+	rc = netif_set_real_num_tx_queues(bp->dev, tx);
+	if (rc) {
+		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+		return rc;
+	}
+	rc = netif_set_real_num_rx_queues(bp->dev, rx);
+	if (rc) {
+		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+		return rc;
+	}
+
+	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
+			  tx, rx);
+
+	return rc;
+}
+
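+/* Derive each queue's Rx buffer size from the MTU (the FCoE ring uses a
+ * mini-jumbo MTU) plus the FW alignment and IP header padding, and decide
+ * whether a page-frag allocation fits that size.
+ */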
+static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		u32 mtu;
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although no IP frames are expected to arrive on
+			 * this ring, we still want to add
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+		else
+			mtu = bp->dev->mtu;
+		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+				  IP_HEADER_ALIGNMENT_PADDING +
+				  ETH_OVREHEAD +
+				  mtu +
+				  BNX2X_FW_RX_ALIGN_END;
+		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
+		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+		else
+			fp->rx_frag_size = 0;
+	}
+}
+
+static int bnx2x_init_rss(struct bnx2x *bp)
+{
+	int i;
+	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* Prepare the initial contents for the indirection table if RSS is
+	 * enabled
+	 */
+	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+		bp->rss_conf_obj.ind_table[i] =
+			bp->fp->cl_id +
+			ethtool_rxfh_indir_default(i, num_eth_queues);
+
+	/*
+	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+	 * per-port, so if explicit configuration is needed, do it only
+	 * for a PMF.
+	 *
+	 * For 57712 and newer on the other hand it's a per-function
+	 * configuration.
+	 */
+	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+	      bool config_hash, bool enable)
+{
+	struct bnx2x_config_rss_params params = {NULL};
+
+	/* Although RSS is meaningless when there is a single HW queue, we
+	 * still need it enabled in order to have the HW Rx hash generated.
+	 *
+	 * if (!is_eth_multi(bp))
+	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
+	 */
+
+	params.rss_obj = rss_obj;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	if (enable) {
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+		/* RSS configuration */
+		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+		if (rss_obj->udp_rss_v4)
+			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+		if (rss_obj->udp_rss_v6)
+			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+		if (!CHIP_IS_E1x(bp)) {
+			/* valid only for TUNN_MODE_VXLAN tunnel mode */
+			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
+			/* valid only for TUNN_MODE_GRE tunnel mode */
+			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+		}
+	} else {
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+	}
+
+	/* Hash bits */
+	params.rss_result_mask = MULTI_MASK;
+
+	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
+
+	if (config_hash) {
+		/* RSS keys */
+		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
+		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+	}
+
+	if (IS_PF(bp))
+		return bnx2x_config_rss(bp, &params);
+	else
+		return bnx2x_vfpf_config_rss(bp, &params);
+}
+
+static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
+
+	/***************** Cleanup MACs' object first *************************/
+
+	/* Wait for completion of the requested commands */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Perform a dry cleanup */
+	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+	/* Clean ETH primary MAC */
+	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+	/* Cleanup UC list */
+	vlan_mac_flags = 0;
+	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+	/***************** Now clean mcast object *****************************/
+	rparam.mcast_obj = &bp->mcast_obj;
+	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+	/* Add a DEL command... - Since we're doing a driver cleanup only,
+	 * we take a lock surrounding both the initial send and the CONTs,
+	 * as we don't want a true completion to disrupt us in the middle.
+	 */
+	netif_addr_lock_bh(bp->dev);
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
+			  rc);
+
+	/* ...and wait until all pending commands are cleared */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	while (rc != 0) {
+		if (rc < 0) {
+			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+				  rc);
+			netif_addr_unlock_bh(bp->dev);
+			return;
+		}
+
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	}
+	netif_addr_unlock_bh(bp->dev);
+}
+
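+/* Load-flow error exit helpers: mark the failure (bp->state / cnic_loaded)
+ * and jump to the matching unwind label, or flag a driver panic and bail out
+ * immediately when BNX2X_STOP_ON_ERROR is set.
+ */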
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		goto label; \
+	} while (0)
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		goto label; \
+	} while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
+
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
+{
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	return;
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+	int num_groups, vf_headroom = 0;
+	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+	/* number of queues for statistics is number of eth queues + FCoE */
+	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+	/* Total number of FW statistics requests =
+	 * 1 for port stats + 1 for PF stats + 1 for FCoE proper stats (if
+	 * applicable) + the number of queue stats (which already includes an
+	 * entry for the FCoE L2 queue if applicable)
+	 */
+	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+	/* vf stats appear in the request list, but their data is allocated by
+	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
+	 * it is used to determine where to place the vf stats queries in the
+	 * request struct
+	 */
+	if (IS_SRIOV(bp))
+		vf_headroom = bnx2x_vf_headroom(bp);
+
+	/* Request is built from stats_query_header and an array of
+	 * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+	 * configured in the stats_query_header.
+	 */
+	num_groups =
+		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+		 1 : 0));
+
+	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+	   bp->fw_stats_num, vf_headroom, num_groups);
+	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+		num_groups * sizeof(struct stats_query_cmd_group);
+
+	/* Data for statistics requests + stats_counter.
+	 * stats_counter holds per-STORM counters that are incremented
+	 * when a STORM has finished with the current request.
+	 * Memory for FCoE offloaded statistics is counted anyway,
+	 * even if it will not be sent.
+	 * VF stats are not accounted for here, as the VF stats data is stored
+	 * in memory allocated by the VFs themselves, not here.
+	 */
+	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+		sizeof(struct per_pf_stats) +
+		sizeof(struct fcoe_statistics_params) +
+		sizeof(struct per_queue_stats) * num_queue_stats +
+		sizeof(struct stats_counter);
+
+	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	if (!bp->fw_stats)
+		goto alloc_mem_err;
+
+	/* Set shortcuts */
+	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+		bp->fw_stats_req_sz;
+
+	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
+	   U64_HI(bp->fw_stats_req_mapping),
+	   U64_LO(bp->fw_stats_req_mapping));
+	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
+	   U64_HI(bp->fw_stats_data_mapping),
+	   U64_LO(bp->fw_stats_data_mapping));
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_fw_stats_mem(bp);
+	BNX2X_ERR("Can't allocate FW stats memory\n");
+	return -ENOMEM;
+}
+
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
+{
+	u32 param;
+
+	/* init fw_seq */
+	bp->fw_seq =
+		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+		 DRV_MSG_SEQ_NUMBER_MASK);
+	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+	/* Get current FW pulse sequence */
+	bp->fw_drv_pulse_wr_seq =
+		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+		 DRV_PULSE_SEQ_MASK);
+	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
+	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
+
+	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
+		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
+
+	/* load request */
+	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
+
+	/* if mcp fails to respond we must abort */
+	if (!(*load_code)) {
+		BNX2X_ERR("MCP response failure, aborting\n");
+		return -EBUSY;
+	}
+
+	/* If mcp refused (e.g. other port is in diagnostic mode) we
+	 * must abort
+	 */
+	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+		BNX2X_ERR("MCP refused load request, aborting\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/* Check whether another PF has already loaded FW to the chip. In
+ * virtualized environments a PF from another VM may have already
+ * initialized the device, including loading the FW.
+ */
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
+{
+	/* is another pf loaded on this engine? */
+	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+		/* build my FW version dword */
+		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+			(BCM_5710_FW_MINOR_VERSION << 8) +
+			(BCM_5710_FW_REVISION_VERSION << 16) +
+			(BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+		/* read loaded FW from chip */
+		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+		   loaded_fw, my_fw);
+
+		/* abort nic load if version mismatch */
+		if (my_fw != loaded_fw) {
+			if (print_err)
+				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+					  loaded_fw, my_fw);
+			else
+				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+					       loaded_fw, my_fw);
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+	int path = BP_PATH(bp);
+
+	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
+	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+	   bnx2x_load_count[path][2]);
+	bnx2x_load_count[path][0]++;
+	bnx2x_load_count[path][1 + port]++;
+	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
+	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+	   bnx2x_load_count[path][2]);
+	if (bnx2x_load_count[path][0] == 1)
+		return FW_MSG_CODE_DRV_LOAD_COMMON;
+	else if (bnx2x_load_count[path][1 + port] == 1)
+		return FW_MSG_CODE_DRV_LOAD_PORT;
+	else
+		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+		bp->port.pmf = 1;
+		/* We need the barrier to ensure the ordering between the
+		 * writing to bp->port.pmf here and reading it from the
+		 * bnx2x_periodic_task().
+		 */
+		smp_mb();
+	} else {
+		bp->port.pmf = 0;
+	}
+
+	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
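+/* Advertise DCC and AFEX support to the MFW via shmem2 on a common (first)
+ * load, and reset the AFEX default VLAN tag to an invalid value.
+ */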
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+	    (bp->common.shmem2_base)) {
+		if (SHMEM2_HAS(bp, dcc_support))
+			SHMEM2_WR(bp, dcc_support,
+				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+		if (SHMEM2_HAS(bp, afex_driver_support))
+			SHMEM2_WR(bp, afex_driver_support,
+				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
+	}
+
+	/* Set AFEX default VLAN tag to an invalid value */
+	bp->afex_def_vlan_tag = -1;
+}
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	int cos;
+	struct napi_struct orig_napi = fp->napi;
+	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
+
+	/* bzero bnx2x_fastpath contents */
+	if (fp->tpa_info)
+		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+		       sizeof(struct bnx2x_agg_info));
+	memset(fp, 0, sizeof(*fp));
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+	fp->tpa_info = orig_tpa_info;
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/* Init txdata pointers */
+	if (IS_FCOE_FP(fp))
+		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+	if (IS_ETH_FP(fp))
+		for_each_cos_in_tx_queue(fp, cos)
+			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+				BNX2X_NUM_ETH_QUEUES(bp) + index];
+
+	/* set the tpa flag for each queue. The tpa flag determines the queue
+	 * minimal size so it must be set prior to queue memory allocation
+	 */
+	if (bp->dev->features & NETIF_F_LRO)
+		fp->mode = TPA_MODE_LRO;
+	else if (bp->dev->features & NETIF_F_GRO &&
+		 bnx2x_mtu_allows_gro(bp->dev->mtu))
+		fp->mode = TPA_MODE_GRO;
+	else
+		fp->mode = TPA_MODE_DISABLED;
+
+	/* We don't want TPA if it's disabled in bp
+	 * or if this is an FCoE L2 ring.
+	 */
+	if (bp->disable_tpa || IS_FCOE_FP(fp))
+		fp->mode = TPA_MODE_DISABLED;
+}
+
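+/* Report the OS driver state to the management FW through the shmem2
+ * os_driver_state field; applies only to PFs in MF-BD mode when the field
+ * exists.
+ */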
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+	u32 cur;
+
+	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+		return;
+
+	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+	   cur, state);
+
+	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+	int i, rc, port = BP_PORT(bp);
+
+	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+	mutex_init(&bp->cnic_mutex);
+
+	if (IS_PF(bp)) {
+		rc = bnx2x_alloc_mem_cnic(bp);
+		if (rc) {
+			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+		}
+	}
+
+	rc = bnx2x_alloc_fp_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Update the number of queues with the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Add all CNIC NAPI objects */
+	bnx2x_add_all_napi_cnic(bp);
+	DP(NETIF_MSG_IFUP, "cnic napi added\n");
+	bnx2x_napi_enable_cnic(bp);
+
+	rc = bnx2x_init_hw_func_cnic(bp);
+	if (rc)
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+	bnx2x_nic_init_cnic(bp);
+
+	if (IS_PF(bp)) {
+		/* Enable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+		/* setup cnic queues */
+		for_each_cnic_queue(bp, i) {
+			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+			if (rc) {
+				BNX2X_ERR("Queue setup failed\n");
+				LOAD_ERROR_EXIT(bp, load_error_cnic2);
+			}
+		}
+	}
+
+	/* Initialize Rx filter. */
+	bnx2x_set_rx_mode_inner(bp);
+
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_setup_cnic_info(bp);
+	bp->cnic_loaded = true;
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
+
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+	bnx2x_napi_disable_cnic(bp);
+	/* Update the number of queues without the cnic queues */
+	if (bnx2x_set_real_num_queues(bp, 0))
+		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+	BNX2X_ERR("CNIC-related load failed\n");
+	bnx2x_free_fp_mem_cnic(bp);
+	bnx2x_free_mem_cnic(bp);
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+	int port = BP_PORT(bp);
+	int i, rc = 0, load_code = 0;
+
+	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+	DP(NETIF_MSG_IFUP,
+	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		BNX2X_ERR("Can't load NIC when there is panic\n");
+		return -EPERM;
+	}
+#endif
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+	/* zero the structure w/o any lock, before SP handler is initialized */
+	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		&bp->last_reported_link.link_report_flags);
+
+	if (IS_PF(bp))
+		/* must be called before memory allocation and HW init */
+		bnx2x_ilt_set_info(bp);
+
+	/*
+	 * Zero the fastpath structures while preserving the invariants that
+	 * are allocated only once: the napi struct, fp index, max_cos and
+	 * bp pointer. Also set fp->mode and txdata_ptr.
+	 */
+	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
+	for_each_queue(bp, i)
+		bnx2x_bz_fp(bp, i);
+	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+				  bp->num_cnic_queues) *
+				  sizeof(struct bnx2x_fp_txdata));
+
+	bp->fcoe_init = false;
+
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
+	if (IS_PF(bp)) {
+		rc = bnx2x_alloc_mem(bp);
+		if (rc) {
+			BNX2X_ERR("Unable to allocate bp memory\n");
+			return rc;
+		}
+	}
+
+	/* Needs to be done after alloc mem, since it is self-adjusting to the
+	 * amount of memory available for RSS queues.
+	 */
+	rc = bnx2x_alloc_fp_mem(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate memory for fps\n");
+		LOAD_ERROR_EXIT(bp, load_error0);
+	}
+
+	/* Allocate memory for FW statistics */
+	if (bnx2x_alloc_fw_stats_mem(bp))
+		LOAD_ERROR_EXIT(bp, load_error0);
+
+	/* request pf to initialize status blocks */
+	if (IS_VF(bp)) {
+		rc = bnx2x_vfpf_init(bp);
+		if (rc)
+			LOAD_ERROR_EXIT(bp, load_error0);
+	}
+
+	/* Since bnx2x_alloc_mem() may update bp->num_queues,
+	 * bnx2x_set_real_num_queues() should always come after it.
+	 * At this stage cnic queues are not counted.
+	 */
+	rc = bnx2x_set_real_num_queues(bp, 0);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues\n");
+		LOAD_ERROR_EXIT(bp, load_error0);
+	}
+
+	/* Configure multi-CoS mappings in the kernel.
+	 * This configuration may be overridden by a multi-class queue
+	 * discipline or by a DCBX negotiation result.
+	 */
+	bnx2x_setup_tc(bp->dev, bp->max_cos);
+
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
+	DP(NETIF_MSG_IFUP, "napi added\n");
+	bnx2x_napi_enable(bp);
+
+	if (IS_PF(bp)) {
+		/* set pf load just before approaching the MCP */
+		bnx2x_set_pf_load(bp);
+
+		/* if mcp exists send load request and analyze response */
+		if (!BP_NOMCP(bp)) {
+			/* attempt to load pf */
+			rc = bnx2x_nic_load_request(bp, &load_code);
+			if (rc)
+				LOAD_ERROR_EXIT(bp, load_error1);
+
+			/* what did mcp say? */
+			rc = bnx2x_compare_fw_ver(bp, load_code, true);
+			if (rc) {
+				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+				LOAD_ERROR_EXIT(bp, load_error2);
+			}
+		} else {
+			load_code = bnx2x_nic_load_no_mcp(bp, port);
+		}
+
+		/* mark pmf if applicable */
+		bnx2x_nic_load_pmf(bp, load_code);
+
+		/* Init Function state controlling object */
+		bnx2x__init_func_obj(bp);
+
+		/* Initialize HW */
+		rc = bnx2x_init_hw(bp, load_code);
+		if (rc) {
+			BNX2X_ERR("HW init failed, aborting\n");
+			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+			LOAD_ERROR_EXIT(bp, load_error2);
+		}
+	}
+
+	bnx2x_pre_irq_nic_init(bp);
+
+	/* Connect to IRQs */
+	rc = bnx2x_setup_irqs(bp);
+	if (rc) {
+		BNX2X_ERR("setup irqs failed\n");
+		if (IS_PF(bp))
+			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		LOAD_ERROR_EXIT(bp, load_error2);
+	}
+
+	/* Init per-function objects */
+	if (IS_PF(bp)) {
+		/* Setup NIC internals and enable interrupts */
+		bnx2x_post_irq_nic_init(bp, load_code);
+
+		bnx2x_init_bp_objs(bp);
+		bnx2x_iov_nic_init(bp);
+
+		/* Set AFEX default VLAN tag to an invalid value */
+		bp->afex_def_vlan_tag = -1;
+		bnx2x_nic_load_afex_dcc(bp, load_code);
+		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+		rc = bnx2x_func_start(bp);
+		if (rc) {
+			BNX2X_ERR("Function start failed!\n");
+			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+
+			LOAD_ERROR_EXIT(bp, load_error3);
+		}
+
+		/* Send LOAD_DONE command to MCP */
+		if (!BP_NOMCP(bp)) {
+			load_code = bnx2x_fw_command(bp,
+						     DRV_MSG_CODE_LOAD_DONE, 0);
+			if (!load_code) {
+				BNX2X_ERR("MCP response failure, aborting\n");
+				rc = -EBUSY;
+				LOAD_ERROR_EXIT(bp, load_error3);
+			}
+		}
+
+		/* initialize FW coalescing state machines in RAM */
+		bnx2x_update_coalesce(bp);
+	}
+
+	/* setup the leading queue */
+	rc = bnx2x_setup_leading(bp);
+	if (rc) {
+		BNX2X_ERR("Setup leading failed!\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
+
+	/* set up the rest of the queues */
+	for_each_nondefault_eth_queue(bp, i) {
+		if (IS_PF(bp))
+			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+		else /* VF */
+			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
+		if (rc) {
+			BNX2X_ERR("Queue %d setup failed\n", i);
+			LOAD_ERROR_EXIT(bp, load_error3);
+		}
+	}
+
+	/* setup rss */
+	rc = bnx2x_init_rss(bp);
+	if (rc) {
+		BNX2X_ERR("PF RSS init failed\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
+
+	/* Now when Clients are configured we are ready to work */
+	bp->state = BNX2X_STATE_OPEN;
+
+	/* Configure a ucast MAC */
+	if (IS_PF(bp))
+		rc = bnx2x_set_eth_mac(bp, true);
+	else /* vf */
+		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+					   true);
+	if (rc) {
+		BNX2X_ERR("Setting Ethernet MAC failed\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
+
+	if (IS_PF(bp) && bp->pending_max) {
+		bnx2x_update_max_mf_config(bp, bp->pending_max);
+		bp->pending_max = 0;
+	}
+
+	if (bp->port.pmf) {
+		rc = bnx2x_initial_phy_init(bp, load_mode);
+		if (rc)
+			LOAD_ERROR_EXIT(bp, load_error3);
+	}
+	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
+
+	/* Start fast path */
+
+	/* Re-configure vlan filters */
+	rc = bnx2x_vlan_reconfigure_vid(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error3);
+
+	/* Initialize Rx filter. */
+	bnx2x_set_rx_mode_inner(bp);
+
+	if (bp->flags & PTP_SUPPORTED) {
+		bnx2x_init_ptp(bp);
+		bnx2x_configure_ptp_filters(bp);
+	}
+	/* Start Tx */
+	switch (load_mode) {
+	case LOAD_NORMAL:
+		/* Tx queue should be only re-enabled */
+		netif_tx_wake_all_queues(bp->dev);
+		break;
+
+	case LOAD_OPEN:
+		netif_tx_start_all_queues(bp->dev);
+		smp_mb__after_atomic();
+		break;
+
+	case LOAD_DIAG:
+	case LOAD_LOOPBACK_EXT:
+		bp->state = BNX2X_STATE_DIAG;
+		break;
+
+	default:
+		break;
+	}
+
+	if (bp->port.pmf)
+		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
+	else
+		bnx2x__link_status_update(bp);
+
+	/* start the timer */
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	if (CNIC_ENABLED(bp))
+		bnx2x_load_cnic(bp);
+
+	if (IS_PF(bp))
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
+	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+		/* mark driver is loaded in shmem2 */
+		u32 val;
+		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+		val &= ~DRV_FLAGS_MTU_MASK;
+		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
+		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
+	}
+
+	/* Wait for all pending SP commands to complete */
+	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+		return -EBUSY;
+	}
+
+	/* Update driver data for On-Chip MFW dump. */
+	if (IS_PF(bp))
+		bnx2x_update_mfw_dump(bp);
+
+	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+		bnx2x_dcbx_init(bp, false);
+
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
+	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
+
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error3:
+	if (IS_PF(bp)) {
+		bnx2x_int_disable_sync(bp, 1);
+
+		/* Clean queueable objects */
+		bnx2x_squeeze_objects(bp);
+	}
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_rx_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+load_error2:
+	if (IS_PF(bp) && !BP_NOMCP(bp)) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+	}
+
+	bp->port.pmf = 0;
+load_error1:
+	bnx2x_napi_disable(bp);
+	bnx2x_del_all_napi(bp);
+
+	/* clear pf_load status, as it was already set */
+	if (IS_PF(bp))
+		bnx2x_clear_pf_load(bp);
+load_error0:
+	bnx2x_free_fw_stats_mem(bp);
+	bnx2x_free_fp_mem(bp);
+	bnx2x_free_mem(bp);
+
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+	u8 rc = 0, cos, i;
+
+	/* Wait until tx fastpath tasks complete */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		for_each_cos_in_tx_queue(fp, cos)
+			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+{
+	int i;
+	bool global = false;
+
+	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
+	/* mark driver is unloaded in shmem2 */
+	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+		u32 val;
+		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+	}
+
+	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+	    (bp->state == BNX2X_STATE_CLOSED ||
+	     bp->state == BNX2X_STATE_ERROR)) {
+		/* We can get here if the driver has been unloaded
+		 * during parity error recovery and is either waiting for a
+		 * leader to complete or for other functions to unload and
+		 * then ifdown has been issued. In this case we want to
+		 * unload and let the other functions complete the recovery
+		 * process.
+		 */
+		bp->recovery_state = BNX2X_RECOVERY_DONE;
+		bp->is_leader = 0;
+		bnx2x_release_leader_lock(bp);
+		smp_mb();
+
+		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
+		BNX2X_ERR("Can't unload in closed or error state\n");
+		return -EINVAL;
+	}
+
+	/* Nothing to do during unload if the previous bnx2x_nic_load()
+	 * did not complete successfully - all resources have already been
+	 * released.
+	 *
+	 * We can get here only after an unsuccessful ndo_* callback, during
+	 * which the dev->IFF_UP flag is still on.
+	 */
+	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+		return 0;
+
+	/* It's important to set bp->state to a value different from
+	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+	 */
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+	smp_mb();
+
+	/* indicate to VFs that the PF is going down */
+	bnx2x_iov_channel_down(bp);
+
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+	netdev_reset_tc(bp->dev);
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	del_timer_sync(&bp->timer);
+
+	if (IS_PF(bp)) {
+		/* Set ALWAYS_ALIVE bit in shmem */
+		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+		bnx2x_drv_pulse(bp);
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_save_statistics(bp);
+	}
+
+	/* wait till consumers catch up with producers in all queues */
+	bnx2x_drain_tx_queues(bp);
+
+	/* If this is a VF, indicate to the PF that this function is going down
+	 * (the PF will delete the sp elements and clear the initializations).
+	 */
+	if (IS_VF(bp))
+		bnx2x_vfpf_close_vf(bp);
+	else if (unload_mode != UNLOAD_RECOVERY)
+		/* if this is a normal/close unload need to clean up chip*/
+		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
+	else {
+		/* Send the UNLOAD_REQUEST to the MCP */
+		bnx2x_send_unload_req(bp, unload_mode);
+
+		/* Prevent transactions to the host from the functions on the
+		 * engine that does not reset the global blocks in case of a
+		 * global attention, once the global blocks are reset and the
+		 * gates are opened (the engine whose leader will perform the
+		 * recovery last).
+		 */
+		if (!CHIP_IS_E1x(bp))
+			bnx2x_pf_disable(bp);
+
+		/* Disable HW interrupts, NAPI */
+		bnx2x_netif_stop(bp, 1);
+		/* Delete all NAPI objects */
+		bnx2x_del_all_napi(bp);
+		if (CNIC_LOADED(bp))
+			bnx2x_del_all_napi_cnic(bp);
+		/* Release IRQs */
+		bnx2x_free_irq(bp);
+
+		/* Report UNLOAD_DONE to MCP */
+		bnx2x_send_unload_done(bp, false);
+	}
+
+	/*
+	 * At this stage no more interrupts will arrive so we may safely clean
+	 * the queueable objects here in case they failed to get cleaned so far.
+	 */
+	if (IS_PF(bp))
+		bnx2x_squeeze_objects(bp);
+
+	/* There should be no more pending SP commands at this stage */
+	bp->sp_state = 0;
+
+	bp->port.pmf = 0;
+
+	/* clear pending work in rtnl task */
+	bp->sp_rtnl_state = 0;
+	smp_mb();
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_free_skbs_cnic(bp);
+	for_each_rx_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+	bnx2x_free_fp_mem(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_free_fp_mem_cnic(bp);
+
+	if (IS_PF(bp)) {
+		if (CNIC_LOADED(bp))
+			bnx2x_free_mem_cnic(bp);
+	}
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+	bp->cnic_loaded = false;
+
+	/* Clear driver version indication in shmem */
+	if (IS_PF(bp))
+		bnx2x_update_mng_version(bp);
+
+	/* Check if there are pending parity attentions. If there are - set
+	 * RECOVERY_IN_PROGRESS.
+	 */
+	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
+		bnx2x_set_reset_in_progress(bp);
+
+		/* Set RESET_IS_GLOBAL if needed */
+		if (global)
+			bnx2x_set_reset_global(bp);
+	}
+
+	/* The last driver must disable the "close the gate" functionality if
+	 * there is no parity attention or "process kill" pending.
+	 */
+	if (IS_PF(bp) &&
+	    !bnx2x_clear_pf_load(bp) &&
+	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
+		bnx2x_disable_close_the_gate(bp);
+
+	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
+	return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+	u16 pmcsr;
+
+	/* If there is no power capability, silently succeed */
+	if (!bp->pdev->pm_cap) {
+		BNX2X_DEV_INFO("No power capability. Breaking.\n");
+		return 0;
+	}
+
+	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	switch (state) {
+	case PCI_D0:
+		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
+				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+				       PCI_PM_CTRL_PME_STATUS));
+
+		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+			/* delay required during transition out of D3hot */
+			msleep(20);
+		break;
+
+	case PCI_D3hot:
+		/* If there are other clients above, don't
+		 * shut down the power */
+		if (atomic_read(&bp->pdev->enable_cnt) != 1)
+			return 0;
+		/* Don't shut down the power for emulation and FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			return 0;
+
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= 3;
+
+		if (bp->wol)
+			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
+				      pmcsr);
+
+		/* No more memory access after this point until
+		 * device is brought back to D0.
+		 */
+		break;
+
+	default:
+		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * net_device service functions
+ */
+static int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = 0;
+	u8 cos;
+	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+						 napi);
+	struct bnx2x *bp = fp->bp;
+
+	while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
+#endif
+		if (!bnx2x_fp_lock_napi(fp))
+			return budget;
+
+		for_each_cos_in_tx_queue(fp, cos)
+			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
+
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
+
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget) {
+				bnx2x_fp_unlock_napi(fp);
+				break;
+			}
+		}
+
+		bnx2x_fp_unlock_napi(fp);
+
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+
+			/* No need to update SB for FCoE L2 ring as long as
+			 * it's connected to the default SB and the SB
+			 * has been updated when NAPI was scheduled.
+			 */
+			if (IS_FCOE_FP(fp)) {
+				napi_complete(napi);
+				break;
+			}
+			bnx2x_update_fpsb_idx(fp);
+			/* bnx2x_has_rx_work() reads the status block,
+			 * thus we need to ensure that status block indices
+			 * have been actually read (bnx2x_update_fpsb_idx)
+			 * prior to this check (bnx2x_has_rx_work) so that
+			 * we won't write the "newer" value of the status block
+			 * to IGU (if there was a DMA right after
+			 * bnx2x_has_rx_work and if there is no rmb, the memory
+			 * reading (bnx2x_update_fpsb_idx) may be postponed
+			 * to right before bnx2x_ack_sb). In this case there
+			 * will never be another interrupt until there is
+			 * another update of the status block, while there
+			 * is still unhandled work.
+			 */
+			rmb();
+
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				DP(NETIF_MSG_RX_STATUS,
+				   "Update index to %d\n", fp->fp_hc_idx);
+				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_hc_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
+	}
+
+	return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+int bnx2x_low_latency_recv(struct napi_struct *napi)
+{
+	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+						 napi);
+	struct bnx2x *bp = fp->bp;
+	int found = 0;
+
+	if ((bp->state == BNX2X_STATE_CLOSED) ||
+	    (bp->state == BNX2X_STATE_ERROR) ||
+	    (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
+		return LL_FLUSH_FAILED;
+
+	if (!bnx2x_fp_lock_poll(fp))
+		return LL_FLUSH_BUSY;
+
+	if (bnx2x_has_rx_work(fp))
+		found = bnx2x_rx_int(fp, 4);
+
+	bnx2x_fp_unlock_poll(fp);
+
+	return found;
+}
+#endif
+
+/* We split the first BD into a headers BD and a data BD to ease the pain of
+ * our fellow microcode engineers; we use one mapping for both BDs.
+ */
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+			  struct bnx2x_fp_txdata *txdata,
+			  struct sw_tx_bd *tx_buf,
+			  struct eth_tx_start_bd **tx_bd, u16 hlen,
+			  u16 bd_prod)
+{
+	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+	struct eth_tx_bd *d_tx_bd;
+	dma_addr_t mapping;
+	int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+	/* first fix first BD */
+	h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
+	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
+
+	/* now get a new data BD
+	 * (after the pbd) and fill it */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+	/* this marks the BD as one that has no individual mapping */
+	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "TSO split data size is %d (%x:%x)\n",
+	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+	/* update tx_bd */
+	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+	return bd_prod;
+}
+
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
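+/* Adjust a partially computed checksum by removing (fix > 0) or adding
+ * (fix < 0) the contribution of the |fix| bytes adjacent to the transport
+ * header, fold it and return the result byte-swapped for the FW.
+ */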
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+	__sum16 tsum = (__force __sum16) csum;
+
+	if (fix > 0)
+		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+				  csum_partial(t_header - fix, fix, 0)));
+
+	else if (fix < 0)
+		tsum = ~csum_fold(csum_add((__force __wsum) csum,
+				  csum_partial(t_header, -fix, 0)));
+
+	return bswab16(tsum);
+}
+
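+/* Classify an skb for transmission: return a bitmask of XMIT_* flags
+ * describing the checksum offload type (IPv4/IPv6, TCP, encapsulation)
+ * and the GSO requirements.
+ */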
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+	u32 rc;
+	__u8 prot = 0;
+	__be16 protocol;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return XMIT_PLAIN;
+
+	protocol = vlan_get_protocol(skb);
+	if (protocol == htons(ETH_P_IPV6)) {
+		rc = XMIT_CSUM_V6;
+		prot = ipv6_hdr(skb)->nexthdr;
+	} else {
+		rc = XMIT_CSUM_V4;
+		prot = ip_hdr(skb)->protocol;
+	}
+
+	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+		if (inner_ip_hdr(skb)->version == 6) {
+			rc |= XMIT_CSUM_ENC_V6;
+			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+		} else {
+			rc |= XMIT_CSUM_ENC_V4;
+			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+		}
+	}
+	if (prot == IPPROTO_TCP)
+		rc |= XMIT_CSUM_TCP;
+
+	if (skb_is_gso(skb)) {
+		if (skb_is_gso_v6(skb)) {
+			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V6;
+		} else {
+			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V4;
+		}
+	}
+
+	return rc;
+}
+
+/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 parsing BDs and last BD) */
+#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
+
+/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+#define BNX2X_NUM_TSO_WIN_SUB_BDS               3
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
+/* check if packet requires linearization (packet is too fragmented)
+   no need to check fragmentation if page size > 8K (there will be no
+   violation to FW restrictions) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+			     u32 xmit_type)
+{
+	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
+	int to_copy = 0, hlen = 0;
+
+	if (xmit_type & XMIT_GSO_ENC)
+		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
+
+	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
+		if (xmit_type & XMIT_GSO) {
+			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
+			/* Number of windows to check */
+			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+			int wnd_idx = 0;
+			int frag_idx = 0;
+			u32 wnd_sum = 0;
+
+			/* Headers length */
+			if (xmit_type & XMIT_GSO_ENC)
+				hlen = (int)(skb_inner_transport_header(skb) -
+					     skb->data) +
+					     inner_tcp_hdrlen(skb);
+			else
+				hlen = (int)(skb_transport_header(skb) -
+					     skb->data) + tcp_hdrlen(skb);
+
+			/* Amount of data (w/o headers) on linear part of SKB*/
+			first_bd_sz = skb_headlen(skb) - hlen;
+
+			wnd_sum  = first_bd_sz;
+
+			/* Calculate the first sum - it's special */
+			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+				wnd_sum +=
+					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
+
+			/* If there was data on linear skb data - check it */
+			if (first_bd_sz > 0) {
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					goto exit_lbl;
+				}
+
+				wnd_sum -= first_bd_sz;
+			}
+
+			/* Others are easier: run through the frag list and
+			   check all windows */
+			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+				wnd_sum +=
+			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
+
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					break;
+				}
+				wnd_sum -=
+					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
+			}
+		} else {
+			/* in the non-LSO case a too fragmented packet
+			   should always be linearized */
+			to_copy = 1;
+		}
+	}
+
+exit_lbl:
+	if (unlikely(to_copy))
+		DP(NETIF_MSG_TX_QUEUED,
+		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
+		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+	return to_copy;
+}
+#endif
+
+/**
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
+ *
+ * @skb:	packet skb
+ * @pbd:	parse BD
+ * @xmit_type:	xmit flags
+ */
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+			      struct eth_tx_parse_bd_e1x *pbd,
+			      u32 xmit_type)
+{
+	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
+	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
+
+	if (xmit_type & XMIT_GSO_V4) {
+		pbd->ip_id = bswab16(ip_hdr(skb)->id);
+		pbd->tcp_pseudo_csum =
+			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   0, IPPROTO_TCP, 0));
+	} else {
+		pbd->tcp_pseudo_csum =
+			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0));
+	}
+
+	pbd->global_data |=
+		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
+}
+
+/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp:			driver handle
+ * @skb:		packet skb
+ * @parsing_data:	data to be updated
+ * @xmit_type:		xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+				 u32 *parsing_data, u32 xmit_type)
+{
+	*parsing_data |=
+		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+		return skb_inner_transport_header(skb) +
+			inner_tcp_hdrlen(skb) - skb->data;
+	}
+
+	/* We support checksum offload for TCP and UDP only.
+	 * No need to pass the UDP header length - it's a constant.
+	 */
+	return skb_inner_transport_header(skb) +
+		sizeof(struct udphdr) - skb->data;
+}
+
+/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
+ *
+ * @bp:			driver handle
+ * @skb:		packet skb
+ * @parsing_data:	data to be updated
+ * @xmit_type:		xmit flags
+ *
+ * 57712/578xx related
+ */
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+				u32 *parsing_data, u32 xmit_type)
+{
+	*parsing_data |=
+		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+	}
+	/* We support checksum offload for TCP and UDP only.
+	 * No need to pass the UDP header length - it's a constant.
+	 */
+	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+}
+
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+			       struct eth_tx_start_bd *tx_start_bd,
+			       u32 xmit_type)
+{
+	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
+
+	if (!(xmit_type & XMIT_CSUM_TCP))
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
+/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
+ *
+ * @bp:		driver handle
+ * @skb:	packet skb
+ * @pbd:	parse BD to be updated
+ * @xmit_type:	xmit flags
+ */
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+			     struct eth_tx_parse_bd_e1x *pbd,
+			     u32 xmit_type)
+{
+	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+
+	/* for now NS flag is not used in Linux */
+	pbd->global_data =
+		cpu_to_le16(hlen |
+			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+	pbd->ip_hlen_w = (skb_transport_header(skb) -
+			skb_network_header(skb)) >> 1;
+
+	hlen += pbd->ip_hlen_w;
+
+	/* We support checksum offload for TCP and UDP only */
+	if (xmit_type & XMIT_CSUM_TCP)
+		hlen += tcp_hdrlen(skb) / 2;
+	else
+		hlen += sizeof(struct udphdr) / 2;
+
+	pbd->total_hlen_w = cpu_to_le16(hlen);
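+	/* hlen was accumulated in 16-bit words; return it in bytes */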
+	hlen = hlen*2;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
+
+	} else {
+		s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "hlen %d  fix %d  csum before fix %x\n",
+		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+		/* HW bug: fixup the CSUM */
+		pbd->tcp_pseudo_csum =
+			bnx2x_csum_fix(skb_transport_header(skb),
+				       SKB_CS(skb), fix);
+
+		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+		   pbd->tcp_pseudo_csum);
+	}
+
+	return hlen;
+}
+
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+				      struct eth_tx_parse_bd_e2 *pbd_e2,
+				      struct eth_tx_parse_2nd_bd *pbd2,
+				      u16 *global_data,
+				      u32 xmit_type)
+{
+	u16 hlen_w = 0;
+	u8 outerip_off, outerip_len = 0;
+
+	/* from outer IP to transport */
+	hlen_w = (skb_inner_transport_header(skb) -
+		  skb_network_header(skb)) >> 1;
+
+	/* transport len */
+	hlen_w += inner_tcp_hdrlen(skb) >> 1;
+
+	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+	/* outer IP header info */
+	if (xmit_type & XMIT_CSUM_V4) {
+		struct iphdr *iph = ip_hdr(skb);
+		u32 csum = (__force u32)(~iph->check) -
+			   (__force u32)iph->tot_len -
+			   (__force u32)iph->frag_off;
+
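+		/* iph->ihl counts 32-bit dwords; outerip_len is kept in 16-bit words */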
+		outerip_len = iph->ihl << 1;
+
+		pbd2->fw_ip_csum_wo_len_flags_frag =
+			bswab16(csum_fold((__force __wsum)csum));
+	} else {
+		pbd2->fw_ip_hdr_to_payload_w =
+			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+		pbd_e2->data.tunnel_data.flags |=
+			ETH_TUNNEL_DATA_IPV6_OUTER;
+	}
+
+	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+	/* inner IP header info */
+	if (xmit_type & XMIT_CSUM_ENC_V4) {
+		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+		pbd_e2->data.tunnel_data.pseudo_csum =
+			bswab16(~csum_tcpudp_magic(
+					inner_ip_hdr(skb)->saddr,
+					inner_ip_hdr(skb)->daddr,
+					0, IPPROTO_TCP, 0));
+	} else {
+		pbd_e2->data.tunnel_data.pseudo_csum =
+			bswab16(~csum_ipv6_magic(
+					&inner_ipv6_hdr(skb)->saddr,
+					&inner_ipv6_hdr(skb)->daddr,
+					0, IPPROTO_TCP, 0));
+	}
+
+	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+	*global_data |=
+		outerip_off |
+		(outerip_len <<
+			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+
+	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+	}
+}
+
+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+					 u32 xmit_type)
+{
+	struct ipv6hdr *ipv6;
+
+	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+		return;
+
+	if (xmit_type & XMIT_GSO_ENC_V6)
+		ipv6 = inner_ipv6_hdr(skb);
+	else /* XMIT_GSO_V6 */
+		ipv6 = ipv6_hdr(skb);
+
+	if (ipv6->nexthdr == NEXTHDR_IPV6)
+		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	struct netdev_queue *txq;
+	struct bnx2x_fp_txdata *txdata;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd, *first_bd;
+	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
+	u32 pbd_e2_parsing_data = 0;
+	u16 pkt_prod, bd_prod;
+	int nbd, txq_index;
+	dma_addr_t mapping;
+	u32 xmit_type = bnx2x_xmit_type(bp, skb);
+	int i;
+	u8 hlen = 0;
+	__le16 pkt_size = 0;
+	struct ethhdr *eth;
+	u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return NETDEV_TX_BUSY;
+#endif
+
+	txq_index = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, txq_index);
+
+	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
+
+	txdata = &bp->bnx2x_txq[txq_index];
+
+	/* enable this debug print to view the transmission queue being used
+	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
+	   txq_index, fp_index, txdata_index); */
+
+	/* enable this debug print to view the transmission details
+	DP(NETIF_MSG_TX_QUEUED,
+	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
+	   txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+	if (unlikely(bnx2x_tx_avail(bp, txdata) <
+			skb_shinfo(skb)->nr_frags +
+			BDS_PER_TX_PKT +
+			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+		/* Handle special storage cases separately */
+		if (txdata->tx_ring_size == 0) {
+			struct bnx2x_eth_q_stats *q_stats =
+				bnx2x_fp_qstats(bp, txdata->parent_fp);
+			q_stats->driver_filtered_tx_pkt++;
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+		netif_tx_stop_queue(txq);
+		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+
+		return NETDEV_TX_BUSY;
+	}
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
+	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+	   skb->len);
+
+	eth = (struct ethhdr *)skb->data;
+
+	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
+	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+		if (is_broadcast_ether_addr(eth->h_dest))
+			mac_type = BROADCAST_ADDRESS;
+		else
+			mac_type = MULTICAST_ADDRESS;
+	}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
+	/* First, check if we need to linearize the skb (due to FW
+	   restrictions). No need to check fragmentation if page size > 8K
+	   (there will be no violation to FW restrictions) */
+	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+		/* Statistics of linearization */
+		bp->lin_cnt++;
+		if (skb_linearize(skb) != 0) {
+			DP(NETIF_MSG_TX_QUEUED,
+			   "SKB linearization failed - silently dropping this SKB\n");
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+#endif
+	/* Map skb linear data for DMA */
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		DP(NETIF_MSG_TX_QUEUED,
+		   "SKB mapping failed - silently dropping this SKB\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	/*
+	Please read carefully. First we use one BD which we mark as start,
+	then we have a parsing info BD (used for TSO or xsum),
+	and only then we have the rest of the TSO BDs.
+	(don't forget to mark the last one as last,
+	and to unmap only AFTER you write to the BD ...)
+	And above all, all pbd sizes are in words - NOT DWORDS!
+	*/
+
+	/* get current pkt produced now - advance it just before sending packet
+	 * since mapping of pages may fail and cause packet to be dropped
+	 */
+	pkt_prod = txdata->tx_pkt_prod;
+	bd_prod = TX_BD(txdata->tx_bd_prod);
+
+	/* get a tx_buf and first BD
+	 * tx_start_bd may be changed during SPLIT,
+	 * but first_bd will always stay first
+	 */
+	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+	first_bd = tx_start_bd;
+
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+		} else if (bp->ptp_tx_skb) {
+			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+		} else {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			/* schedule check for Tx timestamp */
+			bp->ptp_tx_skb = skb_get(skb);
+			bp->ptp_tx_start = jiffies;
+			schedule_work(&bp->ptp_task);
+		}
+	}
+
+	/* header nbd: indirectly zero other flags! */
+	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
+
+	/* remember the first BD of the packet */
+	tx_buf->first_bd = txdata->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
+	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
+
+	if (skb_vlan_tag_present(skb)) {
+		tx_start_bd->vlan_or_ethertype =
+		    cpu_to_le16(skb_vlan_tag_get(skb));
+		tx_start_bd->bd_flags.as_bitfield |=
+		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+	} else {
+		/* when transmitting in a vf, start bd must hold the ethertype
+		 * for fw to enforce it
+		 */
+#ifndef BNX2X_STOP_ON_ERROR
+		if (IS_VF(bp))
+#endif
+			tx_start_bd->vlan_or_ethertype =
+				cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
+		else
+			/* used by FW for packet accounting */
+			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+#endif
+	}
+
+	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	if (xmit_type & XMIT_CSUM)
+		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
+
+	if (!CHIP_IS_E1x(bp)) {
+		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+
+		if (xmit_type & XMIT_CSUM_ENC) {
+			u16 global_data = 0;
+
+			/* Set PBD in enc checksum offload case */
+			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+						      &pbd_e2_parsing_data,
+						      xmit_type);
+
+			/* turn on 2nd parsing and get a BD */
+			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+			memset(pbd2, 0, sizeof(*pbd2));
+
+			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+				(skb_inner_network_header(skb) -
+				 skb->data) >> 1;
+
+			if (xmit_type & XMIT_GSO_ENC)
+				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+							  &global_data,
+							  xmit_type);
+
+			pbd2->global_data = cpu_to_le16(global_data);
+
+			/* add an additional parse BD indication to the start BD */
+			SET_FLAG(tx_start_bd->general_data,
+				 ETH_TX_START_BD_PARSE_NBDS, 1);
+			/* set encapsulation flag in start BD */
+			SET_FLAG(tx_start_bd->general_data,
+				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
+			nbd++;
+		} else if (xmit_type & XMIT_CSUM) {
+			/* Set PBD in checksum offload case w/o encapsulation */
+			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+						     &pbd_e2_parsing_data,
+						     xmit_type);
+		}
+
+		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
+		/* Add the macs to the parsing BD if this is a vf or if
+		 * Tx Switching is enabled.
+		 */
+		if (IS_VF(bp)) {
+			/* override GRE parameters in BD */
+			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+					      &pbd_e2->data.mac_addr.src_mid,
+					      &pbd_e2->data.mac_addr.src_lo,
+					      eth->h_source);
+
+			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+					      &pbd_e2->data.mac_addr.dst_mid,
+					      &pbd_e2->data.mac_addr.dst_lo,
+					      eth->h_dest);
+		} else {
+			if (bp->flags & TX_SWITCHING)
+				bnx2x_set_fw_mac_addr(
+						&pbd_e2->data.mac_addr.dst_hi,
+						&pbd_e2->data.mac_addr.dst_mid,
+						&pbd_e2->data.mac_addr.dst_lo,
+						eth->h_dest);
+#ifdef BNX2X_STOP_ON_ERROR
+			/* Enforce security is always set in Stop on Error -
+			 * source mac should be present in the parsing BD
+			 */
+			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+					      &pbd_e2->data.mac_addr.src_mid,
+					      &pbd_e2->data.mac_addr.src_lo,
+					      eth->h_source);
+#endif
+		}
+
+		SET_FLAG(pbd_e2_parsing_data,
+			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
+	} else {
+		u16 global_data = 0;
+		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+		/* Set PBD in checksum offload case */
+		if (xmit_type & XMIT_CSUM)
+			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+
+		SET_FLAG(global_data,
+			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+		pbd_e1x->global_data |= cpu_to_le16(global_data);
+	}
+
+	/* Setup the data pointer of the first BD of the packet */
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	pkt_size = tx_start_bd->nbytes;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
+	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+	   le16_to_cpu(tx_start_bd->nbytes),
+	   tx_start_bd->bd_flags.as_bitfield,
+	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
+
+	if (xmit_type & XMIT_GSO) {
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
+		   skb->len, hlen, skb_headlen(skb),
+		   skb_shinfo(skb)->gso_size);
+
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
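+		/* linear data extends past the headers - split the first BD */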
+		if (unlikely(skb_headlen(skb) > hlen)) {
+			nbd++;
+			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+						 &tx_start_bd, hlen,
+						 bd_prod);
+		}
+		if (!CHIP_IS_E1x(bp))
+			pbd_e2_parsing_data |=
+				(skb_shinfo(skb)->gso_size <<
+				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+				 ETH_TX_PARSE_BD_E2_LSO_MSS;
+		else
+			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+	}
+
+	/* Set the PBD's parsing_data field if not zero
+	 * (for the chips newer than 57711).
+	 */
+	if (pbd_e2_parsing_data)
+		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
+	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+	/* Handle fragmented skb */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+			unsigned int pkts_compl = 0, bytes_compl = 0;
+
+			DP(NETIF_MSG_TX_QUEUED,
+			   "Unable to map page - dropping packet...\n");
+
+			/* we need to unmap all buffers already mapped
+			 * for this SKB;
+			 * first_bd->nbd needs to be properly updated
+			 * before the call to bnx2x_free_tx_pkt
+			 */
+			first_bd->nbd = cpu_to_le16(nbd);
+			bnx2x_free_tx_pkt(bp, txdata,
+					  TX_BD(txdata->tx_pkt_prod),
+					  &pkts_compl, &bytes_compl);
+			return NETDEV_TX_OK;
+		}
+
+		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+		if (total_pkt_bd == NULL)
+			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
+		le16_add_cpu(&pkt_size, skb_frag_size(frag));
+		nbd++;
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
+		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+		   le16_to_cpu(tx_data_bd->nbytes));
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+	/* update with actual num BDs */
+	first_bd->nbd = cpu_to_le16(nbd);
+
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	/* now send a tx doorbell, counting the next BD
+	 * if the packet contains or ends with it
+	 */
+	if (TX_BD_POFF(bd_prod) < nbd)
+		nbd++;
+
+	/* total_pkt_bytes should be set on the first data BD if
+	 * it's not an LSO packet and there is more than one
+	 * data BD. In this case pkt_size is limited by an MTU value.
+	 * However we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, where we care much more about them.
+	 */
+	if (total_pkt_bd != NULL)
+		total_pkt_bd->total_pkt_bytes = pkt_size;
+
+	if (pbd_e1x)
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
+		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+		   le16_to_cpu(pbd_e1x->total_hlen_w));
+	if (pbd_e2)
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
+		   pbd_e2,
+		   pbd_e2->data.mac_addr.dst_hi,
+		   pbd_e2->data.mac_addr.dst_mid,
+		   pbd_e2->data.mac_addr.dst_lo,
+		   pbd_e2->data.mac_addr.src_hi,
+		   pbd_e2->data.mac_addr.src_mid,
+		   pbd_e2->data.mac_addr.src_lo,
+		   pbd_e2->parsing_data);
+	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	skb_tx_timestamp(skb);
+
+	txdata->tx_pkt_prod++;
+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
+	txdata->tx_db.data.prod += nbd;
+	barrier();
+
+	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+	mmiowb();
+
+	txdata->tx_bd_prod += nbd;
+
+	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
+		netif_tx_stop_queue(txq);
+
+		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
+		 * ordering of set_bit() in netif_tx_stop_queue() and read of
+		 * fp->bd_tx_cons */
+		smp_mb();
+
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
+			netif_tx_wake_queue(txq);
+	}
+	txdata->tx_pkt++;
+
+	return NETDEV_TX_OK;
+}
+
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+	int mfw_vn = BP_FW_MB_IDX(bp);
+	u32 tmp;
+
+	/* If the shmem shouldn't affect configuration, use the 1:1 default mapping */
+	if (!IS_MF_BD(bp)) {
+		int i;
+
+		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+			c2s_map[i] = i;
+		*c2s_default = 0;
+
+		return;
+	}
+
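+	/* one byte per PCP priority: 0-3 from the lower word, 4-7 from the upper */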
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[0] = tmp & 0xff;
+	c2s_map[1] = (tmp >> 8) & 0xff;
+	c2s_map[2] = (tmp >> 16) & 0xff;
+	c2s_map[3] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[4] = tmp & 0xff;
+	c2s_map[5] = (tmp >> 8) & 0xff;
+	c2s_map[6] = (tmp >> 16) & 0xff;
+	c2s_map[7] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @dev: net device to configure
+ * @num_tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+	int cos, prio, count, offset;
+
+	/* setup tc must be called under rtnl lock */
+	ASSERT_RTNL();
+
+	/* no traffic classes requested. Aborting */
+	if (!num_tc) {
+		netdev_reset_tc(dev);
+		return 0;
+	}
+
+	/* requested to support too many traffic classes */
+	if (num_tc > bp->max_cos) {
+		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
+			  num_tc, bp->max_cos);
+		return -EINVAL;
+	}
+
+	/* declare amount of supported traffic classes */
+	if (netdev_set_num_tc(dev, num_tc)) {
+		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
+		return -EINVAL;
+	}
+
+	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
+	/* configure priority to traffic class mapping */
+	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+		int outer_prio = c2s_map[prio];
+
+		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
+		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+		   "mapping priority %d to tc %d\n",
+		   outer_prio, bp->prio_to_cos[outer_prio]);
+	}
+
+	/* Use this configuration to differentiate tc0 from other COSes
+	   This can be used for ets or pfc, and save the effort of setting
+	   up a multi-class queue disc or negotiating DCBX with a switch
+	netdev_set_prio_tc_map(dev, 0, 0);
+	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
+	for (prio = 1; prio < 16; prio++) {
+		netdev_set_prio_tc_map(dev, prio, 1);
+		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
+	} */
+
+	/* configure traffic class to transmission queue mapping */
+	for (cos = 0; cos < bp->max_cos; cos++) {
+		count = BNX2X_NUM_ETH_QUEUES(bp);
+		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
+		netdev_set_tc_queue(dev, cos, count, offset);
+		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+		   "mapping tc %d to offset %d count %d\n",
+		   cos, offset, count);
+	}
+
+	return 0;
+}
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		BNX2X_ERR("Requested MAC address is not valid\n");
+		return -EINVAL;
+	}
+
+	if (IS_MF_STORAGE_ONLY(bp)) {
+		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
+		return -EINVAL;
+	}
+
+	if (netif_running(dev))  {
+		rc = bnx2x_set_eth_mac(bp, false);
+		if (rc)
+			return rc;
+	}
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (netif_running(dev))
+		rc = bnx2x_set_eth_mac(bp, true);
+
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+	return rc;
+}
+
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+	u8 cos;
+
+	/* Common */
+
+	if (IS_FCOE_IDX(fp_index)) {
+		memset(sb, 0, sizeof(union host_hc_status_block));
+		fp->status_blk_mapping = 0;
+	} else {
+		/* status blocks */
+		if (!CHIP_IS_E1x(bp))
+			BNX2X_PCI_FREE(sb->e2_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e2));
+		else
+			BNX2X_PCI_FREE(sb->e1x_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e1x));
+	}
+
+	/* Rx */
+	if (!skip_rx_queue(bp, fp_index)) {
+		bnx2x_free_rx_bds(fp);
+
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
+			       sizeof(struct eth_fast_path_rx_cqe) *
+			       NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
+			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+	}
+
+	/* Tx */
+	if (!skip_tx_queue(bp, fp_index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+			DP(NETIF_MSG_IFDOWN,
+			   "freeing tx memory of fp %d cos %d cid %d\n",
+			   fp_index, cos, txdata->cid);
+
+			BNX2X_FREE(txdata->tx_buf_ring);
+			BNX2X_PCI_FREE(txdata->tx_desc_ring,
+				txdata->tx_desc_mapping,
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		}
+	}
+	/* end of fastpath */
+}
+
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+	int i;
+	for_each_cnic_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+	int i;
+	for_each_eth_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
+static void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+	if (!CHIP_IS_E1x(bp)) {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e2_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e2_sb->sb.running_index;
+	} else {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e1x_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e1x_sb->sb.running_index;
+	}
+}
+
+/* Returns the number of actually allocated BDs */
+static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+			      int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i, failure_cnt = 0;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during function init so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
+			failure_cnt++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - failure_cnt));
+	}
+
+	if (failure_cnt)
+		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+			  i - failure_cnt, fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+			       cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+	return i - failure_cnt;
+}
+
+static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+	int i;
+
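+	/* the last CQE of every page points to the next page, wrapping to the first */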
+	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+		struct eth_rx_cqe_next_page *nextpg;
+
+		nextpg = (struct eth_rx_cqe_next_page *)
+			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+		nextpg->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		nextpg->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+	}
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block *sb;
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	int ring_size = 0;
+	u8 cos;
+	int rx_ring_size = 0;
+
+	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
+		rx_ring_size = MIN_RX_SIZE_NONTPA;
+		bp->rx_ring_size = rx_ring_size;
+	} else if (!bp->rx_ring_size) {
+		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+		if (CHIP_IS_E3(bp)) {
+			u32 cfg = SHMEM_RD(bp,
+					   dev_info.port_hw_config[BP_PORT(bp)].
+					   default_cfg);
+
+			/* Decrease ring size for 1G functions */
+			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
+				rx_ring_size /= 10;
+		}
+
+		/* allocate at least number of buffers required by FW */
+		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+				     MIN_RX_SIZE_TPA, rx_ring_size);
+
+		bp->rx_ring_size = rx_ring_size;
+	} else /* if rx_ring_size specified - use it */
+		rx_ring_size = bp->rx_ring_size;
+
+	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
+	/* Common */
+	sb = &bnx2x_fp(bp, index, status_blk);
+
+	if (!IS_FCOE_IDX(index)) {
+		/* status blocks */
+		if (!CHIP_IS_E1x(bp)) {
+			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+						    sizeof(struct host_hc_status_block_e2));
+			if (!sb->e2_sb)
+				goto alloc_mem_err;
+		} else {
+			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+						     sizeof(struct host_hc_status_block_e1x));
+			if (!sb->e1x_sb)
+				goto alloc_mem_err;
+		}
+	}
+
+	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+	 * set shortcuts for it.
+	 */
+	if (!IS_FCOE_IDX(index))
+		set_sb_shortcuts(bp, index);
+
+	/* Tx */
+	if (!skip_tx_queue(bp, index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+			DP(NETIF_MSG_IFUP,
+			   "allocating tx memory of fp %d cos %d\n",
+			   index, cos);
+
+			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+						      sizeof(struct sw_tx_bd),
+						      GFP_KERNEL);
+			if (!txdata->tx_buf_ring)
+				goto alloc_mem_err;
+			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+			if (!txdata->tx_desc_ring)
+				goto alloc_mem_err;
+		}
+	}
+
+	/* Rx */
+	if (!skip_rx_queue(bp, index)) {
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		bnx2x_fp(bp, index, rx_buf_ring) =
+			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+		if (!bnx2x_fp(bp, index, rx_buf_ring))
+			goto alloc_mem_err;
+		bnx2x_fp(bp, index, rx_desc_ring) =
+			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+					sizeof(struct eth_rx_bd) * NUM_RX_BD);
+		if (!bnx2x_fp(bp, index, rx_desc_ring))
+			goto alloc_mem_err;
+
+		/* Seed all CQEs by 1s */
+		bnx2x_fp(bp, index, rx_comp_ring) =
+			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+		if (!bnx2x_fp(bp, index, rx_comp_ring))
+			goto alloc_mem_err;
+
+		/* SGE ring */
+		bnx2x_fp(bp, index, rx_page_ring) =
+			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+				GFP_KERNEL);
+		if (!bnx2x_fp(bp, index, rx_page_ring))
+			goto alloc_mem_err;
+		bnx2x_fp(bp, index, rx_sge_ring) =
+			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+		if (!bnx2x_fp(bp, index, rx_sge_ring))
+			goto alloc_mem_err;
+		/* RX BD ring */
+		bnx2x_set_next_page_rx_bd(fp);
+
+		/* CQ ring */
+		bnx2x_set_next_page_rx_cq(fp);
+
+		/* BDs */
+		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+		if (ring_size < rx_ring_size)
+			goto alloc_mem_err;
+	}
+
+	return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+						index, ring_size);
+	/* FW will drop all packets if the queue is not big enough.
+	 * In these cases we disable the queue.
+	 * Min size is different for OOO, TPA and non-TPA queues
+	 */
+	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
+				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+		/* release memory allocated for this queue */
+		bnx2x_free_fp_mem_at(bp, index);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+{
+	if (!NO_FCOE(bp))
+		/* FCoE */
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
+			/* we will fail load process instead of mark
+			 * NO_FCOE_FLAG
+			 */
+			return -ENOMEM;
+
+	return 0;
+}
+
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+	int i;
+
+	/* 1. Allocate FP for leading - fatal if error
+	 * 2. Allocate RSS - fix number of queues if error
+	 */
+
+	/* leading */
+	if (bnx2x_alloc_fp_mem_at(bp, 0))
+		return -ENOMEM;
+
+	/* RSS */
+	for_each_nondefault_eth_queue(bp, i)
+		if (bnx2x_alloc_fp_mem_at(bp, i))
+			break;
+
+	/* handle memory failures */
+	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+		WARN_ON(delta < 0);
+		bnx2x_shrink_eth_fp(bp, delta);
+		if (CNIC_SUPPORT(bp))
+			/* move non eth FPs next to last eth FP
+			 * must be done in that order
+			 * FCOE_IDX < FWD_IDX < OOO_IDX
+			 */
+
+			/* move FCoE fp even if NO_FCOE_FLAG is on */
+			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+		bp->num_ethernet_queues -= delta;
+		bp->num_queues = bp->num_ethernet_queues +
+				 bp->num_cnic_queues;
+		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+			  bp->num_queues + delta, bp->num_queues);
+	}
+
+	return 0;
+}
+
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->fp_array_size; i++)
+		kfree(bp->fp[i].tpa_info);
+	kfree(bp->fp);
+	kfree(bp->sp_objs);
+	kfree(bp->fp_stats);
+	kfree(bp->bnx2x_txq);
+	kfree(bp->msix_table);
+	kfree(bp->ilt);
+}
+
+int bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+	struct bnx2x_fastpath *fp;
+	struct msix_entry *tbl;
+	struct bnx2x_ilt *ilt;
+	int msix_table_size = 0;
+	int fp_array_size, txq_array_size;
+	int i;
+
+	/*
+	 * The biggest MSI-X table we might need is the maximum number of fast
+	 * path IGU SBs plus the default SB (for PF only).
+	 */
+	msix_table_size = bp->igu_sb_cnt;
+	if (IS_PF(bp))
+		msix_table_size++;
+	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
+
+	/* fp array: RSS plus CNIC related L2 queues */
+	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
+	bp->fp_array_size = fp_array_size;
+	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
+
+	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
+	if (!fp)
+		goto alloc_err;
+	for (i = 0; i < bp->fp_array_size; i++) {
+		fp[i].tpa_info =
+			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+		if (!(fp[i].tpa_info))
+			goto alloc_err;
+	}
+
+	bp->fp = fp;
+
+	/* allocate sp objs */
+	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
+			      GFP_KERNEL);
+	if (!bp->sp_objs)
+		goto alloc_err;
+
+	/* allocate fp_stats */
+	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
+			       GFP_KERNEL);
+	if (!bp->fp_stats)
+		goto alloc_err;
+
+	/* Allocate memory for the transmission queues array */
+	txq_array_size =
+		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+				GFP_KERNEL);
+	if (!bp->bnx2x_txq)
+		goto alloc_err;
+
+	/* msix table */
+	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		goto alloc_err;
+	bp->msix_table = tbl;
+
+	/* ilt */
+	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+	if (!ilt)
+		goto alloc_err;
+	bp->ilt = ilt;
+
+	return 0;
+alloc_err:
+	bnx2x_free_mem_bp(bp);
+	return -ENOMEM;
+}
+
+int bnx2x_reload_if_running(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (unlikely(!netif_running(dev)))
+		return 0;
+
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+	return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = 0;
+	if (bp->link_params.num_phys <= 1)
+		return INT_PHY;
+
+	if (bp->link_vars.link_up) {
+		sel_phy_idx = EXT_PHY1;
+		/* In case link is SERDES, check if the EXT_PHY2 is the one */
+		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+			sel_phy_idx = EXT_PHY2;
+	} else {
+
+		switch (bnx2x_phy_selection(&bp->link_params)) {
+		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY1;
+		       break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY2;
+		       break;
+		}
+	}
+
+	return sel_phy_idx;
+}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected activated PHY is always after swapping (in case PHY
+	 * swapping is enabled). So when swapping is enabled, we need to reverse
+	 * the configuration
+	 */
+
+	if (bp->link_params.multi_phy_config &
+	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+		if (sel_phy_idx == EXT_PHY1)
+			sel_phy_idx = EXT_PHY2;
+		else if (sel_phy_idx == EXT_PHY2)
+			sel_phy_idx = EXT_PHY1;
+	}
+	return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#ifdef NETDEV_FCOE_WWNN
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	switch (type) {
+	case NETDEV_FCOE_WWNN:
+		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+				cp->fcoe_wwn_node_name_lo);
+		break;
+	case NETDEV_FCOE_WWPN:
+		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+				cp->fcoe_wwn_port_name_lo);
+		break;
+	default:
+		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (pci_num_vf(bp->pdev)) {
+		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+		return -EPERM;
+	}
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
+		return -EAGAIN;
+	}
+
+	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+		BNX2X_ERR("Can't support requested MTU size\n");
+		return -EINVAL;
+	}
+
+	/* This does not race with packet allocation
+	 * because the actual alloc size is
+	 * only updated as part of load
+	 */
+	dev->mtu = new_mtu;
+
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+	return bnx2x_reload_if_running(dev);
+}
+
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+				     netdev_features_t features)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (pci_num_vf(bp->pdev)) {
+		netdev_features_t changed = dev->features ^ features;
+
+		/* Revert the requested changes in features if they
+		 * would require internal reload of PF in bnx2x_set_features().
+		 */
+		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+			features &= ~NETIF_F_RXCSUM;
+			features |= dev->features & NETIF_F_RXCSUM;
+		}
+
+		if (changed & NETIF_F_LOOPBACK) {
+			features &= ~NETIF_F_LOOPBACK;
+			features |= dev->features & NETIF_F_LOOPBACK;
+		}
+	}
+
+	/* TPA requires Rx CSUM offloading */
+	if (!(features & NETIF_F_RXCSUM)) {
+		features &= ~NETIF_F_LRO;
+		features &= ~NETIF_F_GRO;
+	}
+
+	return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	netdev_features_t changes = features ^ dev->features;
+	bool bnx2x_reload = false;
+	int rc;
+
+	/* VFs or non SRIOV PFs should be able to change loopback feature */
+	if (!pci_num_vf(bp->pdev)) {
+		if (features & NETIF_F_LOOPBACK) {
+			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+				bp->link_params.loopback_mode = LOOPBACK_BMAC;
+				bnx2x_reload = true;
+			}
+		} else {
+			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+				bp->link_params.loopback_mode = LOOPBACK_NONE;
+				bnx2x_reload = true;
+			}
+		}
+	}
+
+	/* if GRO is changed while LRO is enabled, don't force a reload */
+	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+		changes &= ~NETIF_F_GRO;
+
+	/* if GRO is changed while HW TPA is off, don't force a reload */
+	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+		changes &= ~NETIF_F_GRO;
+
+	if (changes)
+		bnx2x_reload = true;
+
+	if (bnx2x_reload) {
+		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+			dev->features = features;
+			rc = bnx2x_reload_if_running(dev);
+			return rc ? rc : 1;
+		}
+		/* else: bnx2x_nic_load() will be called at end of recovery */
+	}
+
+	return 0;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (!bp->panic)
+		bnx2x_panic();
+#endif
+
+	/* This allows the netif to be shutdown gracefully before resetting */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
+}
+
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	pci_save_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	netif_device_detach(dev);
+
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+
+	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+int bnx2x_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+	int rc;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		BNX2X_ERR("Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	rtnl_lock();
+
+	pci_restore_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+	rtnl_unlock();
+
+	return rc;
+}
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid)
+{
+	if (!cxt) {
+		BNX2X_ERR("bad context pointer %p\n", cxt);
+		return;
+	}
+
+	/* ustorm cxt validation */
+	cxt->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+	/* xcontext validation */
+	cxt->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+				    u8 fw_sb_id, u8 sb_index,
+				    u8 ticks)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_IFUP,
+	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
+	   port, fw_sb_id, sb_index, ticks);
+}
+
+static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+				    u16 fw_sb_id, u8 sb_index,
+				    u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+	u8 flags = REG_RD8(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR8(bp, addr, flags);
+	DP(NETIF_MSG_IFUP,
+	   "port %x fw_sb_id %d sb_index %d disable %d\n",
+	   port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec)
+{
+	int port = BP_PORT(bp);
+	u8 ticks = usec / BNX2X_BTR;
+
+	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
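+	/* a zero usec timeout also disables coalescing for this index */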
+	disable = disable ? 1 : (usec ? 0 : 1);
+	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+			    u32 verbose)
+{
+	smp_mb__before_atomic();
+	set_bit(flag, &bp->sp_rtnl_state);
+	smp_mb__after_atomic();
+	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+	   flag);
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
new file mode 100644
index 0000000..b7d32e8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,1403 @@
+/* bnx2x_cmn.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/irq.h>
+
+#include "bnx2x.h"
+#include "bnx2x_sriov.h"
+
+/* This is used as a replacement for an MCP if it's not present */
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
+
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define BNX2X_FREE(x) \
+	do { \
+		if (x) { \
+			kfree((void *)x); \
+			x = NULL; \
+		} \
+	} while (0)
+
+#define BNX2X_PCI_ALLOC(y, size)					\
+({									\
+	void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+	if (x)								\
+		DP(NETIF_MSG_HW,					\
+		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",	\
+		   (unsigned long long)(*y), x);			\
+	x;								\
+})
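+/* like BNX2X_PCI_ALLOC, but fills the buffer with 0xff instead of zeroing it */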
+#define BNX2X_PCI_FALLOC(y, size)					\
+({									\
+	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+	if (x) {							\
+		memset(x, 0xff, size);					\
+		DP(NETIF_MSG_HW,					\
+		   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",	\
+		   (unsigned long long)(*y), x);			\
+	}								\
+	x;								\
+})
+
+/*********************** Interfaces ****************************
+ *  Functions that need to be implemented by each driver version
+ */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ * @keep_link:		true iff link should be kept up
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters in a PF.
+ *
+ * @bp:			driver handle
+ * @rss_obj:		RSS object to use
+ * @ind_table:		indirection table to configure
+ * @config_hash:	re-configure RSS hash keys configuration
+ * @enable:		enabled or disabled configuration
+ */
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+	      bool config_hash, bool enable);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp:			driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to the fastpath structure
+ * @leading:	boolean
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp:		driver handle
+ * @command:	request
+ * @param:	request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
+
+/**
+ * bnx2x_initial_phy_init - initialize link parameters structure variables.
+ *
+ * @bp:		driver handle
+ * @load_mode:	current mode
+ */
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * bnx2x_link_set - configure hw according to link parameters structure.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * bnx2x_force_link_reset - Forces link reset, and puts the PHY
+ * in reset as well.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_test - query link status.
+ *
+ * @bp:		driver handle
+ * @is_serdes:	bool
+ *
+ * Returns 0 if link is UP.
+ */
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
+
+/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp:		driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp:		driver handle
+ * @igu_sb_id:	SB id
+ * @segment:	SB segment
+ * @index:	SB index
+ * @op:		SB operation
+ * @update:	is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
+
+/**
+ * bnx2x__link_status_update - handles link status change.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_report - report link status to upper layer.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/* Non-atomic version of bnx2x_link_report() */
+void __bnx2x_link_report(struct bnx2x *bp);
+
+/**
+ * bnx2x_get_mf_speed - calculate MF speed.
+ *
+ * @bp:		driver handle
+ *
+ * Takes into account current linespeed and MF configuration.
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
+ * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
+ *
+ * @irq:		irq number
+ * @dev_instance:	private instance
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * bnx2x_interrupt - non MSI-X interrupt handler
+ *
+ * @irq:		irq number
+ * @dev_instance:	private instance
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+
+/**
+ * bnx2x_cnic_notify - send command to cnic driver
+ *
+ * @bp:		driver handle
+ * @cmd:	command
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_enable - enable HW interrupts.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_disable_sync - disable interrupts.
+ *
+ * @bp:		driver handle
+ * @disable_hw:	true, disable HW interrupts.
+ *
+ * This function ensures that no ISRs or SP DPCs (sp_task)
+ * are running after it returns.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * bnx2x_nic_init_cnic - init driver internals for cnic.
+ *
+ * @bp:		driver handle
+ *
+ * Initializes:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ */
+void bnx2x_nic_init_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_pre_irq_nic_init - init driver internals.
+ *
+ * @bp:		driver handle
+ *
+ * Initializes:
+ *  - fastpath object
+ *  - fastpath rings
+ *  - etc.
+ */
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_post_irq_nic_init - init driver internals.
+ *
+ * @bp:		driver handle
+ * @load_code:	COMMON, PORT or FUNCTION
+ *
+ * Initializes:
+ *  - status blocks
+ *  - slowpath rings
+ *  - etc.
+ */
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
+/**
+ * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
+/**
+ * bnx2x_alloc_mem - allocate driver's memory.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_cnic - release driver's memory for cnic.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem_cnic(struct bnx2x *bp);
+/**
+ * bnx2x_free_mem - release driver's memory.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_num_queues - set number of queues according to mode.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_set_num_queues(struct bnx2x *bp);
+
+/**
+ * bnx2x_chip_cleanup - cleanup chip internals.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	COMMON, PORT, FUNCTION
+ * @keep_link:		true iff link should be kept up.
+ *
+ * - Cleanup MAC configuration.
+ * - Closes clients.
+ * - etc.
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
+
+/**
+ * bnx2x_acquire_hw_lock - acquire HW lock.
+ *
+ * @bp:		driver handle
+ * @resource:	resource bit to be locked
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_hw_lock - release HW lock.
+ *
+ * @bp:		driver handle
+ * @resource:	resource bit which was locked
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_leader_lock - release recovery leader lock
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_release_leader_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
+ *
+ * @bp:		driver handle
+ * @set:	set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
+ */
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
+
+/**
+ * bnx2x_set_rx_mode_inner - set MAC filtering configurations.
+ *
+ * @bp:		driver handle
+ *
+ * Called with netif_tx_lock from dev_mcast.c.
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
+
+/* Parity errors related */
+void bnx2x_set_pf_load(struct bnx2x *bp);
+bool bnx2x_clear_pf_load(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+ * @fp:		fastpath handle for the event
+ * @rr_cqe:	eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+
+/**
+ * bnx2x_ilt_set_info - prepare ILT configurations.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_ilt_set_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
+ * and TM.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_dcbx_init - initialize dcbx protocol.
+ *
+ * @bp:			driver handle
+ * @update_shmem:	whether to also update the DCBX configuration in shmem
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
+
+/**
+ * bnx2x_set_power_state - set power state to the requested value.
+ *
+ * @bp:		driver handle
+ * @state:	required state D0 or D3hot
+ *
+ * Currently only D0 and D3hot are supported.
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/**
+ * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
+ *
+ * @bp:		driver handle
+ * @value:	new value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+			struct ifla_vf_info *ivi);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback);
+
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+					struct bnx2x_fastpath *fp,
+					u16 bd_prod, u16 rx_comp_prod,
+					u16 rx_sge_prod)
+{
+	struct ustorm_eth_rx_producers rx_prods = {0};
+	u32 i;
+
+	/* Update producers */
+	rx_prods.bd_prod = bd_prod;
+	rx_prods.cqe_prod = rx_comp_prod;
+	rx_prods.sge_prod = rx_sge_prod;
+
+	/* Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+		       ((u32 *)&rx_prods)[i]);
+
+	mmiowb(); /* keep prod updates ordered */
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
+
+/* suspend/resume callbacks */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+int bnx2x_load_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msix - set MSI-X configuration.
+ *
+ * @bp:		driver handle
+ *
+ * fills msix_table, requests vectors, updates num_queues
+ * according to number of available vectors.
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msi - request MSI mode from OS, update internals accordingly
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * bnx2x_low_latency_recv - LL callback
+ *
+ * @napi:	napi structure
+ */
+int bnx2x_low_latency_recv(struct napi_struct *napi);
+
+/**
+ * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_alloc_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_bp - release memory outside the main driver structure
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_change_mtu - change mtu netdev callback
+ *
+ * @dev:	net device
+ * @new_mtu:	requested mtu
+ *
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+#ifdef NETDEV_FCOE_WWNN
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev:	net_device
+ * @wwn:	output buffer
+ * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
+
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+				     netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
+
+/**
+ * bnx2x_tx_timeout - tx timeout netdev callback
+ *
+ * @dev:	net device
+ */
+void bnx2x_tx_timeout(struct net_device *dev);
+
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp:			driver handle
+ * @c2s_map:		output buffer; should have BNX2X_MAX_PRIORITY entries
+ * @c2s_default:	entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+	barrier(); /* status block is written to by the chip */
+	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
+					u8 segment, u16 index, u8 op,
+					u8 update, u32 igu_addr)
+{
+	struct igu_regular cmd_data = {0};
+
+	cmd_data.sb_id_and_flags =
+			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
+			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
+	   cmd_data.sb_id_and_flags, igu_addr);
+	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
+}
+
+static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
+				   u8 storm, u16 index, u8 op, u8 update)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
+				u16 index, u8 op, u8 update)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
+	else {
+		u8 segment;
+
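+		/* IGU mode: pick the segment this SB belongs to */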
+		if (CHIP_INT_MODE_IS_BC(bp))
+			segment = storm;
+		else if (igu_sb_id != bp->igu_dsb_id)
+			segment = IGU_SEG_ACCESS_DEF;
+		else if (storm == ATTENTION_ID)
+			segment = IGU_SEG_ACCESS_ATTN;
+		else
+			segment = IGU_SEG_ACCESS_DEF;
+		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
+	}
+}
+
+static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
+
+	barrier();
+	return result;
+}
+
+static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
+{
+	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
+	u32 result = REG_RD(bp, igu_addr);
+
+	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
+	   result, igu_addr);
+
+	barrier();
+	return result;
+}
+
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+	barrier();
+	if (bp->common.int_block == INT_BLOCK_HC)
+		return bnx2x_hc_ack_int(bp);
+	else
+		return bnx2x_igu_ack_int(bp);
+}
+
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
+{
+	/* Tell compiler that consumer and producer can change */
+	barrier();
+	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
+}
+
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+				 struct bnx2x_fp_txdata *txdata)
+{
+	s16 used;
+	u16 prod;
+	u16 cons;
+
+	prod = txdata->tx_bd_prod;
+	cons = txdata->tx_bd_cons;
+
+	used = SUB_S16(prod, cons);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	WARN_ON(used < 0);
+	WARN_ON(used > txdata->tx_ring_size);
+	WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
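+	/* Number of BDs still available for new frames in this Tx ring */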
+	return (s16)(txdata->tx_ring_size) - used;
+}
+
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+	return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u8 cos;
+	for_each_cos_in_tx_queue(fp, cos)
+		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+			return true;
+	return false;
+}
+
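+/* A CQE counts as completed once its marker, seeded to all ones by the
+ * driver, reads back as zero.
+ */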
+#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
+#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+	u16 cons;
+	union eth_rx_cqe *cqe;
+	struct eth_fast_path_rx_cqe *cqe_fp;
+
+	cons = RCQ_BD(fp->rx_comp_cons);
+	cqe = &fp->rx_comp_ring[cons];
+	cqe_fp = &cqe->fast_path_cqe;
+	return BNX2X_IS_CQE_COMPLETED(cqe_fp);
+}
+
+/**
+ * bnx2x_tx_disable - disables tx from stack point of view
+ *
+ * @bp:		driver handle
+ */
+static inline void bnx2x_tx_disable(struct bnx2x *bp)
+{
+	netif_tx_disable(bp->dev);
+	netif_carrier_off(bp->dev);
+}
+
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct page *page = sw_buf->page;
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+	/* Skip "next page" elements */
+	if (!page)
+		return;
+
+	/* Since many fragments can share the same page, make sure to
+	 * only unmap and free the page once.
+	 */
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+	put_page(page);
+
+	sw_buf->page = NULL;
+	sge->addr_hi = 0;
+	sge->addr_lo = 0;
+}
+
+static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i) {
+		napi_hash_del(&bnx2x_fp(bp, i, napi));
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		napi_hash_del(&bnx2x_fp(bp, i, napi));
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+int bnx2x_set_int_mode(struct bnx2x *bp);
+
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+	if (bp->flags & USING_MSIX_FLAG) {
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
+	} else if (bp->flags & USING_MSI_FLAG) {
+		pci_disable_msi(bp->pdev);
+		bp->flags &= ~USING_MSI_FLAG;
+	}
+}
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+	int i, j;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		int idx = RX_SGE_CNT * i - 1;
+
+		for (j = 0; j < 2; j++) {
+			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
+			idx--;
+		}
+	}
+}
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
+
+	/* Clear the last two indices in each page:
+	 * these are the indices that correspond to the "next" element,
+	 * hence will never be indicated and should be removed from
+	 * the calculations.
+	 */
+	bnx2x_clear_sge_mask_next_elems(fp);
+}
+
+/* note that we are not allocating a new buffer,
+ * we are just moving one from cons to prod
+ * we are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
+				      u16 cons, u16 prod)
+{
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->data = cons_rx_buf->data;
+	*prod_bd = *cons_bd;
+}
+
+/************************* Init ******************************************/
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
+{
+	return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
+}
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp:		driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_start_params *start_params =
+		&func_params.params.start;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_START;
+
+	/* Function parameters */
+	start_params->mf_mode = bp->mf_mode;
+	start_params->sd_vlan_tag = bp->mf_ov;
+
+	/* Configure Ethertype for BD mode */
+	if (IS_MF_BD(bp)) {
+		DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+		start_params->sd_vlan_eth_type = ETH_P_8021AD;
+		REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+		bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+				      &start_params->c2s_pri_default);
+		start_params->c2s_pri_valid = 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+		   start_params->c2s_pri[0], start_params->c2s_pri[1],
+		   start_params->c2s_pri[2], start_params->c2s_pri[3],
+		   start_params->c2s_pri[4], start_params->c2s_pri[5],
+		   start_params->c2s_pri[6], start_params->c2s_pri[7],
+		   start_params->c2s_pri_default);
+	}
+
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
+		start_params->network_cos_mode = STATIC_COS;
+	else /* CHIP_IS_E1X */
+		start_params->network_cos_mode = FW_WRR;
+
+	start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
+	start_params->inner_rss = 1;
+
+	if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+		start_params->class_fail_ethtype = ETH_P_FIP;
+		start_params->class_fail = 1;
+		start_params->no_added_tags = 1;
+	}
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi:	pointer to upper part
+ * @fw_mid:	pointer to middle part
+ * @fw_lo:	pointer to lower part
+ * @mac:	pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+					 __le16 *fw_lo, u8 *mac)
+{
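+	/* Split the MAC into three 16-bit words, swapping the byte order
+	 * within each word.
+	 */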
+	((u8 *)fw_hi)[0]  = mac[1];
+	((u8 *)fw_hi)[1]  = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lo)[0]  = mac[5];
+	((u8 *)fw_lo)[1]  = mac[4];
+}
+
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+					  struct bnx2x_alloc_pool *pool)
+{
+	if (!pool->page)
+		return;
+
+	put_page(pool->page);
+
+	pool->page = NULL;
+}
+
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+					   struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	if (fp->mode == TPA_MODE_DISABLED)
+		return;
+
+	for (i = 0; i < last; i++)
+		bnx2x_free_rx_sge(bp, fp, i);
+
+	bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
+}
+
+static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_RINGS; i++) {
+		struct eth_rx_bd *rx_bd;
+
+		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+		rx_bd->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+		rx_bd->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+	}
+}
+
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	if (!CHIP_IS_E1x(bp)) {
+		/* there are special statistics counters for FCoE 136..140 */
+		if (IS_FCOE_FP(fp))
+			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
+		return fp->cl_id;
+	}
+	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+					       bnx2x_obj_type obj_type)
+{
+	struct bnx2x *bp = fp->bp;
+
+	/* Configure classification DBs */
+	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+			   bnx2x_sp_mapping(bp, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &bp->sp_state, obj_type,
+			   &bp->macs_pool);
+
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+				    fp->cl_id, fp->cid, BP_FUNC(bp),
+				    bnx2x_sp(bp, vlan_rdata),
+				    bnx2x_sp_mapping(bp, vlan_rdata),
+				    BNX2X_FILTER_VLAN_PENDING,
+				    &bp->sp_state, obj_type,
+				    &bp->vlans_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp:		driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+	u8 func_num = 0, i;
+
+	/* 57710 has only one function per-port */
+	if (CHIP_IS_E1(bp))
+		return 1;
+
+	/* Calculate a number of functions enabled on the current
+	 * PATH/PORT.
+	 */
+	if (CHIP_REV_IS_SLOW(bp)) {
+		if (IS_MF(bp))
+			func_num = 4;
+		else
+			func_num = 2;
+	} else {
+		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+			u32 func_config =
+				MF_CFG_RD(bp,
+					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  config);
+			func_num +=
+				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+		}
+	}
+
+	WARN_ON(!func_num);
+
+	return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+	/* RX_MODE controlling object */
+	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+	/* multicast configuration controlling object */
+	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+			     BP_FUNC(bp), BP_FUNC(bp),
+			     bnx2x_sp(bp, mcast_rdata),
+			     bnx2x_sp_mapping(bp, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+			     BNX2X_OBJ_TYPE_RX);
+
+	/* Setup CAM credit pools */
+	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+				   bnx2x_get_path_func_num(bp));
+
+	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
+				    bnx2x_get_path_func_num(bp));
+
+	/* RSS configuration object */
+	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+				  bnx2x_sp(bp, rss_rdata),
+				  bnx2x_sp_mapping(bp, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+				  BNX2X_OBJ_TYPE_RX);
+
+	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+	else
+		return fp->cl_id;
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+				     struct bnx2x_fp_txdata *txdata, u32 cid,
+				     int txq_index, __le16 *tx_cons_sb,
+				     struct bnx2x_fastpath *fp)
+{
+	txdata->cid = cid;
+	txdata->txq_index = txq_index;
+	txdata->tx_cons_sb = tx_cons_sb;
+	txdata->parent_fp = fp;
+	txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;
+
+	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
+	   txdata->cid, txdata->txq_index);
+}
+
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+	return bp->cnic_base_cl_id + cl_idx +
+		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+	/* the 'first' id is allocated for the cnic */
+	return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+	return bp->igu_base_sb;
+}
+
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+				       struct bnx2x_fp_txdata *txdata)
+{
+	int cnt = 1000;
+
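+	/* Poll up to 1000 times, 1-2 ms apart, for the queue to drain */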
+	while (bnx2x_has_tx_work_unload(txdata)) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+				  txdata->txq_index, txdata->tx_pkt_prod,
+				  txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+			return -EBUSY;
+#else
+			break;
+#endif
+		}
+		cnt--;
+		usleep_range(1000, 2000);
+	}
+
+	return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
+static inline void __storm_memset_struct(struct bnx2x *bp,
+					 u32 addr, size_t size, u32 *data)
+{
+	int i;
+	for (i = 0; i < size/4; i++)
+		REG_WR(bp, addr + (i * 4), data[i]);
+}
+
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp:		driver handle
+ * @mask:	bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+	int tout = 5000; /* Poll up to 5000 times, 1-2 ms apart */
+
+	while (tout--) {
+		smp_mb();
+		netif_addr_lock_bh(bp->dev);
+		if (!(bp->sp_state & mask)) {
+			netif_addr_unlock_bh(bp->dev);
+			return true;
+		}
+		netif_addr_unlock_bh(bp->dev);
+
+		usleep_range(1000, 2000);
+	}
+
+	smp_mb();
+
+	netif_addr_lock_bh(bp->dev);
+	if (bp->sp_state & mask) {
+		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
+			  bp->sp_state, mask);
+		netif_addr_unlock_bh(bp->dev);
+		return false;
+	}
+	netif_addr_unlock_bh(bp->dev);
+
+	return true;
+}
+
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp:		driver handle
+ * @cxt:	context of the connection on the host memory
+ * @cid:	SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec);
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
+ *
+ * @bp:		driver handle
+ * @mf_cfg:	MF configuration
+ *
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+			      FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (!max_cfg) {
+		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
+		   "Max BW configured to 0 - using 100 instead\n");
+		max_cfg = 100;
+	}
+	return max_cfg;
+}
+
+/* checks if HW supports GRO for given MTU */
+static inline bool bnx2x_mtu_allows_gro(int mtu)
+{
+	/* gro frags per page */
+	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
+
+	/*
+	 * 1. Number of frags should not grow above MAX_SKB_FRAGS
+	 * 2. Frag must fit the page
+	 */
+	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
+}
+
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp:		driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp:		driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+	int func;
+	int vn;
+
+	/* Set the attention towards other drivers on the same port */
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		if (vn == BP_VN(bp))
+			continue;
+
+		func = func_by_vn(bp, vn);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+	}
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp:		driver handle
+ * @flags:	flags to update
+ * @set:	set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
+	}
+}
+
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp:        driver handle
+ * @buf:       character buffer to fill with the fw name
+ * @buf_len:   length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+			    u32 verbose);
+
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp:		driver handle
+ * @state:	OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp:		driver handle
+ * @offset:	byte offset in nvram
+ * @ret_buf:	pointer to buffer where data is to be stored
+ * @buf_size:   Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size);
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
new file mode 100644
index 0000000..7ccf668
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2556 @@
+/* bnx2x_dcb.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+/* forward declarations of dcbx related functions */
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+					  u32 *set_configuration_ets_pg,
+					  u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+					    u32 *pg_pri_orginal_spread,
+					    struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+				struct cos_help_data *cos_data,
+				u32 *pg_pri_orginal_spread,
+				struct dcbx_ets_feature *ets);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+				 struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+				   u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		*buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+				    u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		REG_WR(bp, addr + i, *buff);
+}
+
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+	u32 pri_bit, val = 0;
+	int i;
+
+	pfc_params.num_of_rx_cos_priority_mask =
+					bp->dcbx_port_params.ets.num_of_cos;
+
+	/* Tx COS configuration */
+	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		/*
+		 * We configure only the pauseable bits (non-pauseable ones
+		 * aren't configured at all); this is done to avoid false
+		 * pauses from the network.
+		 */
+		pfc_params.rx_cos_priority_mask[i] =
+			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+				& DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+	/*
+	 * Rx COS configuration
+	 * Changing the PFC RX configuration:
+	 * RX COS0 will always be configured as lossless and COS1 as lossy.
+	 */
+	for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+		pri_bit = 1 << i;
+
+		if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
+			val |= 1 << (i * 4);
+	}
+
+	pfc_params.pkt_priority_to_cos = val;
+
+	/* RX COS0 */
+	pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+	/* RX COS1 */
+	pfc_params.llfc_high_priority_classes = 0;
+
+	bnx2x_acquire_phy_lock(bp);
+	bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+	bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+	struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+	nig_params.pause_enable = 1;
+	bnx2x_acquire_phy_lock(bp);
+	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+	bnx2x_release_phy_lock(bp);
+}
+
+static void  bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+				       struct dcbx_features *features,
+				       u32 error)
+{
+	u8 i = 0;
+	DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+	/* PG */
+	DP(NETIF_MSG_LINK,
+	   "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+	for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+		DP(NETIF_MSG_LINK,
+		   "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+		   DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+		DP(NETIF_MSG_LINK,
+		   "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+		   DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+	/* pfc */
+	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n",
+					features->pfc.pri_en_bitmap);
+	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n",
+					features->pfc.pfc_caps);
+	DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n",
+					features->pfc.enabled);
+
+	DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n",
+					features->app.default_pri);
+	DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n",
+					features->app.tc_supported);
+	DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n",
+					features->app.enabled);
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		DP(BNX2X_MSG_DCB,
+		   "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+		   i, features->app.app_pri_tbl[i].app_id);
+		DP(BNX2X_MSG_DCB,
+		   "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+		   i, features->app.app_pri_tbl[i].pri_bitmap);
+		DP(BNX2X_MSG_DCB,
+		   "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+		   i, features->app.app_pri_tbl[i].appBitfield);
+	}
+}
+
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+				       u8 pri_bitmap,
+				       u8 llfc_traf_type)
+{
+	u32 pri = MAX_PFC_PRIORITIES;
+	u32 index = MAX_PFC_PRIORITIES - 1;
+	u32 pri_mask;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+	/* Choose the highest priority */
+	while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+		pri_mask = 1 << index;
+		if (GET_FLAGS(pri_bitmap, pri_mask))
+			pri = index;
+		index--;
+	}
+
+	if (pri < MAX_PFC_PRIORITIES)
+		ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+				   struct dcbx_app_priority_feature *app,
+				   u32 error)
+{
+	u8 index;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+	if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");
+
+	if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n");
+
+	if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND))
+		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n");
+	if (app->enabled &&
+	    !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH |
+			      DCBX_REMOTE_APP_TLV_NOT_FOUND)) {
+
+		bp->dcbx_port_params.app.enabled = true;
+
+		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+			ttp[index] = 0;
+
+		if (app->default_pri < MAX_PFC_PRIORITIES)
+			ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+		for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+			struct dcbx_app_priority_entry *entry =
+							app->app_pri_tbl;
+
+			if (GET_FLAGS(entry[index].appBitfield,
+				     DCBX_APP_SF_ETH_TYPE) &&
+			   ETH_TYPE_FCOE == entry[index].app_id)
+				bnx2x_dcbx_get_ap_priority(bp,
+						entry[index].pri_bitmap,
+						LLFC_TRAFFIC_TYPE_FCOE);
+
+			if (GET_FLAGS(entry[index].appBitfield,
+				     DCBX_APP_SF_PORT) &&
+			   TCP_PORT_ISCSI == entry[index].app_id)
+				bnx2x_dcbx_get_ap_priority(bp,
+						entry[index].pri_bitmap,
+						LLFC_TRAFFIC_TYPE_ISCSI);
+		}
+	} else {
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
+		bp->dcbx_port_params.app.enabled = false;
+		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+			ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+	}
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+				       struct dcbx_ets_feature *ets,
+				       u32 error)
+{
+	int i = 0;
+	u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+	struct pg_help_data pg_help_data;
+	struct bnx2x_dcbx_cos_params *cos_params =
+			bp->dcbx_port_params.ets.cos_params;
+
+	memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+	if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
+
+	if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND))
+		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n");
+
+	/* Clean up old settings of ets on COS */
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
+		cos_params[i].pauseable = false;
+		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
+		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+		cos_params[i].pri_bitmask = 0;
+	}
+
+	if (bp->dcbx_port_params.app.enabled && ets->enabled &&
+	   !GET_FLAGS(error,
+		      DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) {
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n");
+		bp->dcbx_port_params.ets.enabled = true;
+
+		bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+					      pg_pri_orginal_spread,
+					      ets->pri_pg_tbl);
+
+		bnx2x_dcbx_get_num_pg_traf_type(bp,
+						pg_pri_orginal_spread,
+						&pg_help_data);
+
+		bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+					   ets, pg_pri_orginal_spread);
+
+	} else {
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n");
+		bp->dcbx_port_params.ets.enabled = false;
+		ets->pri_pg_tbl[0] = 0;
+
+		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+			DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+	}
+}
+
+static void  bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+					struct dcbx_pfc_feature *pfc, u32 error)
+{
+	if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
+
+	if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND))
+		DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n");
+	if (bp->dcbx_port_params.app.enabled && pfc->enabled &&
+	   !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH |
+			     DCBX_REMOTE_PFC_TLV_NOT_FOUND)) {
+		bp->dcbx_port_params.pfc.enabled = true;
+		bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+			~(pfc->pri_en_bitmap);
+	} else {
+		DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n");
+		bp->dcbx_port_params.pfc.enabled = false;
+		bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+	}
+}
+
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+	int i;
+	u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+	struct bnx2x_dcbx_cos_params *cos_params =
+			bp->dcbx_port_params.ets.cos_params;
+
+	/* get unmapped priorities by clearing mapped bits */
+	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+		unmapped &= ~(1 << ttp[i]);
+
+	/* find cos for nw prio and extend it with unmapped */
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+		if (cos_params[i].pri_bitmask & nw_prio) {
+			/* extend the bitmask with unmapped */
+			DP(BNX2X_MSG_DCB,
+			   "cos %d extended with 0x%08x\n", i, unmapped);
+			cos_params[i].pri_bitmask |= unmapped;
+			break;
+		}
+	}
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+				     struct dcbx_features *features,
+				     u32 error)
+{
+	bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+	bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+	bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+	bnx2x_dcbx_map_nw(bp);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ		(100)
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+			       u32 *base_mib_addr,
+			       u32 offset,
+			       int read_mib_type)
+{
+	int max_try_read = 0;
+	u32 mib_size, prefix_seq_num, suffix_seq_num;
+	struct lldp_remote_mib *remote_mib;
+	struct lldp_local_mib  *local_mib;
+
+	switch (read_mib_type) {
+	case DCBX_READ_LOCAL_MIB:
+		mib_size = sizeof(struct lldp_local_mib);
+		break;
+	case DCBX_READ_REMOTE_MIB:
+		mib_size = sizeof(struct lldp_remote_mib);
+		break;
+	default:
+		return 1; /*error*/
+	}
+
+	offset += BP_PORT(bp) * mib_size;
+
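+	/* Re-read until the prefix and suffix sequence numbers match,
+	 * i.e. the MIB was not being updated while we read it.
+	 */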
+	do {
+		bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
+
+		max_try_read++;
+
+		switch (read_mib_type) {
+		case DCBX_READ_LOCAL_MIB:
+			local_mib = (struct lldp_local_mib *) base_mib_addr;
+			prefix_seq_num = local_mib->prefix_seq_num;
+			suffix_seq_num = local_mib->suffix_seq_num;
+			break;
+		case DCBX_READ_REMOTE_MIB:
+			remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+			prefix_seq_num = remote_mib->prefix_seq_num;
+			suffix_seq_num = remote_mib->suffix_seq_num;
+			break;
+		default:
+			return 1; /*error*/
+		}
+	} while ((prefix_seq_num != suffix_seq_num) &&
+	       (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+	if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+		BNX2X_ERR("MIB could not be read\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+	int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+			     GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+				       1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+	if (bp->dcbx_port_params.pfc.enabled &&
+	    (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
+		/*
+		 * 1. Fill up common PFC structures if required
+		 * 2. Configure NIG, MAC and BRB via the elink
+		 */
+		bnx2x_pfc_set(bp);
+	else
+		bnx2x_pfc_clear(bp);
+}
+
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_STOP;
+
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+		BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+		bnx2x_panic();
+	}
+
+	return rc;
+}
+
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_tx_start_params *tx_params =
+		&func_params.params.tx_start;
+	int rc;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_START;
+
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	bnx2x_dcbx_fw_struct(bp, tx_params);
+
+	DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+		BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+		bnx2x_panic();
+	}
+
+	return rc;
+}
+
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	int rc = 0;
+
+	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
+		return;
+	}
+
+	/* valid COS entries */
+	if (ets->num_of_cos == 1)   /* no ETS */
+		return;
+
+	/* sanity */
+	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
+	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
+	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+		BNX2X_ERR("all COS should have at least bw_limit or strict "
+			    "ets->cos_params[0].strict= %x "
+			    "ets->cos_params[0].bw_tbl= %x "
+			    "ets->cos_params[1].strict= %x "
+			    "ets->cos_params[1].bw_tbl= %x\n",
+			  ets->cos_params[0].strict,
+			  ets->cos_params[0].bw_tbl,
+			  ets->cos_params[1].strict,
+			  ets->cos_params[1].bw_tbl);
+		return;
+	}
+	/* If both COSes have a valid bw_tbl, the BW configuration rules */
+	if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+	    (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+		u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+		u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+		/* Do not allow 0-100 configuration
+		 * since PBF does not support it
+		 * force 1-99 instead
+		 */
+		if (bw_tbl_0 == 0) {
+			bw_tbl_0 = 1;
+			bw_tbl_1 = 99;
+		} else if (bw_tbl_1 == 0) {
+			bw_tbl_1 = 1;
+			bw_tbl_0 = 99;
+		}
+
+		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+	} else {
+		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 0);
+		else if (ets->cos_params[1].strict
+					== BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 1);
+		if (rc)
+			BNX2X_ERR("update_ets_params failed\n");
+	}
+}
+
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	struct bnx2x_ets_params ets_params = { 0 };
+	u8 i;
+
+	ets_params.num_of_cos = ets->num_of_cos;
+
+	for (i = 0; i < ets->num_of_cos; i++) {
+		/* COS is SP */
+		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be both BW and SP\n");
+				return;
+			}
+
+			ets_params.cos[i].state = bnx2x_cos_state_strict;
+			ets_params.cos[i].params.sp_params.pri =
+						ets->cos_params[i].strict;
+		} else { /* COS is BW */
+			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS must be either BW or SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_bw;
+			ets_params.cos[i].params.bw_params.bw =
+						(u8)ets->cos_params[i].bw_tbl;
+		}
+	}
+
+	/* Configure the ETS in HW */
+	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+				  &ets_params)) {
+		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+	}
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+	int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+			     GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+				       1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+	if (!bp->dcbx_port_params.ets.enabled ||
+	    ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured))
+		return;
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_update_ets_config(bp);
+	else
+		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+	struct lldp_remote_mib remote_mib = {0};
+	u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+	int rc;
+
+	DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n",
+	   dcbx_remote_mib_offset);
+
+	if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+				 DCBX_READ_REMOTE_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read remote mib from FW\n");
+		return rc;
+	}
+
+	/* save features and flags */
+	bp->dcbx_remote_feat = remote_mib.features;
+	bp->dcbx_remote_flags = remote_mib.flags;
+	return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+	struct lldp_local_mib local_mib = {0};
+	u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+	int rc;
+
+	DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+	if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+				 DCBX_READ_LOCAL_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read local mib from FW\n");
+		return rc;
+	}
+
+	/* save features and error */
+	bp->dcbx_local_feat = local_mib.features;
+	bp->dcbx_error = local_mib.error;
+	return 0;
+}
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+	u8 pri;
+
+	/* Choose the highest priority */
+	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+		if (ent->pri_bitmap & (1 << pri))
+			break;
+	return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+	return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+		DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+		DCB_APP_IDTYPE_ETHTYPE;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+	int i, err = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+			/* avoid invalid user-priority */
+			if (up) {
+				struct dcb_app app;
+				app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+				app.protocol = ent->app_id;
+				app.priority = delall ? 0 : up;
+				err = dcb_setapp(bp->dev, &app);
+			}
+		}
+	}
+	return err;
+}
+#endif
+
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+	u8 prio, cos;
+	for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+		for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+			if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+			    & (1 << prio)) {
+				bp->prio_to_cos[prio] = cos;
+				DP(BNX2X_MSG_DCB,
+				   "tx_mapping %d --> %d\n", prio, cos);
+			}
+		}
+	}
+
+	/* setup tc must be called under rtnl lock, but we can't take it here
+	 * as we are handling an attention on a work queue which must be
+	 * flushed in some rtnl-locked contexts (e.g. on ifdown)
+	 */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
+}
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+	switch (state) {
+	case BNX2X_DCBX_STATE_NEG_RECEIVED:
+		{
+			DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+			/*
+			 * Delete app tlvs from dcbnl before reading new
+			 * negotiation results
+			 */
+			bnx2x_dcbnl_update_applist(bp, true);
+
+			/* Read remote mib if dcbx is in the FW */
+			if (bnx2x_dcbx_read_shmem_remote_mib(bp))
+				return;
+#endif
+			/* Read neg results if dcbx is in the FW */
+			if (bnx2x_dcbx_read_shmem_neg_results(bp))
+				return;
+
+			bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+						  bp->dcbx_error);
+
+			bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+						 bp->dcbx_error);
+
+			/* mark DCBX result for PMF migration */
+			bnx2x_update_drv_flags(bp,
+					       1 << DRV_FLAGS_DCB_CONFIGURED,
+					       1);
+#ifdef BCM_DCBNL
+			/*
+			 * Add new app tlvs to dcbnl
+			 */
+			bnx2x_dcbnl_update_applist(bp, false);
+#endif
+			/*
+			 * reconfigure the netdevice with the results of the new
+			 * dcbx negotiation.
+			 */
+			bnx2x_dcbx_update_tc_mapping(bp);
+
+			/*
+			 * allow other functions to update their netdevices
+			 * accordingly
+			 */
+			if (IS_MF(bp))
+				bnx2x_link_sync_notify(bp);
+
+			bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
+			return;
+		}
+	case BNX2X_DCBX_STATE_TX_PAUSED:
+		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+		bnx2x_pfc_set_pfc(bp);
+
+		bnx2x_dcbx_update_ets_params(bp);
+
+		/* ets may affect cmng configuration: reinit it in hw */
+		bnx2x_set_local_cmng(bp);
+		return;
+	case BNX2X_DCBX_STATE_TX_RELEASED:
+		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+		/*
+		 * Send a notification for the new negotiated parameters
+		 */
+		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+		return;
+	default:
+		BNX2X_ERR("Unknown DCBX_STATE\n");
+	}
+}
+
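+/* The per-port admin MIBs follow the array of per-port lldp_params in shmem */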
+#define LLDP_ADMIN_MIB_OFFSET(bp)	(PORT_MAX*sizeof(struct lldp_params) + \
+				      BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+				u32 dcbx_lldp_params_offset)
+{
+	struct lldp_admin_mib admin_mib;
+	u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+	u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+	/* shortcuts */
+	struct dcbx_features *af = &admin_mib.features;
+	struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+	memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+
+	/* Read the data first */
+	bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+			sizeof(struct lldp_admin_mib));
+
+	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+		SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+	else
+		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+	if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
+		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+		admin_mib.ver_cfg_flags |=
+			(dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+			 DCBX_CEE_VERSION_MASK;
+
+		af->ets.enabled = (u8)dp->admin_ets_enable;
+
+		af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+		/* FOR IEEE dp->admin_tc_supported_tx_enable */
+		if (dp->admin_ets_configuration_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_ETS_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				    DCBX_ETS_CONFIG_TX_ENABLED);
+		/* For IEEE admin_ets_recommendation_tx_enable */
+		if (dp->admin_pfc_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_PFC_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_PFC_CONFIG_TX_ENABLED);
+
+		if (dp->admin_application_priority_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_APP_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_APP_CONFIG_TX_ENABLED);
+
+		if (dp->admin_ets_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+		/* For IEEE admin_ets_reco_valid */
+		if (dp->admin_pfc_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+		if (dp->admin_app_priority_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+		for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+			DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+				(u8)dp->admin_configuration_bw_precentage[i]);
+
+			DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n",
+			   i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+		}
+
+		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+			DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+					(u8)dp->admin_configuration_ets_pg[i]);
+
+			DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n",
+			   i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+		}
+
+		/* For IEEE admin_recommendation_bw_percentage
+		 * For IEEE admin_recommendation_ets_pg
+		 */
+		af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+		for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
+			if (dp->admin_priority_app_table[i].valid) {
+				struct bnx2x_admin_priority_app_table *table =
+					dp->admin_priority_app_table;
+				if ((ETH_TYPE_FCOE == table[i].app_id) &&
+				   (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+					traf_type = FCOE_APP_IDX;
+				else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+				   (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+					traf_type = ISCSI_APP_IDX;
+				else
+					traf_type = other_traf_type++;
+
+				af->app.app_pri_tbl[traf_type].app_id =
+					table[i].app_id;
+
+				af->app.app_pri_tbl[traf_type].pri_bitmap =
+					(u8)(1 << table[i].priority);
+
+				af->app.app_pri_tbl[traf_type].appBitfield =
+				    (DCBX_APP_ENTRY_VALID);
+
+				af->app.app_pri_tbl[traf_type].appBitfield |=
+				   (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+					DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+			}
+		}
+
+		af->app.default_pri = (u8)dp->admin_default_priority;
+	}
+
+	/* Write the data. */
+	bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+			 sizeof(struct lldp_admin_mib));
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+	if (!CHIP_IS_E1x(bp)) {
+		bp->dcb_state = dcb_on;
+		bp->dcbx_enabled = dcbx_enabled;
+	} else {
+		bp->dcb_state = false;
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+	}
+	DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n",
+	   dcb_on ? "ON" : "OFF",
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+	   "on-chip with negotiation" : "invalid");
+}
+
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+	bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+	bp->dcbx_config_params.admin_ets_willing = 1;
+	bp->dcbx_config_params.admin_pfc_willing = 1;
+	bp->dcbx_config_params.overwrite_settings = 1;
+	bp->dcbx_config_params.admin_ets_enable = 1;
+	bp->dcbx_config_params.admin_pfc_enable = 1;
+	bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+	bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+	bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+	bp->dcbx_config_params.admin_ets_reco_valid = 1;
+	bp->dcbx_config_params.admin_app_priority_willing = 1;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+	bp->dcbx_config_params.admin_pfc_bitmap = 0x0;
+	bp->dcbx_config_params.admin_priority_app_table[0].valid = 0;
+	bp->dcbx_config_params.admin_priority_app_table[1].valid = 0;
+	bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+	bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+	bp->dcbx_config_params.admin_default_priority = 0;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
+{
+	u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+	/* only PMF can send ADMIN msg to MFW in old MFW versions */
+	if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+		return;
+
+	if (bp->dcbx_enabled <= 0)
+		return;
+
+	/* validate:
+	 * chip is good for dcbx version,
+	 * dcb is wanted
+	 * shmem2 contains DCBX support fields
+	 */
+	DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
+	   bp->dcb_state, bp->port.pmf);
+
+	if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
+	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+		dcbx_lldp_params_offset =
+			SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+		DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n",
+		   dcbx_lldp_params_offset);
+
+		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
+
+		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+			/* need HW lock to avoid scenario of two drivers
+			 * writing in parallel to shmem
+			 */
+			bnx2x_acquire_hw_lock(bp,
+					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+			if (update_shmem)
+				bnx2x_dcbx_admin_mib_updated_params(bp,
+					dcbx_lldp_params_offset);
+
+			/* Let HW start negotiation */
+			bnx2x_fw_command(bp,
+					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+			/* release HW lock only after MFW acks that it finished
+			 * reading values from shmem
+			 */
+			bnx2x_release_hw_lock(bp,
+					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+		}
+	}
+}
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+			    struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+	u8 pri = 0;
+	u8 cos = 0;
+
+	DP(BNX2X_MSG_DCB,
+	   "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+	DP(BNX2X_MSG_DCB,
+	   "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n",
+	   bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+	for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+		DP(BNX2X_MSG_DCB,
+		   "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n",
+		   cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+		DP(BNX2X_MSG_DCB,
+		   "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n",
+		   cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+		DP(BNX2X_MSG_DCB,
+		   "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n",
+		   cos, bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+		DP(BNX2X_MSG_DCB,
+		   "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n",
+		   cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+	}
+
+	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+		DP(BNX2X_MSG_DCB,
+		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n",
+		   pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+		DP(BNX2X_MSG_DCB,
+		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+		   pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+	}
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+					    u32 *pg_pri_orginal_spread,
+					    struct pg_help_data *help_data)
+{
+	bool pg_found  = false;
+	u32 i, traf_type, add_traf_type, add_pg;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	struct pg_entry_help_data *data = help_data->data; /*shortcut*/
+
+	/* Set to invalid */
+	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+		data[i].pg = DCBX_ILLEGAL_PG;
+
+	for (add_traf_type = 0;
+	     add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+		pg_found = false;
+		if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+			add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+			for (traf_type = 0;
+			     traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+			     traf_type++) {
+				if (data[traf_type].pg == add_pg) {
+					if (!(data[traf_type].pg_priority &
+					     (1 << ttp[add_traf_type])))
+						data[traf_type].
+							num_of_dif_pri++;
+					data[traf_type].pg_priority |=
+						(1 << ttp[add_traf_type]);
+					pg_found = true;
+					break;
+				}
+			}
+			if (false == pg_found) {
+				data[help_data->num_of_pg].pg = add_pg;
+				data[help_data->num_of_pg].pg_priority =
+						(1 << ttp[add_traf_type]);
+				data[help_data->num_of_pg].num_of_dif_pri = 1;
+				help_data->num_of_pg++;
+			}
+		}
+		DP(BNX2X_MSG_DCB,
+		   "add_traf_type %d pg_found %s num_of_pg %d\n",
+		   add_traf_type, (false == pg_found) ? "NO" : "YES",
+		   help_data->num_of_pg);
+	}
+}
+
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+					       struct cos_help_data *cos_data,
+					       u32 pri_join_mask)
+{
+	/* Only one priority, hence only one COS */
+	cos_data->data[0].pausable =
+		IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+	cos_data->data[0].pri_join_mask = pri_join_mask;
+	cos_data->data[0].cos_bw = 100;
+	cos_data->num_of_cos = 1;
+}
+
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+					    struct cos_entry_help_data *data,
+					    u8 pg_bw)
+{
+	if (data->cos_bw == DCBX_INVALID_COS_BW)
+		data->cos_bw = pg_bw;
+	else
+		data->cos_bw += pg_bw;
+}
+
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+			struct cos_help_data *cos_data,
+			u32 *pg_pri_orginal_spread,
+			struct dcbx_ets_feature *ets)
+{
+	u32	pri_tested	= 0;
+	u8	i		= 0;
+	u8	entry		= 0;
+	u8	pg_entry	= 0;
+	u8	num_of_pri	= LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+	cos_data->data[0].pausable = true;
+	cos_data->data[1].pausable = false;
+	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+	for (i = 0 ; i < num_of_pri ; i++) {
+		pri_tested = 1 << bp->dcbx_port_params.
+					app.traffic_type_priority[i];
+
+		if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+			cos_data->data[1].pri_join_mask |= pri_tested;
+			entry = 1;
+		} else {
+			cos_data->data[0].pri_join_mask |= pri_tested;
+			entry = 0;
+		}
+		pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+						app.traffic_type_priority[i]];
+		/* There can be only one strict pg */
+		if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+			bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+		else
+			/* If we join a group and one member is strict,
+			 * strict priority overrides the BW rules
+			 */
+			cos_data->data[entry].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
+	}
+	if ((0 == cos_data->data[0].pri_join_mask) &&
+	    (0 == cos_data->data[1].pri_join_mask))
+		BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+#ifndef POWER_OF_2
+#define POWER_OF_2(x)	((0 != x) && (0 == (x & (x-1))))
+#endif
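+/* Sample values for the macro above (illustration only):
+ *   POWER_OF_2(1) -> true,  POWER_OF_2(4) -> true,
+ *   POWER_OF_2(6) -> false, POWER_OF_2(0) -> false.
+ * Below it is applied to a priority bitmask, where "power of two" means that
+ * exactly one priority bit is set.
+ */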
+
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
+					      struct pg_help_data *pg_help_data,
+					      struct cos_help_data *cos_data,
+					      u32 pri_join_mask,
+					      u8 num_of_dif_pri)
+{
+	u8 i = 0;
+	u32 pri_tested = 0;
+	u32 pri_mask_without_pri = 0;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	/*debug*/
+	if (num_of_dif_pri == 1) {
+		bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+		return;
+	}
+	/* single priority group */
+	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+		/* If there are both pauseable and non-pauseable priorities,
+		 * the pauseable priorities go to the first queue and
+		 * the non-pauseable priorities go to the second queue.
+		 */
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+			/* Pauseable */
+			cos_data->data[0].pausable = true;
+			/* Non pauseable.*/
+			cos_data->data[1].pausable = false;
+
+			if (2 == num_of_dif_pri) {
+				cos_data->data[0].cos_bw = 50;
+				cos_data->data[1].cos_bw = 50;
+			}
+
+			if (3 == num_of_dif_pri) {
+				if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+							pri_join_mask))) {
+					cos_data->data[0].cos_bw = 33;
+					cos_data->data[1].cos_bw = 67;
+				} else {
+					cos_data->data[0].cos_bw = 67;
+					cos_data->data[1].cos_bw = 33;
+				}
+			}
+
+		} else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+			/* If there are only pauseable priorities,
+			 * then one/two priorities go to the first queue
+			 * and one priority goes to the second queue.
+			 */
+			if (2 == num_of_dif_pri) {
+				cos_data->data[0].cos_bw = 50;
+				cos_data->data[1].cos_bw = 50;
+			} else {
+				cos_data->data[0].cos_bw = 67;
+				cos_data->data[1].cos_bw = 33;
+			}
+			cos_data->data[1].pausable = true;
+			cos_data->data[0].pausable = true;
+			/* All priorities except FCOE */
+			cos_data->data[0].pri_join_mask = (pri_join_mask &
+				((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+			/* Only FCOE priority.*/
+			cos_data->data[1].pri_join_mask =
+				(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+		} else
+			/* If there are only non-pauseable priorities,
+			 * they will all go to the same queue.
+			 */
+			bnx2x_dcbx_ets_disabled_entry_data(bp,
+						cos_data, pri_join_mask);
+	} else {
+		/* priority group which is not BW limited (PG#15):*/
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+			/* If there are both pauseable and non-pauseable
+			 * priorities, the pauseable priorities go to the first
+			 * queue and the non-pauseable priorities
+			 * go to the second queue.
+			 */
+			if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+			    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+				cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			} else {
+				cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+			}
+			/* Pauseable */
+			cos_data->data[0].pausable = true;
+			/* Non pauseable. */
+			cos_data->data[1].pausable = false;
+		} else {
+			/* If there are only pauseable priorities or
+			 * only non-pauseable, the lower priorities go
+			 * to the first queue and the higher priorities go
+			 * to the second queue.
+			 */
+			cos_data->data[0].pausable =
+				cos_data->data[1].pausable =
+				IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+			for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+				pri_tested = 1 << bp->dcbx_port_params.
+					app.traffic_type_priority[i];
+				/* Remove priority tested */
+				pri_mask_without_pri =
+					(pri_join_mask & ((u8)(~pri_tested)));
+				if (pri_mask_without_pri < pri_tested)
+					break;
+			}
+
+			if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+				BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n");
+
+			cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+			cos_data->data[1].pri_join_mask = pri_tested;
+			/* Both queues are strict priority,
+			 * and that with the highest priority
+			 * gets the highest strict priority in the arbiter.
+			 */
+			cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+		}
+	}
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+			    struct bnx2x		*bp,
+			    struct  pg_help_data	*pg_help_data,
+			    struct dcbx_ets_feature	*ets,
+			    struct cos_help_data	*cos_data,
+			    u32			*pg_pri_orginal_spread,
+			    u32				pri_join_mask,
+			    u8				num_of_dif_pri)
+{
+	u8 i = 0;
+	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
+
+	/* If there are both pauseable and non-pauseable priorities,
+	 * the pauseable priorities go to the first queue and
+	 * the non-pauseable priorities go to the second queue.
+	 */
+	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+					 pg_help_data->data[0].pg_priority) ||
+		    IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+					 pg_help_data->data[1].pg_priority)) {
+			/* If one PG contains both pauseable and
+			 * non-pauseable priorities then ETS is disabled.
+			 */
+			bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+					pg_pri_orginal_spread, ets);
+			bp->dcbx_port_params.ets.enabled = false;
+			return;
+		}
+
+		/* Pauseable */
+		cos_data->data[0].pausable = true;
+		/* Non pauseable. */
+		cos_data->data[1].pausable = false;
+		if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+				pg_help_data->data[0].pg_priority)) {
+			/* 0 is pauseable */
+			cos_data->data[0].pri_join_mask =
+				pg_help_data->data[0].pg_priority;
+			pg[0] = pg_help_data->data[0].pg;
+			cos_data->data[1].pri_join_mask =
+				pg_help_data->data[1].pg_priority;
+			pg[1] = pg_help_data->data[1].pg;
+		} else {/* 1 is pauseable */
+			cos_data->data[0].pri_join_mask =
+				pg_help_data->data[1].pg_priority;
+			pg[0] = pg_help_data->data[1].pg;
+			cos_data->data[1].pri_join_mask =
+				pg_help_data->data[0].pg_priority;
+			pg[1] = pg_help_data->data[0].pg;
+		}
+	} else {
+		/* If there are only pauseable priorities or
+		 * only non-pauseable, each PG goes to a queue.
+		 */
+		cos_data->data[0].pausable = cos_data->data[1].pausable =
+			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+		cos_data->data[0].pri_join_mask =
+			pg_help_data->data[0].pg_priority;
+		pg[0] = pg_help_data->data[0].pg;
+		cos_data->data[1].pri_join_mask =
+			pg_help_data->data[1].pg_priority;
+		pg[1] = pg_help_data->data[1].pg;
+	}
+
+	/* There can be only one strict pg */
+	for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
+		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+			cos_data->data[i].cos_bw =
+				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+		else
+			cos_data->data[i].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
+	}
+}
+
+static int bnx2x_dcbx_join_pgs(
+			      struct bnx2x            *bp,
+			      struct dcbx_ets_feature *ets,
+			      struct pg_help_data     *pg_help_data,
+			      u8                      required_num_of_pg)
+{
+	u8 entry_joined    = pg_help_data->num_of_pg - 1;
+	u8 entry_removed   = entry_joined + 1;
+	u8 pg_joined       = 0;
+
+	if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+						<= pg_help_data->num_of_pg) {
+
+		BNX2X_ERR("required_num_of_pg can't be zero\n");
+		return -EINVAL;
+	}
+
+	while (required_num_of_pg < pg_help_data->num_of_pg) {
+		entry_joined = pg_help_data->num_of_pg - 2;
+		entry_removed = entry_joined + 1;
+		/* protect index */
+		entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+		pg_help_data->data[entry_joined].pg_priority |=
+			pg_help_data->data[entry_removed].pg_priority;
+
+		pg_help_data->data[entry_joined].num_of_dif_pri +=
+			pg_help_data->data[entry_removed].num_of_dif_pri;
+
+		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+			/* Entries joined strict priority rules */
+			pg_help_data->data[entry_joined].pg =
+							DCBX_STRICT_PRI_PG;
+		else {
+			/* Entries can be joined - join their BW */
+			pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_joined].pg) +
+				    DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_removed].pg);
+
+			DCBX_PG_BW_SET(ets->pg_bw_tbl,
+				pg_help_data->data[entry_joined].pg, pg_joined);
+		}
+		/* Joined the entries */
+		pg_help_data->num_of_pg--;
+	}
+
+	return 0;
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+			      struct bnx2x		*bp,
+			      struct pg_help_data	*pg_help_data,
+			      struct dcbx_ets_feature	*ets,
+			      struct cos_help_data	*cos_data,
+			      u32			*pg_pri_orginal_spread,
+			      u32			pri_join_mask,
+			      u8			num_of_dif_pri)
+{
+	u8 i = 0;
+	u32 pri_tested = 0;
+	u8 entry = 0;
+	u8 pg_entry = 0;
+	bool b_found_strict = false;
+	u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+	/* If there are both pauseable and non-pauseable priorities,
+	 * the pauseable priorities go to the first queue and the
+	 * non-pauseable priorities go to the second queue.
+	 */
+	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+		bnx2x_dcbx_separate_pauseable_from_non(bp,
+				cos_data, pg_pri_orginal_spread, ets);
+	else {
+		/* If two BW-limited PG-s were combined to one queue,
+		 * the BW is their sum.
+		 *
+		 * If there are only pauseable priorities or only non-pauseable,
+		 * and there are both BW-limited and non-BW-limited PG-s,
+		 * the BW-limited PG/s go to one queue and the non-BW-limited
+		 * PG/s go to the second queue.
+		 *
+		 * If there are only pauseable priorities or only non-pauseable
+		 * and all are BW limited, then two priorities go to the first
+		 * queue and one priority goes to the second queue.
+		 *
+		 * We will join these two cases:
+		 * if one PG is not BW limited it will go to the second queue,
+		 * otherwise the last priority will get it.
+		 */
+
+		cos_data->data[0].pausable = cos_data->data[1].pausable =
+			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+		for (i = 0 ; i < num_of_pri; i++) {
+			pri_tested = 1 << bp->dcbx_port_params.
+				app.traffic_type_priority[i];
+			pg_entry = (u8)pg_pri_orginal_spread[bp->
+				dcbx_port_params.app.traffic_type_priority[i]];
+
+			if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+				entry = 0;
+
+				if (i == (num_of_pri-1) &&
+				    false == b_found_strict)
+					/* last entry will be handled separately.
+					 * If no priority is strict, the last
+					 * entry goes to the last queue.
+					 */
+					entry = 1;
+				cos_data->data[entry].pri_join_mask |=
+								pri_tested;
+				bnx2x_dcbx_add_to_cos_bw(bp,
+					&cos_data->data[entry],
+					DCBX_PG_BW_GET(ets->pg_bw_tbl,
+						       pg_entry));
+			} else {
+				b_found_strict = true;
+				cos_data->data[1].pri_join_mask |= pri_tested;
+				/* If we join a group and one member is strict,
+				 * strict priority overrides the BW rules
+				 */
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+			}
+		}
+	}
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       struct cos_help_data *cos_data,
+				       u32 *pg_pri_orginal_spread,
+				       u32 pri_join_mask,
+				       u8 num_of_dif_pri)
+{
+	/* default E2 settings */
+	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+	switch (help_data->num_of_pg) {
+	case 1:
+		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       cos_data,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+	case 2:
+		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+					    bp,
+					    help_data,
+					    ets,
+					    cos_data,
+					    pg_pri_orginal_spread,
+					    pri_join_mask,
+					    num_of_dif_pri);
+		break;
+
+	case 3:
+		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+					      bp,
+					      help_data,
+					      ets,
+					      cos_data,
+					      pg_pri_orginal_spread,
+					      pri_join_mask,
+					      num_of_dif_pri);
+		break;
+	default:
+		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+		bnx2x_dcbx_ets_disabled_entry_data(bp,
+						   cos_data, pri_join_mask);
+	}
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+					struct cos_help_data *cos_data,
+					u8 entry,
+					u8 num_spread_of_entries,
+					u8 strict_app_pris)
+{
+	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+	u8 app_pri_bit = 0;
+
+	while (num_spread_of_entries && num_of_app_pri > 0) {
+		app_pri_bit = 1 << (num_of_app_pri - 1);
+		if (app_pri_bit & strict_app_pris) {
+			struct cos_entry_help_data *data = &cos_data->
+								data[entry];
+			num_spread_of_entries--;
+			if (num_spread_of_entries == 0) {
+				/* last entry needed: put all remaining priorities in it */
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = strict_app_pris;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			} else {
+				strict_app_pris &= ~app_pri_bit;
+
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = app_pri_bit;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			}
+
+			strict_pri =
+			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+			entry++;
+		}
+
+		num_of_app_pri--;
+	}
+
+	if (num_spread_of_entries) {
+		BNX2X_ERR("Didn't succeed to spread strict priorities\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+					 struct cos_help_data *cos_data,
+					 u8 entry,
+					 u8 num_spread_of_entries,
+					 u8 strict_app_pris)
+{
+	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+					 num_spread_of_entries,
+					 strict_app_pris)) {
+		struct cos_entry_help_data *data = &cos_data->
+						    data[entry];
+		/* Fill BW entry */
+		data->cos_bw = DCBX_INVALID_COS_BW;
+		data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+		data->pri_join_mask = strict_app_pris;
+		data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+				 data->pri_join_mask);
+		return 1;
+	}
+
+	return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+					   struct pg_help_data *help_data,
+					   struct dcbx_ets_feature *ets,
+					   struct cos_help_data *cos_data,
+					   u32 pri_join_mask)
+
+{
+	u8 need_num_of_entries = 0;
+	u8 i = 0;
+	u8 entry = 0;
+
+	/*
+	 * If the number of requested PG-s in CEE is greater than 3,
+	 * the results are undefined since this is a violation
+	 * of the standard.
+	 */
+	if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+		if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+					DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs - we will disable ETS\n");
+			bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+							   pri_join_mask);
+			return;
+		}
+	}
+
+	for (i = 0 ; i < help_data->num_of_pg; i++) {
+		struct pg_entry_help_data *pg =  &help_data->data[i];
+		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+			struct cos_entry_help_data *data = &cos_data->
+							    data[entry];
+			/* Fill BW entry */
+			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+			data->strict = BNX2X_DCBX_STRICT_INVALID;
+			data->pri_join_mask = pg->pg_priority;
+			data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+						data->pri_join_mask);
+
+			entry++;
+		} else {
+			need_num_of_entries =  min_t(u8,
+				(u8)pg->num_of_dif_pri,
+				(u8)DCBX_COS_MAX_NUM_E3B0 -
+						 help_data->num_of_pg + 1);
+			/*
+			 * If there are still VOQ-s which have no associated PG,
+			 * then associate these VOQ-s to PG15. These PG-s will
+			 * be used for SP between priorities on PG15.
+			 */
+			entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+				entry, need_num_of_entries, pg->pg_priority);
+		}
+	}
+
+	/* the entry will represent the number of COSes used */
+	cos_data->num_of_cos = entry;
+}
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       u32 *pg_pri_orginal_spread)
+{
+	struct cos_help_data         cos_data;
+	u8                    i                           = 0;
+	u32                   pri_join_mask               = 0;
+	u8                    num_of_dif_pri              = 0;
+
+	memset(&cos_data, 0, sizeof(cos_data));
+
+	/* Validate the pg value */
+	for (i = 0; i < help_data->num_of_pg ; i++) {
+		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+				  help_data->data[i].pg);
+		pri_join_mask   |=  help_data->data[i].pg_priority;
+		num_of_dif_pri  += help_data->data[i].num_of_dif_pri;
+	}
+
+	/* defaults */
+	cos_data.num_of_cos = 1;
+	for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+		cos_data.data[i].pri_join_mask = 0;
+		cos_data.data[i].pausable = false;
+		cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+	}
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+					       &cos_data, pri_join_mask);
+	else /* E2 + E3A0 */
+		bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+							  help_data, ets,
+							  &cos_data,
+							  pg_pri_orginal_spread,
+							  pri_join_mask,
+							  num_of_dif_pri);
+
+	for (i = 0; i < cos_data.num_of_cos ; i++) {
+		struct bnx2x_dcbx_cos_params *p =
+			&bp->dcbx_port_params.ets.cos_params[i];
+
+		p->strict = cos_data.data[i].strict;
+		p->bw_tbl = cos_data.data[i].cos_bw;
+		p->pri_bitmask = cos_data.data[i].pri_join_mask;
+		p->pauseable = cos_data.data[i].pausable;
+
+		/* sanity */
+		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (p->pri_bitmask == 0)
+				BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+			if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+				if (p->pauseable &&
+				    DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for pausable COS %d\n",
+						  i);
+
+				if (!p->pauseable &&
+				    DCBX_PFC_PRI_GET_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for nonpausable COS %d\n",
+						  i);
+			}
+		}
+
+		if (p->pauseable)
+			DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n",
+				  i, cos_data.data[i].pri_join_mask);
+		else
+			DP(BNX2X_MSG_DCB,
+			   "COS %d NONPAUSABLE prijoinmask 0x%x\n",
+			   i, cos_data.data[i].pri_join_mask);
+	}
+
+	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+				u32 *set_configuration_ets_pg,
+				u32 *pri_pg_tbl)
+{
+	int i;
+
+	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+		set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+		DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n",
+		   i, set_configuration_ets_pg[i]);
+	}
+}
+
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+				 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+	u16 pri_bit = 0;
+	u8 cos = 0, pri = 0;
+	struct priority_cos *tt2cos;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+			     GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+				       1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+	memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+	/* to disable DCB - the structure must be zeroed */
+	if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)
+		return;
+
+	/*shortcut*/
+	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+	/* Fw version should be incremented each update */
+	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+	pfc_fw_cfg->dcb_enabled = 1;
+
+	/* Fill priority parameters */
+	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+		tt2cos[pri].priority = ttp[pri];
+		pri_bit = 1 << tt2cos[pri].priority;
+
+		/* Fill COS parameters based on the calculated COS to
+		 * make it more general for future use */
+		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+			if (bp->dcbx_port_params.ets.cos_params[cos].
+						pri_bitmask & pri_bit)
+					tt2cos[pri].cos = cos;
+
+		pfc_fw_cfg->dcb_outer_pri[pri]  = ttp[pri];
+	}
+
+	/* we never want the FW to add a 0 vlan tag */
+	pfc_fw_cfg->dont_add_pri_0_en = 1;
+
+	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
+}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* if we need to synchronize DCBX result from prev PMF
+	 * read it from shmem and update bp and netdev accordingly
+	 */
+	if (SHMEM2_HAS(bp, drv_flags) &&
+	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) {
+		/* Read neg results if dcbx is in the FW */
+		if (bnx2x_dcbx_read_shmem_neg_results(bp))
+			return;
+
+		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					  bp->dcbx_error);
+		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					 bp->dcbx_error);
+#ifdef BCM_DCBNL
+		/*
+		 * Add new app tlvs to dcbnl
+		 */
+		bnx2x_dcbnl_update_applist(bp, false);
+		/*
+		 * Send a notification for the new negotiated parameters
+		 */
+		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+		/*
+		 * reconfigure the netdevice with the results of the new
+		 * dcbx negotiation.
+		 */
+		bnx2x_dcbx_update_tc_mapping(bp);
+	}
+}
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS		(DCB_CAP_DCBX_LLD_MANAGED | \
+				DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+	/* validate dcbnl call that may change HW state:
+	 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+	 */
+	return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state);
+	return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+
+	/* Fail to set state to "enabled" if dcbx is disabled in nvram */
+	if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
+		      (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
+		DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
+		return 1;
+	}
+
+	bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+	return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+					 u8 *perm_addr)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n");
+
+	/* first the HW mac address */
+	memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+	if (CNIC_LOADED(bp))
+		/* second SAN address */
+		memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
+		       netdev->addr_len);
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+					u8 prio_type, u8 pgid, u8 bw_pct,
+					u8 up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid);
+	if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+		return;
+
+	/**
+	 * bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+
+	bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+					 int pgid, u8 bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct);
+
+	if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+		return;
+
+	bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+					u8 prio_type, u8 pgid, u8 bw_pct,
+					u8 up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+					 int pgid, u8 bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+					u8 *prio_type, u8 *pgid, u8 *bw_pct,
+					u8 *up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
+
+	/**
+	 * bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+	*up_map = *bw_pct = *prio_type = *pgid = 0;
+
+	if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+		return;
+
+	*pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid);
+
+	*bw_pct = 0;
+
+	if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+		return;
+
+	*bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+					u8 *prio_type, u8 *pgid, u8 *bw_pct,
+					u8 *up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
+
+	*prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
+
+	*bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting);
+
+	if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	if (setting) {
+		bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
+		bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+	} else {
+		bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio);
+	}
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 *setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
+
+	*setting = 0;
+
+	if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	*setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(BNX2X_MSG_DCB, "SET-ALL\n");
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return 1;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev,
+			   "Handling parity error recovery. Try again later\n");
+		return 1;
+	}
+	if (netif_running(bp->dev)) {
+		bnx2x_update_drv_flags(bp,
+				       1 << DRV_FLAGS_DCB_MFW_CONFIGURED,
+				       1);
+		bnx2x_dcbx_init(bp, true);
+	}
+	DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
+
+	return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	if (bp->dcb_state) {
+		switch (capid) {
+		case DCB_CAP_ATTR_PG:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_PFC:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_UP2TC:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_PG_TCS:
+			*cap = 0x80;	/* 8 priorities for PGs */
+			break;
+		case DCB_CAP_ATTR_PFC_TCS:
+			*cap = 0x80;	/* 8 priorities for PFC */
+			break;
+		case DCB_CAP_ATTR_GSP:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_BCN:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_DCBX:
+			*cap = BNX2X_DCBX_CAPS;
+			break;
+		default:
+			BNX2X_ERR("Non valid capability ID\n");
+			rval = 1;
+			break;
+		}
+	} else {
+		DP(BNX2X_MSG_DCB, "DCB disabled\n");
+		rval = 1;
+	}
+
+	DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
+	return rval;
+}
+
+static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(BNX2X_MSG_DCB, "tcid %d\n", tcid);
+
+	if (bp->dcb_state) {
+		switch (tcid) {
+		case DCB_NUMTCS_ATTR_PG:
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
+			break;
+		case DCB_NUMTCS_ATTR_PFC:
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
+			break;
+		default:
+			BNX2X_ERR("Non valid TC-ID\n");
+			rval = 1;
+			break;
+		}
+	} else {
+		DP(BNX2X_MSG_DCB, "DCB disabled\n");
+		rval = 1;
+	}
+
+	return rval;
+}
+
+static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num);
+	return -EINVAL;
+}
+
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+	if (!bp->dcb_state)
+		return 0;
+
+	return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return;
+
+	bp->dcbx_config_params.admin_pfc_tx_enable =
+	bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+	struct bnx2x_admin_priority_app_table *app_ent,
+	u8 idtype, u16 idval, u8 up)
+{
+	app_ent->valid = 1;
+
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+		app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+		break;
+	case DCB_APP_IDTYPE_PORTNUM:
+		app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+		break;
+	default:
+		break; /* never gets here */
+	}
+	app_ent->app_id = idval;
+	app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+	struct bnx2x_admin_priority_app_table *app_ent,
+	u8 idtype, u16 idval)
+{
+	if (!app_ent->valid)
+		return false;
+
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+		if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+			return false;
+		break;
+	case DCB_APP_IDTYPE_PORTNUM:
+		if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+			return false;
+		break;
+	default:
+		return false;
+	}
+	if (app_ent->app_id != idval)
+		return false;
+
+	return true;
+}
+
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+	int i, ff;
+
+	/* iterate over the app entries looking for idtype and idval */
+	for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
+		struct bnx2x_admin_priority_app_table *app_ent =
+			&bp->dcbx_config_params.admin_priority_app_table[i];
+		if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+			break;
+
+		if (ff < 0 && !app_ent->valid)
+			ff = i;
+	}
+	if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
+		/* if found overwrite up */
+		bp->dcbx_config_params.
+			admin_priority_app_table[i].priority = up;
+	else if (ff >= 0)
+		/* not found use first-free */
+		bnx2x_admin_app_set_ent(
+			&bp->dcbx_config_params.admin_priority_app_table[ff],
+			idtype, idval, up);
+	else {
+		/* app table is full */
+		BNX2X_ERR("Application table is too large\n");
+		return -EBUSY;
+	}
+
+	/* up is configured; if it is non-zero make sure the feature is enabled */
+	if (up)
+		bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+	return 0;
+}
+
+static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+				  u16 idval, u8 up)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n",
+	   idtype, idval, up);
+
+	if (!bnx2x_dcbnl_set_valid(bp)) {
+		DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
+		return -EINVAL;
+	}
+
+	/* verify idtype */
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+	case DCB_APP_IDTYPE_PORTNUM:
+		break;
+	default:
+		DP(BNX2X_MSG_DCB, "Wrong ID type\n");
+		return -EINVAL;
+	}
+	return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 state;
+
+	state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+		state |= DCB_CAP_DCBX_STATIC;
+
+	return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(BNX2X_MSG_DCB, "state = %02x\n", state);
+
+	/* set dcbx mode */
+
+	if ((state & BNX2X_DCBX_CAPS) != state) {
+		BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n",
+			  state);
+		return 1;
+	}
+
+	if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+		BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+		return 1;
+	}
+
+	if (state & DCB_CAP_DCBX_STATIC)
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+	else
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+	bp->dcbx_mode_uset = true;
+	return 0;
+}
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+				  u8 *flags)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(BNX2X_MSG_DCB, "featid %d\n", featid);
+
+	if (bp->dcb_state) {
+		*flags = 0;
+		switch (featid) {
+		case DCB_FEATCFG_ATTR_PG:
+			if (bp->dcbx_local_feat.ets.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+					      DCBX_REMOTE_MIB_ERROR))
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		case DCB_FEATCFG_ATTR_PFC:
+			if (bp->dcbx_local_feat.pfc.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+					      DCBX_LOCAL_PFC_MISMATCH |
+					      DCBX_REMOTE_MIB_ERROR))
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		case DCB_FEATCFG_ATTR_APP:
+			if (bp->dcbx_local_feat.app.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+					      DCBX_LOCAL_APP_MISMATCH |
+					      DCBX_REMOTE_MIB_ERROR))
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		default:
+			BNX2X_ERR("Non valid feature-ID\n");
+			rval = 1;
+			break;
+		}
+	} else {
+		DP(BNX2X_MSG_DCB, "DCB disabled\n");
+		rval = 1;
+	}
+
+	return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+				  u8 flags)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags);
+
+	/* ignore the 'advertise' flag */
+	if (bnx2x_dcbnl_set_valid(bp)) {
+		switch (featid) {
+		case DCB_FEATCFG_ATTR_PG:
+			bp->dcbx_config_params.admin_ets_enable =
+				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+			bp->dcbx_config_params.admin_ets_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		case DCB_FEATCFG_ATTR_PFC:
+			bp->dcbx_config_params.admin_pfc_enable =
+				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+			bp->dcbx_config_params.admin_pfc_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		case DCB_FEATCFG_ATTR_APP:
+			/* ignore enable, always enabled */
+			bp->dcbx_config_params.admin_app_priority_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		default:
+			BNX2X_ERR("Non valid feature-ID\n");
+			rval = 1;
+			break;
+		}
+	} else {
+		DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
+		rval = 1;
+	}
+
+	return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+			      struct dcb_peer_app_info *info, u16* app_count)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(BNX2X_MSG_DCB, "APP-INFO\n");
+
+	info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+	info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+	*app_count = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+		if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+		    DCBX_APP_ENTRY_VALID)
+			(*app_count)++;
+	return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+			       struct dcb_app *table)
+{
+	int i, j;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(BNX2X_MSG_DCB, "APP-TABLE\n");
+
+	for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+			table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+			table[j++].protocol = ent->app_id;
+		}
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+		pg->pg_bw[i] =
+			DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+		pg->prio_pg[i] =
+			DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+				 struct cee_pfc *pfc)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+	pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+	.getstate		= bnx2x_dcbnl_get_state,
+	.setstate		= bnx2x_dcbnl_set_state,
+	.getpermhwaddr		= bnx2x_dcbnl_get_perm_hw_addr,
+	.setpgtccfgtx		= bnx2x_dcbnl_set_pg_tccfg_tx,
+	.setpgbwgcfgtx		= bnx2x_dcbnl_set_pg_bwgcfg_tx,
+	.setpgtccfgrx		= bnx2x_dcbnl_set_pg_tccfg_rx,
+	.setpgbwgcfgrx		= bnx2x_dcbnl_set_pg_bwgcfg_rx,
+	.getpgtccfgtx		= bnx2x_dcbnl_get_pg_tccfg_tx,
+	.getpgbwgcfgtx		= bnx2x_dcbnl_get_pg_bwgcfg_tx,
+	.getpgtccfgrx		= bnx2x_dcbnl_get_pg_tccfg_rx,
+	.getpgbwgcfgrx		= bnx2x_dcbnl_get_pg_bwgcfg_rx,
+	.setpfccfg		= bnx2x_dcbnl_set_pfc_cfg,
+	.getpfccfg		= bnx2x_dcbnl_get_pfc_cfg,
+	.setall			= bnx2x_dcbnl_set_all,
+	.getcap			= bnx2x_dcbnl_get_cap,
+	.getnumtcs		= bnx2x_dcbnl_get_numtcs,
+	.setnumtcs		= bnx2x_dcbnl_set_numtcs,
+	.getpfcstate		= bnx2x_dcbnl_get_pfc_state,
+	.setpfcstate		= bnx2x_dcbnl_set_pfc_state,
+	.setapp			= bnx2x_dcbnl_set_app_up,
+	.getdcbx		= bnx2x_dcbnl_get_dcbx,
+	.setdcbx		= bnx2x_dcbnl_set_dcbx,
+	.getfeatcfg		= bnx2x_dcbnl_get_featcfg,
+	.setfeatcfg		= bnx2x_dcbnl_set_featcfg,
+	.peer_getappinfo	= bnx2x_peer_appinfo,
+	.peer_getapptable	= bnx2x_peer_apptable,
+	.cee_peer_getpg		= bnx2x_cee_peer_getpg,
+	.cee_peer_getpfc	= bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
new file mode 100644
index 0000000..9a9517c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,207 @@
+/* bnx2x_dcb.h: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+	u32 enabled;
+	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3,
+ * to be extended to 6.
+ */
+#define BNX2X_MAX_COS_SUPPORT	3
+#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT
+
+struct bnx2x_dcbx_cos_params {
+	u32	bw_tbl;
+	u32	pri_bitmask;
+	/*
+	 * strict priority: valid values are 0..5; 0 is highest priority.
+	 * There can't be two COSes with the same priority.
+	 */
+	u8	strict;
+#define BNX2X_DCBX_STRICT_INVALID			DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST			0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp)	((sp) + 1)
+	u8	pauseable;
+};
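+/* Illustration (derived from the definitions above): with the current
+ * DCBX_COS_MAX_NUM of 3, the valid strict levels are 0 (highest), 1 and 2,
+ * e.g. BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(BNX2X_DCBX_STRICT_COS_HIGHEST)
+ * evaluates to 1, and BNX2X_DCBX_STRICT_INVALID evaluates to 3; the 0..5
+ * range in the field comment anticipates the planned extension to 6 COSes.
+ */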
+
+struct bnx2x_dcbx_pg_params {
+	u32 enabled;
+	u8 num_of_cos; /* valid COS entries */
+	struct bnx2x_dcbx_cos_params	cos_params[DCBX_COS_MAX_NUM];
+};
+
+struct bnx2x_dcbx_pfc_params {
+	u32 enabled;
+	u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+	struct bnx2x_dcbx_pfc_params pfc;
+	struct bnx2x_dcbx_pg_params  ets;
+	struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE			(0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE		0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE		1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID	(BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+				  (bp)->dcbx_port_params.ets.enabled)
+
+struct bnx2x_config_lldp_params {
+	u32 overwrite_settings;
+	u32 msg_tx_hold;
+	u32 msg_fast_tx;
+	u32 tx_credit_max;
+	u32 msg_tx_interval;
+	u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+		u32 valid;
+		u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY	(0xFFFFFFFF)
+		u32 traffic_type;
+#define TRAFFIC_TYPE_ETH		0
+#define TRAFFIC_TYPE_PORT		1
+		u32 app_id;
+};
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+struct bnx2x_config_dcbx_params {
+	u32 overwrite_settings;
+	u32 admin_dcbx_version;
+	u32 admin_ets_enable;
+	u32 admin_pfc_enable;
+	u32 admin_tc_supported_tx_enable;
+	u32 admin_ets_configuration_tx_enable;
+	u32 admin_ets_recommendation_tx_enable;
+	u32 admin_pfc_tx_enable;
+	u32 admin_application_priority_tx_enable;
+	u32 admin_ets_willing;
+	u32 admin_ets_reco_valid;
+	u32 admin_pfc_willing;
+	u32 admin_app_priority_willing;
+	u32 admin_configuration_bw_precentage[8];
+	u32 admin_configuration_ets_pg[8];
+	u32 admin_recommendation_bw_precentage[8];
+	u32 admin_recommendation_ets_pg[8];
+	u32 admin_pfc_bitmap;
+	struct bnx2x_admin_priority_app_table
+		admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
+	u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits)		((flags) & (bits))
+#define SET_FLAGS(flags, bits)		((flags) |= (bits))
+#define RESET_FLAGS(flags, bits)	((flags) &= ~(bits))
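+/* Typical usage, as seen in bnx2x_dcb.c (illustration only):
+ *   SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);   - set bits
+ *   RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); - clear bits
+ *   if (GET_FLAGS(flags, DCBX_ETS_WILLING))                 - test bits
+ */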
+
+enum {
+	DCBX_READ_LOCAL_MIB,
+	DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE		(0x8906)
+#define TCP_PORT_ISCSI		(0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE				(512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed)  \
+				((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
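+/* Worked example (illustration only): at 10000 Mb/s,
+ *   PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(10000)
+ *	= (1000 * 512) / 10000 = 51
+ * i.e. about 51 ns per 512-bit quantum (integer division truncates 51.2).
+ */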
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD			130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD			170
+
+struct cos_entry_help_data {
+	u32			pri_join_mask;
+	u32			cos_bw;
+	u8			strict;
+	bool			pausable;
+};
+
+struct cos_help_data {
+	struct cos_entry_help_data	data[DCBX_COS_MAX_NUM];
+	u8				num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG				(0xFF)
+#define DCBX_PFC_PRI_MASK			(0xFF)
+#define DCBX_STRICT_PRIORITY			(15)
+#define DCBX_INVALID_COS_BW			(0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp)		\
+			((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp)		\
+					((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)	\
+				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
+			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri)	\
+			(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
+			(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+			((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri)	\
+			(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+			 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
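+/* Example (illustration only): if priority_non_pauseable_mask is 0x0f, then
+ * DCBX_PFC_PRI_PAUSE_MASK(bp) is 0xf0; a priority mask of 0x30 is "only
+ * pause", 0x03 is "only non-pause" and 0x11 is a mix (neither of the two
+ * ONLY_* macros above holds for it).
+ */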
+
+struct pg_entry_help_data {
+	u8	num_of_dif_pri;
+	u8	pg;
+	u32	pg_priority;
+};
+
+struct pg_help_data {
+	struct pg_entry_help_data	data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+	u8				num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+	BNX2X_DCBX_STATE_TX_PAUSED,
+	BNX2X_DCBX_STATE_TX_RELEASED
+};
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
new file mode 100644
index 0000000..eccfa13
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -0,0 +1,2218 @@
+/* bnx2x_dump.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ */
+
+#ifndef BNX2X_DUMP_H
+#define BNX2X_DUMP_H
+
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS    0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS    0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS    0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS    0x238a80
+
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
+
+struct dump_header {
+	u32 header_size; /* Size in DWORDs excluding this field */
+	u32 version;
+	u32 preset;
+	u32 dump_meta_data; /* OR of CHIP and PATH. */
+};
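+
+/*
+ * Example (illustrative only): per the comment above, dump_meta_data is the
+ * OR of a chip and a path value, so a dump taken on an E2 device on path 0
+ * would carry DUMP_CHIP_E2 | DUMP_PATH_0 == 4 | 512 == 0x204.
+ */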
+
+#define BNX2X_DUMP_VERSION 0x61111111
+struct reg_addr {
+	u32 addr;
+	u32 size;
+	u32 chips;
+	u32 presets;
+};
+
+struct wreg_addr {
+	u32 addr;
+	u32 size;
+	u32 read_regs_count;
+	const u32 *read_regs;
+	u32 chips;
+	u32 presets;
+};
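+
+/*
+ * Usage sketch (an assumption for illustration, not code taken from the
+ * driver): a register-dump loop would typically emit an entry from the
+ * tables below only when its 'chips' bitmask contains the running chip and
+ * its 'presets' bitmask contains the requested preset, roughly:
+ *
+ *	if ((entry->chips & chip_bit) && (entry->presets & preset_bit))
+ *		dump 'size' dwords starting at 'addr';
+ *
+ * where 'chip_bit' is one of the DUMP_CHIP_* values and 'preset_bit' is a
+ * hypothetical single-bit encoding of the preset (1..NUM_PRESETS).
+ */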
+
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+	{0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+	{0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
+};
+
+static const struct reg_addr reg_addrs[] = {
+	{ 0x2000, 1, 0x1f, 0xfff},
+	{ 0x2004, 1, 0x1f, 0x1fff},
+	{ 0x2008, 25, 0x1f, 0xfff},
+	{ 0x206c, 1, 0x1f, 0x1fff},
+	{ 0x2070, 313, 0x1f, 0xfff},
+	{ 0x2800, 103, 0x1f, 0xfff},
+	{ 0x3000, 287, 0x1f, 0xfff},
+	{ 0x3800, 331, 0x1f, 0xfff},
+	{ 0x8800, 6, 0x1f, 0x924},
+	{ 0x8818, 1, 0x1e, 0x924},
+	{ 0x9000, 4, 0x1c, 0x924},
+	{ 0x9010, 7, 0x1c, 0xfff},
+	{ 0x902c, 1, 0x1c, 0x924},
+	{ 0x9030, 1, 0x1c, 0xfff},
+	{ 0x9034, 13, 0x1c, 0x924},
+	{ 0x9068, 16, 0x1c, 0xfff},
+	{ 0x90a8, 98, 0x1c, 0x924},
+	{ 0x9230, 2, 0x1c, 0xfff},
+	{ 0x9238, 3, 0x1c, 0x924},
+	{ 0x9244, 1, 0x1c, 0xfff},
+	{ 0x9248, 1, 0x1c, 0x924},
+	{ 0x924c, 1, 0x4, 0x924},
+	{ 0x9250, 16, 0x1c, 0x924},
+	{ 0x92a8, 2, 0x1c, 0x1fff},
+	{ 0x92b4, 1, 0x1c, 0x1fff},
+	{ 0x9400, 33, 0x1c, 0x924},
+	{ 0x9484, 5, 0x18, 0x924},
+	{ 0xa000, 27, 0x1f, 0x924},
+	{ 0xa06c, 1, 0x3, 0x924},
+	{ 0xa070, 2, 0x1f, 0x924},
+	{ 0xa078, 1, 0x1f, 0x1fff},
+	{ 0xa07c, 31, 0x1f, 0x924},
+	{ 0xa0f8, 1, 0x1f, 0x1fff},
+	{ 0xa0fc, 3, 0x1f, 0x924},
+	{ 0xa108, 1, 0x1f, 0x1fff},
+	{ 0xa10c, 3, 0x1f, 0x924},
+	{ 0xa118, 1, 0x1f, 0x1fff},
+	{ 0xa11c, 28, 0x1f, 0x924},
+	{ 0xa18c, 4, 0x3, 0x924},
+	{ 0xa19c, 3, 0x1f, 0x924},
+	{ 0xa1a8, 1, 0x1f, 0x1fff},
+	{ 0xa1ac, 3, 0x1f, 0x924},
+	{ 0xa1b8, 1, 0x1f, 0x1fff},
+	{ 0xa1bc, 54, 0x1f, 0x924},
+	{ 0xa294, 2, 0x3, 0x924},
+	{ 0xa29c, 2, 0x1f, 0x924},
+	{ 0xa2a4, 2, 0x7, 0x924},
+	{ 0xa2ac, 2, 0x1f, 0x924},
+	{ 0xa2b4, 1, 0x1f, 0x1fff},
+	{ 0xa2b8, 49, 0x1f, 0x924},
+	{ 0xa38c, 2, 0x1f, 0x1fff},
+	{ 0xa398, 1, 0x1f, 0x1fff},
+	{ 0xa39c, 7, 0x1e, 0x924},
+	{ 0xa3b8, 2, 0x18, 0x924},
+	{ 0xa3c0, 1, 0x1e, 0x924},
+	{ 0xa3c4, 1, 0x1e, 0xfff},
+	{ 0xa3c8, 1, 0x1e, 0x924},
+	{ 0xa3d0, 1, 0x1e, 0x924},
+	{ 0xa3d8, 1, 0x1e, 0x924},
+	{ 0xa3e0, 1, 0x1e, 0x924},
+	{ 0xa3e8, 1, 0x1e, 0x924},
+	{ 0xa3f0, 1, 0x1e, 0x924},
+	{ 0xa3f8, 1, 0x1e, 0x924},
+	{ 0xa400, 1, 0x1f, 0x924},
+	{ 0xa404, 1, 0x1f, 0xfff},
+	{ 0xa408, 2, 0x1f, 0x1fff},
+	{ 0xa410, 7, 0x1f, 0x924},
+	{ 0xa42c, 12, 0x1f, 0xfff},
+	{ 0xa45c, 1, 0x1f, 0x924},
+	{ 0xa460, 1, 0x1f, 0x1924},
+	{ 0xa464, 15, 0x1f, 0x924},
+	{ 0xa4a0, 1, 0x7, 0x924},
+	{ 0xa4a4, 2, 0x1f, 0x924},
+	{ 0xa4ac, 2, 0x3, 0x924},
+	{ 0xa4b4, 1, 0x7, 0x924},
+	{ 0xa4b8, 2, 0x3, 0x924},
+	{ 0xa4c0, 3, 0x1f, 0x924},
+	{ 0xa4cc, 5, 0x3, 0x924},
+	{ 0xa4e0, 3, 0x1f, 0x924},
+	{ 0xa4fc, 2, 0x1f, 0x924},
+	{ 0xa504, 1, 0x3, 0x924},
+	{ 0xa508, 3, 0x1f, 0x924},
+	{ 0xa518, 1, 0x1f, 0x924},
+	{ 0xa520, 1, 0x1f, 0x924},
+	{ 0xa528, 1, 0x1f, 0x924},
+	{ 0xa530, 1, 0x1f, 0x924},
+	{ 0xa538, 1, 0x1f, 0x924},
+	{ 0xa540, 1, 0x1f, 0x924},
+	{ 0xa548, 1, 0x3, 0x924},
+	{ 0xa550, 1, 0x3, 0x924},
+	{ 0xa558, 1, 0x3, 0x924},
+	{ 0xa560, 1, 0x3, 0x924},
+	{ 0xa568, 1, 0x3, 0x924},
+	{ 0xa570, 1, 0x1f, 0x924},
+	{ 0xa580, 1, 0x1f, 0x1fff},
+	{ 0xa590, 1, 0x1f, 0x1fff},
+	{ 0xa5a0, 1, 0x7, 0x924},
+	{ 0xa5c0, 1, 0x1f, 0x924},
+	{ 0xa5e0, 1, 0x1e, 0x924},
+	{ 0xa5e8, 1, 0x1e, 0x924},
+	{ 0xa5f0, 1, 0x1e, 0x924},
+	{ 0xa5f8, 1, 0x6, 0x924},
+	{ 0xa5fc, 1, 0x1e, 0x924},
+	{ 0xa600, 5, 0x1e, 0xfff},
+	{ 0xa614, 1, 0x1e, 0x924},
+	{ 0xa618, 1, 0x1e, 0xfff},
+	{ 0xa61c, 1, 0x1e, 0x924},
+	{ 0xa620, 6, 0x1c, 0x924},
+	{ 0xa638, 20, 0x4, 0x924},
+	{ 0xa688, 35, 0x1c, 0x924},
+	{ 0xa714, 1, 0x1c, 0xfff},
+	{ 0xa718, 2, 0x1c, 0x924},
+	{ 0xa720, 1, 0x1c, 0xfff},
+	{ 0xa724, 3, 0x1c, 0x924},
+	{ 0xa730, 1, 0x4, 0x924},
+	{ 0xa734, 2, 0x1c, 0x924},
+	{ 0xa73c, 4, 0x4, 0x924},
+	{ 0xa74c, 1, 0x1c, 0x924},
+	{ 0xa750, 1, 0x1c, 0xfff},
+	{ 0xa754, 3, 0x1c, 0x924},
+	{ 0xa760, 5, 0x4, 0x924},
+	{ 0xa774, 7, 0x1c, 0x924},
+	{ 0xa790, 15, 0x4, 0x924},
+	{ 0xa7cc, 4, 0x1c, 0x924},
+	{ 0xa7e0, 6, 0x18, 0x924},
+	{ 0xa800, 18, 0x4, 0x924},
+	{ 0xa848, 33, 0x1c, 0x924},
+	{ 0xa8cc, 2, 0x18, 0x924},
+	{ 0xa8d4, 4, 0x1c, 0x924},
+	{ 0xa8e4, 1, 0x18, 0x924},
+	{ 0xa8e8, 1, 0x1c, 0x924},
+	{ 0xa8f0, 1, 0x1c, 0x924},
+	{ 0xa8f8, 30, 0x18, 0x924},
+	{ 0xa974, 73, 0x18, 0x924},
+	{ 0xac30, 1, 0x18, 0x924},
+	{ 0xac40, 1, 0x18, 0x924},
+	{ 0xac50, 1, 0x18, 0x924},
+	{ 0xac60, 1, 0x10, 0x924},
+	{ 0x10000, 9, 0x1f, 0x924},
+	{ 0x10024, 1, 0x7, 0x924},
+	{ 0x10028, 5, 0x1f, 0x924},
+	{ 0x1003c, 6, 0x7, 0x924},
+	{ 0x10054, 20, 0x1f, 0x924},
+	{ 0x100a4, 4, 0x7, 0x924},
+	{ 0x100b4, 11, 0x1f, 0x924},
+	{ 0x100e0, 4, 0x7, 0x924},
+	{ 0x100f0, 8, 0x1f, 0x924},
+	{ 0x10110, 6, 0x7, 0x924},
+	{ 0x10128, 110, 0x1f, 0x924},
+	{ 0x102e0, 4, 0x7, 0x924},
+	{ 0x102f0, 18, 0x1f, 0x924},
+	{ 0x10338, 20, 0x7, 0x924},
+	{ 0x10388, 10, 0x1f, 0x924},
+	{ 0x103d0, 2, 0x3, 0x1fff},
+	{ 0x103dc, 1, 0x3, 0x1fff},
+	{ 0x10400, 6, 0x7, 0x924},
+	{ 0x10418, 1, 0x1f, 0xfff},
+	{ 0x1041c, 1, 0x1f, 0x924},
+	{ 0x10420, 1, 0x1f, 0xfff},
+	{ 0x10424, 1, 0x1f, 0x924},
+	{ 0x10428, 1, 0x1f, 0xfff},
+	{ 0x1042c, 1, 0x1f, 0x924},
+	{ 0x10430, 10, 0x7, 0x924},
+	{ 0x10458, 2, 0x1f, 0x924},
+	{ 0x10460, 1, 0x1f, 0xfff},
+	{ 0x10464, 4, 0x1f, 0x924},
+	{ 0x10474, 1, 0x1f, 0xfff},
+	{ 0x10478, 14, 0x1f, 0x924},
+	{ 0x104b0, 12, 0x7, 0x924},
+	{ 0x104e0, 1, 0x1f, 0xfff},
+	{ 0x104e8, 1, 0x1f, 0x924},
+	{ 0x104ec, 1, 0x1f, 0xfff},
+	{ 0x104f4, 1, 0x1f, 0x924},
+	{ 0x104f8, 1, 0x1f, 0xfff},
+	{ 0x10500, 2, 0x1f, 0x924},
+	{ 0x10508, 1, 0x1f, 0xfff},
+	{ 0x1050c, 9, 0x1f, 0x924},
+	{ 0x10530, 1, 0x1f, 0xfff},
+	{ 0x10534, 1, 0x1f, 0x924},
+	{ 0x10538, 1, 0x1f, 0xfff},
+	{ 0x1053c, 3, 0x1f, 0x924},
+	{ 0x10548, 1, 0x1f, 0xfff},
+	{ 0x1054c, 3, 0x1f, 0x924},
+	{ 0x10558, 1, 0x1f, 0xfff},
+	{ 0x1055c, 123, 0x1f, 0x924},
+	{ 0x10750, 2, 0x7, 0x924},
+	{ 0x10760, 2, 0x7, 0x924},
+	{ 0x10770, 2, 0x7, 0x924},
+	{ 0x10780, 2, 0x7, 0x924},
+	{ 0x10790, 2, 0x1f, 0x924},
+	{ 0x107a0, 2, 0x7, 0x924},
+	{ 0x107b0, 2, 0x7, 0x924},
+	{ 0x107c0, 2, 0x7, 0x924},
+	{ 0x107d0, 2, 0x7, 0x924},
+	{ 0x107e0, 2, 0x1f, 0x924},
+	{ 0x10880, 2, 0x1f, 0x924},
+	{ 0x10900, 2, 0x1f, 0x924},
+	{ 0x16000, 1, 0x6, 0x924},
+	{ 0x16004, 25, 0x1e, 0x924},
+	{ 0x16070, 8, 0x1e, 0x924},
+	{ 0x16090, 4, 0xe, 0x924},
+	{ 0x160a0, 6, 0x1e, 0x924},
+	{ 0x160c0, 7, 0x1e, 0x924},
+	{ 0x160dc, 2, 0x6, 0x924},
+	{ 0x160e4, 6, 0x1e, 0x924},
+	{ 0x160fc, 4, 0x1e, 0x1fff},
+	{ 0x1610c, 2, 0x6, 0x924},
+	{ 0x16114, 6, 0x1e, 0x924},
+	{ 0x16140, 48, 0x1e, 0x1fff},
+	{ 0x16204, 5, 0x1e, 0x924},
+	{ 0x18000, 1, 0x1e, 0x924},
+	{ 0x18008, 1, 0x1e, 0x924},
+	{ 0x18010, 35, 0x1c, 0x924},
+	{ 0x180a4, 2, 0x1c, 0x924},
+	{ 0x180c0, 9, 0x1c, 0x924},
+	{ 0x180e4, 1, 0xc, 0x924},
+	{ 0x180e8, 2, 0x1c, 0x924},
+	{ 0x180f0, 1, 0xc, 0x924},
+	{ 0x180f4, 79, 0x1c, 0x924},
+	{ 0x18230, 1, 0xc, 0x924},
+	{ 0x18234, 2, 0x1c, 0x924},
+	{ 0x1823c, 1, 0xc, 0x924},
+	{ 0x18240, 13, 0x1c, 0x924},
+	{ 0x18274, 1, 0x4, 0x924},
+	{ 0x18278, 12, 0x1c, 0x924},
+	{ 0x182a8, 1, 0x1c, 0xfff},
+	{ 0x182ac, 3, 0x1c, 0x924},
+	{ 0x182b8, 1, 0x1c, 0xfff},
+	{ 0x182bc, 19, 0x1c, 0x924},
+	{ 0x18308, 1, 0x1c, 0xfff},
+	{ 0x1830c, 3, 0x1c, 0x924},
+	{ 0x18318, 1, 0x1c, 0xfff},
+	{ 0x1831c, 7, 0x1c, 0x924},
+	{ 0x18338, 1, 0x1c, 0xfff},
+	{ 0x1833c, 3, 0x1c, 0x924},
+	{ 0x18348, 1, 0x1c, 0xfff},
+	{ 0x1834c, 28, 0x1c, 0x924},
+	{ 0x183bc, 2, 0x1c, 0x1fff},
+	{ 0x183c8, 3, 0x1c, 0x1fff},
+	{ 0x183d8, 1, 0x1c, 0x1fff},
+	{ 0x18440, 48, 0x1c, 0x1fff},
+	{ 0x18500, 15, 0x1c, 0x924},
+	{ 0x18570, 1, 0x18, 0xfff},
+	{ 0x18574, 1, 0x18, 0x924},
+	{ 0x18578, 1, 0x18, 0xfff},
+	{ 0x1857c, 4, 0x18, 0x924},
+	{ 0x1858c, 1, 0x18, 0xfff},
+	{ 0x18590, 1, 0x18, 0x924},
+	{ 0x18594, 1, 0x18, 0xfff},
+	{ 0x18598, 32, 0x18, 0x924},
+	{ 0x18618, 5, 0x10, 0x924},
+	{ 0x1862c, 4, 0x10, 0xfff},
+	{ 0x1863c, 16, 0x10, 0x924},
+	{ 0x18680, 44, 0x10, 0x924},
+	{ 0x18748, 12, 0x10, 0x924},
+	{ 0x18788, 1, 0x10, 0x924},
+	{ 0x1879c, 6, 0x10, 0x924},
+	{ 0x187c4, 51, 0x10, 0x924},
+	{ 0x18a00, 48, 0x10, 0x924},
+	{ 0x20000, 24, 0x1f, 0x924},
+	{ 0x20060, 8, 0x1f, 0x9e4},
+	{ 0x20080, 94, 0x1f, 0x924},
+	{ 0x201f8, 1, 0x3, 0x924},
+	{ 0x201fc, 1, 0x1f, 0x924},
+	{ 0x20200, 1, 0x3, 0x924},
+	{ 0x20204, 1, 0x1f, 0x924},
+	{ 0x20208, 1, 0x3, 0x924},
+	{ 0x2020c, 4, 0x1f, 0x924},
+	{ 0x2021c, 11, 0x1f, 0xfff},
+	{ 0x20248, 24, 0x1f, 0x924},
+	{ 0x202b8, 2, 0x1f, 0x1fff},
+	{ 0x202c4, 1, 0x1f, 0x1fff},
+	{ 0x202c8, 1, 0x1c, 0x924},
+	{ 0x202d8, 4, 0x1c, 0x924},
+	{ 0x202f0, 1, 0x10, 0x924},
+	{ 0x20400, 1, 0x1f, 0x924},
+	{ 0x20404, 1, 0x1f, 0xfff},
+	{ 0x2040c, 2, 0x1f, 0xfff},
+	{ 0x20414, 2, 0x1f, 0x924},
+	{ 0x2041c, 2, 0x1f, 0xfff},
+	{ 0x20424, 2, 0x1f, 0x924},
+	{ 0x2042c, 18, 0x1e, 0x924},
+	{ 0x20480, 1, 0x1f, 0x924},
+	{ 0x20500, 1, 0x1f, 0x924},
+	{ 0x20600, 1, 0x1f, 0x924},
+	{ 0x28000, 1, 0x1f, 0x9e4},
+	{ 0x28004, 255, 0x1f, 0x180},
+	{ 0x28400, 1, 0x1f, 0x1c0},
+	{ 0x28404, 255, 0x1f, 0x180},
+	{ 0x28800, 1, 0x1f, 0x1c0},
+	{ 0x28804, 255, 0x1f, 0x180},
+	{ 0x28c00, 1, 0x1f, 0x1c0},
+	{ 0x28c04, 255, 0x1f, 0x180},
+	{ 0x29000, 1, 0x1f, 0x1c0},
+	{ 0x29004, 255, 0x1f, 0x180},
+	{ 0x29400, 1, 0x1f, 0x1c0},
+	{ 0x29404, 255, 0x1f, 0x180},
+	{ 0x29800, 1, 0x1f, 0x1c0},
+	{ 0x29804, 255, 0x1f, 0x180},
+	{ 0x29c00, 1, 0x1f, 0x1c0},
+	{ 0x29c04, 255, 0x1f, 0x180},
+	{ 0x2a000, 1, 0x1f, 0x1c0},
+	{ 0x2a004, 255, 0x1f, 0x180},
+	{ 0x2a400, 1, 0x1f, 0x1c0},
+	{ 0x2a404, 255, 0x1f, 0x180},
+	{ 0x2a800, 1, 0x1f, 0x1c0},
+	{ 0x2a804, 255, 0x1f, 0x180},
+	{ 0x2ac00, 1, 0x1f, 0x1c0},
+	{ 0x2ac04, 255, 0x1f, 0x180},
+	{ 0x2b000, 1, 0x1f, 0x1c0},
+	{ 0x2b004, 255, 0x1f, 0x180},
+	{ 0x2b400, 1, 0x1f, 0x1c0},
+	{ 0x2b404, 255, 0x1f, 0x180},
+	{ 0x2b800, 1, 0x1f, 0x1c0},
+	{ 0x2b804, 255, 0x1f, 0x180},
+	{ 0x2bc00, 1, 0x1f, 0x1c0},
+	{ 0x2bc04, 255, 0x1f, 0x180},
+	{ 0x2c000, 1, 0x1f, 0x1c0},
+	{ 0x2c004, 255, 0x1f, 0x180},
+	{ 0x2c400, 1, 0x1f, 0x1c0},
+	{ 0x2c404, 255, 0x1f, 0x180},
+	{ 0x2c800, 1, 0x1f, 0x1c0},
+	{ 0x2c804, 255, 0x1f, 0x180},
+	{ 0x2cc00, 1, 0x1f, 0x1c0},
+	{ 0x2cc04, 255, 0x1f, 0x180},
+	{ 0x2d000, 1, 0x1f, 0x1c0},
+	{ 0x2d004, 255, 0x1f, 0x180},
+	{ 0x2d400, 1, 0x1f, 0x1c0},
+	{ 0x2d404, 255, 0x1f, 0x180},
+	{ 0x2d800, 1, 0x1f, 0x1c0},
+	{ 0x2d804, 255, 0x1f, 0x180},
+	{ 0x2dc00, 1, 0x1f, 0x1c0},
+	{ 0x2dc04, 255, 0x1f, 0x180},
+	{ 0x2e000, 1, 0x1f, 0x1c0},
+	{ 0x2e004, 255, 0x1f, 0x180},
+	{ 0x2e400, 1, 0x1f, 0x1c0},
+	{ 0x2e404, 255, 0x1f, 0x180},
+	{ 0x2e800, 1, 0x1f, 0x1c0},
+	{ 0x2e804, 255, 0x1f, 0x180},
+	{ 0x2ec00, 1, 0x1f, 0x1c0},
+	{ 0x2ec04, 255, 0x1f, 0x180},
+	{ 0x2f000, 1, 0x1f, 0x1c0},
+	{ 0x2f004, 255, 0x1f, 0x180},
+	{ 0x2f400, 1, 0x1f, 0x1c0},
+	{ 0x2f404, 255, 0x1f, 0x180},
+	{ 0x2f800, 1, 0x1f, 0x1c0},
+	{ 0x2f804, 255, 0x1f, 0x180},
+	{ 0x2fc00, 1, 0x1f, 0x1c0},
+	{ 0x2fc04, 255, 0x1f, 0x180},
+	{ 0x30000, 1, 0x1f, 0x9e4},
+	{ 0x30004, 255, 0x1f, 0x180},
+	{ 0x30400, 1, 0x1f, 0x1c0},
+	{ 0x30404, 255, 0x1f, 0x180},
+	{ 0x30800, 1, 0x1f, 0x1c0},
+	{ 0x30804, 255, 0x1f, 0x180},
+	{ 0x30c00, 1, 0x1f, 0x1c0},
+	{ 0x30c04, 255, 0x1f, 0x180},
+	{ 0x31000, 1, 0x1f, 0x1c0},
+	{ 0x31004, 255, 0x1f, 0x180},
+	{ 0x31400, 1, 0x1f, 0x1c0},
+	{ 0x31404, 255, 0x1f, 0x180},
+	{ 0x31800, 1, 0x1f, 0x1c0},
+	{ 0x31804, 255, 0x1f, 0x180},
+	{ 0x31c00, 1, 0x1f, 0x1c0},
+	{ 0x31c04, 255, 0x1f, 0x180},
+	{ 0x32000, 1, 0x1f, 0x1c0},
+	{ 0x32004, 255, 0x1f, 0x180},
+	{ 0x32400, 1, 0x1f, 0x1c0},
+	{ 0x32404, 255, 0x1f, 0x180},
+	{ 0x32800, 1, 0x1f, 0x1c0},
+	{ 0x32804, 255, 0x1f, 0x180},
+	{ 0x32c00, 1, 0x1f, 0x1c0},
+	{ 0x32c04, 255, 0x1f, 0x180},
+	{ 0x33000, 1, 0x1f, 0x1c0},
+	{ 0x33004, 255, 0x1f, 0x180},
+	{ 0x33400, 1, 0x1f, 0x1c0},
+	{ 0x33404, 255, 0x1f, 0x180},
+	{ 0x33800, 1, 0x1f, 0x1c0},
+	{ 0x33804, 255, 0x1f, 0x180},
+	{ 0x33c00, 1, 0x1f, 0x1c0},
+	{ 0x33c04, 255, 0x1f, 0x180},
+	{ 0x34000, 1, 0x1f, 0x1c0},
+	{ 0x34004, 255, 0x1f, 0x180},
+	{ 0x34400, 1, 0x1f, 0x1c0},
+	{ 0x34404, 255, 0x1f, 0x180},
+	{ 0x34800, 1, 0x1f, 0x1c0},
+	{ 0x34804, 255, 0x1f, 0x180},
+	{ 0x34c00, 1, 0x1f, 0x1c0},
+	{ 0x34c04, 255, 0x1f, 0x180},
+	{ 0x35000, 1, 0x1f, 0x1c0},
+	{ 0x35004, 255, 0x1f, 0x180},
+	{ 0x35400, 1, 0x1f, 0x1c0},
+	{ 0x35404, 255, 0x1f, 0x180},
+	{ 0x35800, 1, 0x1f, 0x1c0},
+	{ 0x35804, 255, 0x1f, 0x180},
+	{ 0x35c00, 1, 0x1f, 0x1c0},
+	{ 0x35c04, 255, 0x1f, 0x180},
+	{ 0x36000, 1, 0x1f, 0x1c0},
+	{ 0x36004, 255, 0x1f, 0x180},
+	{ 0x36400, 1, 0x1f, 0x1c0},
+	{ 0x36404, 255, 0x1f, 0x180},
+	{ 0x36800, 1, 0x1f, 0x1c0},
+	{ 0x36804, 255, 0x1f, 0x180},
+	{ 0x36c00, 1, 0x1f, 0x1c0},
+	{ 0x36c04, 255, 0x1f, 0x180},
+	{ 0x37000, 1, 0x1f, 0x1c0},
+	{ 0x37004, 255, 0x1f, 0x180},
+	{ 0x37400, 1, 0x1f, 0x1c0},
+	{ 0x37404, 255, 0x1f, 0x180},
+	{ 0x37800, 1, 0x1f, 0x1c0},
+	{ 0x37804, 255, 0x1f, 0x180},
+	{ 0x37c00, 1, 0x1f, 0x1c0},
+	{ 0x37c04, 255, 0x1f, 0x180},
+	{ 0x38000, 1, 0x1f, 0x1c0},
+	{ 0x38004, 255, 0x1f, 0x180},
+	{ 0x38400, 1, 0x1f, 0x1c0},
+	{ 0x38404, 255, 0x1f, 0x180},
+	{ 0x38800, 1, 0x1f, 0x1c0},
+	{ 0x38804, 255, 0x1f, 0x180},
+	{ 0x38c00, 1, 0x1f, 0x1c0},
+	{ 0x38c04, 255, 0x1f, 0x180},
+	{ 0x39000, 1, 0x1f, 0x1c0},
+	{ 0x39004, 255, 0x1f, 0x180},
+	{ 0x39400, 1, 0x1f, 0x1c0},
+	{ 0x39404, 255, 0x1f, 0x180},
+	{ 0x39800, 1, 0x1f, 0x1c0},
+	{ 0x39804, 255, 0x1f, 0x180},
+	{ 0x39c00, 1, 0x1f, 0x1c0},
+	{ 0x39c04, 255, 0x1f, 0x180},
+	{ 0x3a000, 1, 0x1f, 0x1c0},
+	{ 0x3a004, 255, 0x1f, 0x180},
+	{ 0x3a400, 1, 0x1f, 0x1c0},
+	{ 0x3a404, 255, 0x1f, 0x180},
+	{ 0x3a800, 1, 0x1f, 0x1c0},
+	{ 0x3a804, 255, 0x1f, 0x180},
+	{ 0x3ac00, 1, 0x1f, 0x1c0},
+	{ 0x3ac04, 255, 0x1f, 0x180},
+	{ 0x3b000, 1, 0x1f, 0x1c0},
+	{ 0x3b004, 255, 0x1f, 0x180},
+	{ 0x3b400, 1, 0x1f, 0x1c0},
+	{ 0x3b404, 255, 0x1f, 0x180},
+	{ 0x3b800, 1, 0x1f, 0x1c0},
+	{ 0x3b804, 255, 0x1f, 0x180},
+	{ 0x3bc00, 1, 0x1f, 0x1c0},
+	{ 0x3bc04, 255, 0x1f, 0x180},
+	{ 0x3c000, 1, 0x1f, 0x1c0},
+	{ 0x3c004, 255, 0x1f, 0x180},
+	{ 0x3c400, 1, 0x1f, 0x1c0},
+	{ 0x3c404, 255, 0x1f, 0x180},
+	{ 0x3c800, 1, 0x1f, 0x1c0},
+	{ 0x3c804, 255, 0x1f, 0x180},
+	{ 0x3cc00, 1, 0x1f, 0x1c0},
+	{ 0x3cc04, 255, 0x1f, 0x180},
+	{ 0x3d000, 1, 0x1f, 0x1c0},
+	{ 0x3d004, 255, 0x1f, 0x180},
+	{ 0x3d400, 1, 0x1f, 0x1c0},
+	{ 0x3d404, 255, 0x1f, 0x180},
+	{ 0x3d800, 1, 0x1f, 0x1c0},
+	{ 0x3d804, 255, 0x1f, 0x180},
+	{ 0x3dc00, 1, 0x1f, 0x1c0},
+	{ 0x3dc04, 255, 0x1f, 0x180},
+	{ 0x3e000, 1, 0x1f, 0x1c0},
+	{ 0x3e004, 255, 0x1f, 0x180},
+	{ 0x3e400, 1, 0x1f, 0x1c0},
+	{ 0x3e404, 255, 0x1f, 0x180},
+	{ 0x3e800, 1, 0x1f, 0x1c0},
+	{ 0x3e804, 255, 0x1f, 0x180},
+	{ 0x3ec00, 1, 0x1f, 0x1c0},
+	{ 0x3ec04, 255, 0x1f, 0x180},
+	{ 0x3f000, 1, 0x1f, 0x1c0},
+	{ 0x3f004, 255, 0x1f, 0x180},
+	{ 0x3f400, 1, 0x1f, 0x1c0},
+	{ 0x3f404, 255, 0x1f, 0x180},
+	{ 0x3f800, 1, 0x1f, 0x1c0},
+	{ 0x3f804, 255, 0x1f, 0x180},
+	{ 0x3fc00, 1, 0x1f, 0x1c0},
+	{ 0x3fc04, 255, 0x1f, 0x180},
+	{ 0x40000, 85, 0x1f, 0x924},
+	{ 0x40154, 13, 0x1f, 0xfff},
+	{ 0x40198, 2, 0x1f, 0x1fff},
+	{ 0x401a4, 1, 0x1f, 0x1fff},
+	{ 0x401a8, 8, 0x1e, 0x924},
+	{ 0x401c8, 1, 0x2, 0x924},
+	{ 0x401cc, 2, 0x1e, 0x924},
+	{ 0x401d4, 2, 0x1c, 0x924},
+	{ 0x40200, 4, 0x1f, 0x924},
+	{ 0x40220, 6, 0x1c, 0x924},
+	{ 0x40238, 8, 0xc, 0x924},
+	{ 0x40258, 4, 0x1c, 0x924},
+	{ 0x40268, 2, 0x18, 0x924},
+	{ 0x40270, 17, 0x10, 0x924},
+	{ 0x40400, 43, 0x1f, 0x924},
+	{ 0x404bc, 2, 0x1f, 0x1fff},
+	{ 0x404c8, 1, 0x1f, 0x1fff},
+	{ 0x404cc, 3, 0x1e, 0x924},
+	{ 0x404e0, 1, 0x1c, 0x924},
+	{ 0x40500, 2, 0x1f, 0x924},
+	{ 0x40510, 2, 0x1f, 0x924},
+	{ 0x40520, 2, 0x1f, 0x924},
+	{ 0x40530, 2, 0x1f, 0x924},
+	{ 0x40540, 2, 0x1f, 0x924},
+	{ 0x40550, 10, 0x1c, 0x924},
+	{ 0x40610, 2, 0x1c, 0x924},
+	{ 0x42000, 164, 0x1f, 0x924},
+	{ 0x422b0, 2, 0x1f, 0x1fff},
+	{ 0x422bc, 1, 0x1f, 0x1fff},
+	{ 0x422c0, 4, 0x1c, 0x924},
+	{ 0x422d4, 5, 0x1e, 0x924},
+	{ 0x422e8, 1, 0x1c, 0x924},
+	{ 0x42400, 49, 0x1f, 0x924},
+	{ 0x424c8, 32, 0x1f, 0x924},
+	{ 0x42548, 1, 0x1f, 0xfff},
+	{ 0x4254c, 1, 0x1f, 0x924},
+	{ 0x42550, 1, 0x1f, 0xfff},
+	{ 0x42554, 1, 0x1f, 0x924},
+	{ 0x42558, 1, 0x1f, 0xfff},
+	{ 0x4255c, 1, 0x1f, 0x924},
+	{ 0x42568, 2, 0x1f, 0x924},
+	{ 0x42640, 5, 0x1c, 0x924},
+	{ 0x42800, 1, 0x1f, 0x924},
+	{ 0x50000, 1, 0x1f, 0x1fff},
+	{ 0x50004, 19, 0x1f, 0x924},
+	{ 0x50050, 8, 0x1f, 0x93c},
+	{ 0x50070, 60, 0x1f, 0x924},
+	{ 0x50160, 8, 0x1f, 0xfff},
+	{ 0x50180, 20, 0x1f, 0x924},
+	{ 0x501e0, 2, 0x1f, 0x1fff},
+	{ 0x501ec, 1, 0x1f, 0x1fff},
+	{ 0x501f0, 4, 0x1e, 0x924},
+	{ 0x50200, 1, 0x1f, 0x924},
+	{ 0x50204, 1, 0x1f, 0xfff},
+	{ 0x5020c, 2, 0x1f, 0xfff},
+	{ 0x50214, 2, 0x1f, 0x924},
+	{ 0x5021c, 1, 0x1f, 0xfff},
+	{ 0x50220, 2, 0x1f, 0x924},
+	{ 0x50228, 6, 0x1e, 0x924},
+	{ 0x50240, 1, 0x1f, 0x924},
+	{ 0x50280, 1, 0x1f, 0x924},
+	{ 0x50300, 1, 0x1c, 0x924},
+	{ 0x5030c, 1, 0x1c, 0x924},
+	{ 0x50318, 1, 0x1c, 0x934},
+	{ 0x5031c, 1, 0x1c, 0x924},
+	{ 0x50320, 2, 0x1c, 0x934},
+	{ 0x50330, 1, 0x10, 0x924},
+	{ 0x52000, 1, 0x1f, 0x924},
+	{ 0x54000, 1, 0x1f, 0x93c},
+	{ 0x54004, 255, 0x1f, 0x30},
+	{ 0x54400, 1, 0x1f, 0x38},
+	{ 0x54404, 255, 0x1f, 0x30},
+	{ 0x54800, 1, 0x1f, 0x38},
+	{ 0x54804, 255, 0x1f, 0x30},
+	{ 0x54c00, 1, 0x1f, 0x38},
+	{ 0x54c04, 255, 0x1f, 0x30},
+	{ 0x55000, 1, 0x1f, 0x38},
+	{ 0x55004, 255, 0x1f, 0x30},
+	{ 0x55400, 1, 0x1f, 0x38},
+	{ 0x55404, 255, 0x1f, 0x30},
+	{ 0x55800, 1, 0x1f, 0x38},
+	{ 0x55804, 255, 0x1f, 0x30},
+	{ 0x55c00, 1, 0x1f, 0x38},
+	{ 0x55c04, 255, 0x1f, 0x30},
+	{ 0x56000, 1, 0x1f, 0x38},
+	{ 0x56004, 255, 0x1f, 0x30},
+	{ 0x56400, 1, 0x1f, 0x38},
+	{ 0x56404, 255, 0x1f, 0x30},
+	{ 0x56800, 1, 0x1f, 0x38},
+	{ 0x56804, 255, 0x1f, 0x30},
+	{ 0x56c00, 1, 0x1f, 0x38},
+	{ 0x56c04, 255, 0x1f, 0x30},
+	{ 0x57000, 1, 0x1f, 0x38},
+	{ 0x57004, 255, 0x1f, 0x30},
+	{ 0x58000, 1, 0x1f, 0x934},
+	{ 0x58004, 8191, 0x3, 0x30},
+	{ 0x60000, 26, 0x1f, 0x924},
+	{ 0x60068, 8, 0x3, 0x924},
+	{ 0x60088, 2, 0x1f, 0x924},
+	{ 0x60090, 1, 0x1f, 0xfff},
+	{ 0x60094, 9, 0x1f, 0x924},
+	{ 0x600b8, 9, 0x3, 0x924},
+	{ 0x600dc, 1, 0x1f, 0x924},
+	{ 0x600e0, 5, 0x3, 0x924},
+	{ 0x600f4, 1, 0x7, 0x924},
+	{ 0x600f8, 1, 0x3, 0x924},
+	{ 0x600fc, 8, 0x1f, 0x924},
+	{ 0x6012c, 2, 0x1f, 0x1fff},
+	{ 0x60138, 1, 0x1f, 0x1fff},
+	{ 0x6013c, 24, 0x2, 0x924},
+	{ 0x6019c, 2, 0x1c, 0x924},
+	{ 0x601ac, 18, 0x1c, 0x924},
+	{ 0x60200, 1, 0x1f, 0xb6d},
+	{ 0x60204, 2, 0x1f, 0x249},
+	{ 0x60210, 13, 0x1c, 0x924},
+	{ 0x60244, 16, 0x10, 0x924},
+	{ 0x61000, 1, 0x1f, 0xb6d},
+	{ 0x61004, 511, 0x1f, 0x249},
+	{ 0x61800, 512, 0x18, 0x249},
+	{ 0x70000, 8, 0x1f, 0xb6d},
+	{ 0x70020, 8184, 0x1f, 0x249},
+	{ 0x78000, 8192, 0x18, 0x249},
+	{ 0x85000, 3, 0x1f, 0x1000},
+	{ 0x8501c, 7, 0x1f, 0x1000},
+	{ 0x85048, 1, 0x1f, 0x1000},
+	{ 0x85200, 32, 0x1f, 0x1000},
+	{ 0xa0000, 16384, 0x3, 0x1000},
+	{ 0xb0000, 16384, 0x2, 0x1000},
+	{ 0xc1000, 7, 0x1f, 0x924},
+	{ 0xc102c, 2, 0x1f, 0x1fff},
+	{ 0xc1038, 1, 0x1f, 0x1fff},
+	{ 0xc103c, 2, 0x1c, 0x924},
+	{ 0xc1800, 2, 0x1f, 0x924},
+	{ 0xc2000, 164, 0x1f, 0x924},
+	{ 0xc22b0, 2, 0x1f, 0x1fff},
+	{ 0xc22bc, 1, 0x1f, 0x1fff},
+	{ 0xc22c0, 5, 0x1c, 0x924},
+	{ 0xc22d8, 4, 0x1c, 0x924},
+	{ 0xc2400, 49, 0x1f, 0x924},
+	{ 0xc24c8, 32, 0x1f, 0x924},
+	{ 0xc2548, 1, 0x1f, 0xfff},
+	{ 0xc254c, 1, 0x1f, 0x924},
+	{ 0xc2550, 1, 0x1f, 0xfff},
+	{ 0xc2554, 1, 0x1f, 0x924},
+	{ 0xc2558, 1, 0x1f, 0xfff},
+	{ 0xc255c, 1, 0x1f, 0x924},
+	{ 0xc2568, 2, 0x1f, 0x924},
+	{ 0xc2600, 1, 0x1f, 0x924},
+	{ 0xc4000, 165, 0x1f, 0x924},
+	{ 0xc42b4, 2, 0x1f, 0x1fff},
+	{ 0xc42c0, 1, 0x1f, 0x1fff},
+	{ 0xc42d8, 2, 0x1c, 0x924},
+	{ 0xc42e0, 7, 0x1e, 0x924},
+	{ 0xc42fc, 1, 0x1c, 0x924},
+	{ 0xc4400, 51, 0x1f, 0x924},
+	{ 0xc44d0, 32, 0x1f, 0x924},
+	{ 0xc4550, 1, 0x1f, 0xfff},
+	{ 0xc4554, 1, 0x1f, 0x924},
+	{ 0xc4558, 1, 0x1f, 0xfff},
+	{ 0xc455c, 1, 0x1f, 0x924},
+	{ 0xc4560, 1, 0x1f, 0xfff},
+	{ 0xc4564, 1, 0x1f, 0x924},
+	{ 0xc4570, 2, 0x1f, 0x924},
+	{ 0xc4578, 5, 0x1c, 0x924},
+	{ 0xc4600, 1, 0x1f, 0x924},
+	{ 0xd0000, 19, 0x1f, 0x924},
+	{ 0xd004c, 8, 0x1f, 0x1927},
+	{ 0xd006c, 64, 0x1f, 0x924},
+	{ 0xd016c, 8, 0x1f, 0xfff},
+	{ 0xd018c, 19, 0x1f, 0x924},
+	{ 0xd01e8, 2, 0x1f, 0x1fff},
+	{ 0xd01f4, 1, 0x1f, 0x1fff},
+	{ 0xd01fc, 1, 0x1c, 0x924},
+	{ 0xd0200, 1, 0x1f, 0x924},
+	{ 0xd0204, 1, 0x1f, 0xfff},
+	{ 0xd020c, 3, 0x1f, 0xfff},
+	{ 0xd0218, 4, 0x1f, 0x924},
+	{ 0xd0228, 18, 0x1e, 0x924},
+	{ 0xd0280, 1, 0x1f, 0x924},
+	{ 0xd0300, 1, 0x1f, 0x924},
+	{ 0xd0400, 1, 0x1f, 0x924},
+	{ 0xd0818, 1, 0x10, 0x924},
+	{ 0xd4000, 1, 0x1f, 0x1927},
+	{ 0xd4004, 255, 0x1f, 0x6},
+	{ 0xd4400, 1, 0x1f, 0x1007},
+	{ 0xd4404, 255, 0x1f, 0x6},
+	{ 0xd4800, 1, 0x1f, 0x1007},
+	{ 0xd4804, 255, 0x1f, 0x6},
+	{ 0xd4c00, 1, 0x1f, 0x1007},
+	{ 0xd4c04, 255, 0x1f, 0x6},
+	{ 0xd5000, 1, 0x1f, 0x1007},
+	{ 0xd5004, 255, 0x1f, 0x6},
+	{ 0xd5400, 1, 0x1f, 0x1007},
+	{ 0xd5404, 255, 0x1f, 0x6},
+	{ 0xd5800, 1, 0x1f, 0x1007},
+	{ 0xd5804, 255, 0x1f, 0x6},
+	{ 0xd5c00, 1, 0x1f, 0x1007},
+	{ 0xd5c04, 255, 0x1f, 0x6},
+	{ 0xd6000, 1, 0x1f, 0x1007},
+	{ 0xd6004, 255, 0x1f, 0x6},
+	{ 0xd6400, 1, 0x1f, 0x1007},
+	{ 0xd6404, 255, 0x1f, 0x6},
+	{ 0xd8000, 1, 0x1f, 0x1927},
+	{ 0xd8004, 255, 0x1f, 0x6},
+	{ 0xd8400, 1, 0x1f, 0x1007},
+	{ 0xd8404, 255, 0x1f, 0x6},
+	{ 0xd8800, 1, 0x1f, 0x1007},
+	{ 0xd8804, 255, 0x1f, 0x6},
+	{ 0xd8c00, 1, 0x1f, 0x1007},
+	{ 0xd8c04, 255, 0x1f, 0x6},
+	{ 0xd9000, 1, 0x1f, 0x1007},
+	{ 0xd9004, 255, 0x1f, 0x6},
+	{ 0xd9400, 1, 0x1f, 0x1007},
+	{ 0xd9404, 255, 0x1f, 0x6},
+	{ 0xd9800, 1, 0x1f, 0x1007},
+	{ 0xd9804, 255, 0x1f, 0x6},
+	{ 0xd9c00, 1, 0x1f, 0x1007},
+	{ 0xd9c04, 255, 0x1f, 0x6},
+	{ 0xda000, 1, 0x1f, 0x1007},
+	{ 0xda004, 255, 0x1f, 0x6},
+	{ 0xda400, 1, 0x1f, 0x1007},
+	{ 0xda404, 255, 0x1f, 0x6},
+	{ 0xda800, 1, 0x1f, 0x1007},
+	{ 0xda804, 255, 0x1f, 0x6},
+	{ 0xdac00, 1, 0x1f, 0x1007},
+	{ 0xdac04, 255, 0x1f, 0x6},
+	{ 0xdb000, 1, 0x1f, 0x1007},
+	{ 0xdb004, 255, 0x1f, 0x6},
+	{ 0xdb400, 1, 0x1f, 0x1007},
+	{ 0xdb404, 255, 0x1f, 0x6},
+	{ 0xdb800, 1, 0x1f, 0x1007},
+	{ 0xdb804, 255, 0x1f, 0x6},
+	{ 0xdbc00, 1, 0x1f, 0x1007},
+	{ 0xdbc04, 255, 0x1f, 0x6},
+	{ 0xdc000, 1, 0x1f, 0x1007},
+	{ 0xdc004, 255, 0x1f, 0x6},
+	{ 0xdc400, 1, 0x1f, 0x1007},
+	{ 0xdc404, 255, 0x1f, 0x6},
+	{ 0xdc800, 1, 0x1f, 0x1007},
+	{ 0xdc804, 255, 0x1f, 0x6},
+	{ 0xdcc00, 1, 0x1f, 0x1007},
+	{ 0xdcc04, 255, 0x1f, 0x6},
+	{ 0xdd000, 1, 0x1f, 0x1007},
+	{ 0xdd004, 255, 0x1f, 0x6},
+	{ 0xdd400, 1, 0x1f, 0x1007},
+	{ 0xdd404, 255, 0x1f, 0x6},
+	{ 0xdd800, 1, 0x1f, 0x1007},
+	{ 0xdd804, 255, 0x1f, 0x6},
+	{ 0xddc00, 1, 0x1f, 0x1007},
+	{ 0xddc04, 255, 0x1f, 0x6},
+	{ 0xde000, 1, 0x1f, 0x1007},
+	{ 0xde004, 255, 0x1f, 0x6},
+	{ 0xde400, 1, 0x1f, 0x1007},
+	{ 0xde404, 255, 0x1f, 0x6},
+	{ 0xde800, 1, 0x1f, 0x1007},
+	{ 0xde804, 255, 0x1f, 0x6},
+	{ 0xdec00, 1, 0x1f, 0x1007},
+	{ 0xdec04, 255, 0x1f, 0x6},
+	{ 0xdf000, 1, 0x1f, 0x1007},
+	{ 0xdf004, 255, 0x1f, 0x6},
+	{ 0xdf400, 1, 0x1f, 0x1007},
+	{ 0xdf404, 255, 0x1f, 0x6},
+	{ 0xdf800, 1, 0x1f, 0x1007},
+	{ 0xdf804, 255, 0x1f, 0x6},
+	{ 0xdfc00, 1, 0x1f, 0x1007},
+	{ 0xdfc04, 255, 0x1f, 0x6},
+	{ 0xe0000, 21, 0x1f, 0x924},
+	{ 0xe0054, 8, 0x1f, 0xf24},
+	{ 0xe0074, 49, 0x1f, 0x924},
+	{ 0xe0138, 1, 0x3, 0x924},
+	{ 0xe013c, 6, 0x1f, 0x924},
+	{ 0xe0154, 8, 0x1f, 0xfff},
+	{ 0xe0174, 21, 0x1f, 0x924},
+	{ 0xe01d8, 2, 0x1f, 0x1fff},
+	{ 0xe01e4, 1, 0x1f, 0x1fff},
+	{ 0xe01f4, 1, 0x4, 0x924},
+	{ 0xe01f8, 1, 0x1c, 0x924},
+	{ 0xe0200, 1, 0x1f, 0x924},
+	{ 0xe0204, 1, 0x1f, 0xfff},
+	{ 0xe020c, 2, 0x1f, 0xfff},
+	{ 0xe0214, 2, 0x1f, 0x924},
+	{ 0xe021c, 2, 0x1f, 0xfff},
+	{ 0xe0224, 2, 0x1f, 0x924},
+	{ 0xe022c, 18, 0x1e, 0x924},
+	{ 0xe0280, 1, 0x1f, 0x924},
+	{ 0xe0300, 1, 0x1f, 0x924},
+	{ 0xe0400, 1, 0x10, 0x924},
+	{ 0xe1000, 1, 0x1f, 0x924},
+	{ 0xe2000, 1, 0x1f, 0xf24},
+	{ 0xe2004, 255, 0x1f, 0xc00},
+	{ 0xe2400, 1, 0x1f, 0xe00},
+	{ 0xe2404, 255, 0x1f, 0xc00},
+	{ 0xe2800, 1, 0x1f, 0xe00},
+	{ 0xe2804, 255, 0x1f, 0xc00},
+	{ 0xe2c00, 1, 0x1f, 0xe00},
+	{ 0xe2c04, 255, 0x1f, 0xc00},
+	{ 0xe3000, 1, 0x1f, 0xe00},
+	{ 0xe3004, 255, 0x1f, 0xc00},
+	{ 0xe3400, 1, 0x1f, 0xe00},
+	{ 0xe3404, 255, 0x1f, 0xc00},
+	{ 0xe3800, 1, 0x1f, 0xe00},
+	{ 0xe3804, 255, 0x1f, 0xc00},
+	{ 0xe3c00, 1, 0x1f, 0xe00},
+	{ 0xe3c04, 255, 0x1f, 0xc00},
+	{ 0xf0000, 1, 0x1f, 0xf24},
+	{ 0xf0004, 255, 0x1f, 0xc00},
+	{ 0xf0400, 1, 0x1f, 0xe00},
+	{ 0xf0404, 255, 0x1f, 0xc00},
+	{ 0xf0800, 1, 0x1f, 0xe00},
+	{ 0xf0804, 255, 0x1f, 0xc00},
+	{ 0xf0c00, 1, 0x1f, 0xe00},
+	{ 0xf0c04, 255, 0x1f, 0xc00},
+	{ 0xf1000, 1, 0x1f, 0xe00},
+	{ 0xf1004, 255, 0x1f, 0xc00},
+	{ 0xf1400, 1, 0x1f, 0xe00},
+	{ 0xf1404, 255, 0x1f, 0xc00},
+	{ 0xf1800, 1, 0x1f, 0xe00},
+	{ 0xf1804, 255, 0x1f, 0xc00},
+	{ 0xf1c00, 1, 0x1f, 0xe00},
+	{ 0xf1c04, 255, 0x1f, 0xc00},
+	{ 0xf2000, 1, 0x1f, 0xe00},
+	{ 0xf2004, 255, 0x1f, 0xc00},
+	{ 0xf2400, 1, 0x1f, 0xe00},
+	{ 0xf2404, 255, 0x1f, 0xc00},
+	{ 0xf2800, 1, 0x1f, 0xe00},
+	{ 0xf2804, 255, 0x1f, 0xc00},
+	{ 0xf2c00, 1, 0x1f, 0xe00},
+	{ 0xf2c04, 255, 0x1f, 0xc00},
+	{ 0xf3000, 1, 0x1f, 0xe00},
+	{ 0xf3004, 255, 0x1f, 0xc00},
+	{ 0xf3400, 1, 0x1f, 0xe00},
+	{ 0xf3404, 255, 0x1f, 0xc00},
+	{ 0xf3800, 1, 0x1f, 0xe00},
+	{ 0xf3804, 255, 0x1f, 0xc00},
+	{ 0xf3c00, 1, 0x1f, 0xe00},
+	{ 0xf3c04, 255, 0x1f, 0xc00},
+	{ 0xf4000, 1, 0x1f, 0xe00},
+	{ 0xf4004, 255, 0x1f, 0xc00},
+	{ 0xf4400, 1, 0x1f, 0xe00},
+	{ 0xf4404, 255, 0x1f, 0xc00},
+	{ 0xf4800, 1, 0x1f, 0xe00},
+	{ 0xf4804, 255, 0x1f, 0xc00},
+	{ 0xf4c00, 1, 0x1f, 0xe00},
+	{ 0xf4c04, 255, 0x1f, 0xc00},
+	{ 0xf5000, 1, 0x1f, 0xe00},
+	{ 0xf5004, 255, 0x1f, 0xc00},
+	{ 0xf5400, 1, 0x1f, 0xe00},
+	{ 0xf5404, 255, 0x1f, 0xc00},
+	{ 0xf5800, 1, 0x1f, 0xe00},
+	{ 0xf5804, 255, 0x1f, 0xc00},
+	{ 0xf5c00, 1, 0x1f, 0xe00},
+	{ 0xf5c04, 255, 0x1f, 0xc00},
+	{ 0xf6000, 1, 0x1f, 0xe00},
+	{ 0xf6004, 255, 0x1f, 0xc00},
+	{ 0xf6400, 1, 0x1f, 0xe00},
+	{ 0xf6404, 255, 0x1f, 0xc00},
+	{ 0xf6800, 1, 0x1f, 0xe00},
+	{ 0xf6804, 255, 0x1f, 0xc00},
+	{ 0xf6c00, 1, 0x1f, 0xe00},
+	{ 0xf6c04, 255, 0x1f, 0xc00},
+	{ 0xf7000, 1, 0x1f, 0xe00},
+	{ 0xf7004, 255, 0x1f, 0xc00},
+	{ 0xf7400, 1, 0x1f, 0xe00},
+	{ 0xf7404, 255, 0x1f, 0xc00},
+	{ 0xf7800, 1, 0x1f, 0xe00},
+	{ 0xf7804, 255, 0x1f, 0xc00},
+	{ 0xf7c00, 1, 0x1f, 0xe00},
+	{ 0xf7c04, 255, 0x1f, 0xc00},
+	{ 0xf8000, 1, 0x1f, 0xe00},
+	{ 0xf8004, 255, 0x1f, 0xc00},
+	{ 0xf8400, 1, 0x1f, 0xe00},
+	{ 0xf8404, 255, 0x1f, 0xc00},
+	{ 0xf8800, 1, 0x1f, 0xe00},
+	{ 0xf8804, 255, 0x1f, 0xc00},
+	{ 0xf8c00, 1, 0x1f, 0xe00},
+	{ 0xf8c04, 255, 0x1f, 0xc00},
+	{ 0xf9000, 1, 0x1f, 0xe00},
+	{ 0xf9004, 255, 0x1f, 0xc00},
+	{ 0xf9400, 1, 0x1f, 0xe00},
+	{ 0xf9404, 255, 0x1f, 0xc00},
+	{ 0xf9800, 1, 0x1f, 0xe00},
+	{ 0xf9804, 255, 0x1f, 0xc00},
+	{ 0xf9c00, 1, 0x1f, 0xe00},
+	{ 0xf9c04, 255, 0x1f, 0xc00},
+	{ 0xfa000, 1, 0x1f, 0xe00},
+	{ 0xfa004, 255, 0x1f, 0xc00},
+	{ 0xfa400, 1, 0x1f, 0xe00},
+	{ 0xfa404, 255, 0x1f, 0xc00},
+	{ 0xfa800, 1, 0x1f, 0xe00},
+	{ 0xfa804, 255, 0x1f, 0xc00},
+	{ 0xfac00, 1, 0x1f, 0xe00},
+	{ 0xfac04, 255, 0x1f, 0xc00},
+	{ 0xfb000, 1, 0x1f, 0xe00},
+	{ 0xfb004, 255, 0x1f, 0xc00},
+	{ 0xfb400, 1, 0x1f, 0xe00},
+	{ 0xfb404, 255, 0x1f, 0xc00},
+	{ 0xfb800, 1, 0x1f, 0xe00},
+	{ 0xfb804, 255, 0x1f, 0xc00},
+	{ 0xfbc00, 1, 0x1f, 0xe00},
+	{ 0xfbc04, 255, 0x1f, 0xc00},
+	{ 0xfc000, 1, 0x1f, 0xe00},
+	{ 0xfc004, 255, 0x1f, 0xc00},
+	{ 0xfc400, 1, 0x1f, 0xe00},
+	{ 0xfc404, 255, 0x1f, 0xc00},
+	{ 0xfc800, 1, 0x1f, 0xe00},
+	{ 0xfc804, 255, 0x1f, 0xc00},
+	{ 0xfcc00, 1, 0x1f, 0xe00},
+	{ 0xfcc04, 255, 0x1f, 0xc00},
+	{ 0xfd000, 1, 0x1f, 0xe00},
+	{ 0xfd004, 255, 0x1f, 0xc00},
+	{ 0xfd400, 1, 0x1f, 0xe00},
+	{ 0xfd404, 255, 0x1f, 0xc00},
+	{ 0xfd800, 1, 0x1f, 0xe00},
+	{ 0xfd804, 255, 0x1f, 0xc00},
+	{ 0xfdc00, 1, 0x1f, 0xe00},
+	{ 0xfdc04, 255, 0x1f, 0xc00},
+	{ 0xfe000, 1, 0x1f, 0xe00},
+	{ 0xfe004, 255, 0x1f, 0xc00},
+	{ 0xfe400, 1, 0x1f, 0xe00},
+	{ 0xfe404, 255, 0x1f, 0xc00},
+	{ 0xfe800, 1, 0x1f, 0xe00},
+	{ 0xfe804, 255, 0x1f, 0xc00},
+	{ 0xfec00, 1, 0x1f, 0xe00},
+	{ 0xfec04, 255, 0x1f, 0xc00},
+	{ 0xff000, 1, 0x1f, 0xe00},
+	{ 0xff004, 255, 0x1f, 0xc00},
+	{ 0xff400, 1, 0x1f, 0xe00},
+	{ 0xff404, 255, 0x1f, 0xc00},
+	{ 0xff800, 1, 0x1f, 0xe00},
+	{ 0xff804, 255, 0x1f, 0xc00},
+	{ 0xffc00, 1, 0x1f, 0xe00},
+	{ 0xffc04, 255, 0x1f, 0xc00},
+	{ 0x101000, 5, 0x1f, 0x924},
+	{ 0x101014, 1, 0x1f, 0xfff},
+	{ 0x101018, 6, 0x1f, 0x924},
+	{ 0x101040, 2, 0x1f, 0x1fff},
+	{ 0x10104c, 1, 0x1f, 0x1fff},
+	{ 0x101050, 1, 0x1e, 0x924},
+	{ 0x101054, 3, 0x1c, 0x924},
+	{ 0x101100, 1, 0x1f, 0x924},
+	{ 0x101800, 8, 0x1f, 0x924},
+	{ 0x102000, 18, 0x1f, 0x924},
+	{ 0x102058, 2, 0x1f, 0x1fff},
+	{ 0x102064, 1, 0x1f, 0x1fff},
+	{ 0x102068, 6, 0x1c, 0x924},
+	{ 0x102080, 16, 0x1f, 0xfff},
+	{ 0x1020c0, 1, 0x1f, 0x924},
+	{ 0x1020c8, 8, 0x2, 0x924},
+	{ 0x1020e8, 9, 0x1c, 0x924},
+	{ 0x102400, 1, 0x1f, 0x924},
+	{ 0x103000, 1, 0x1f, 0x924},
+	{ 0x103004, 2, 0x1f, 0xfff},
+	{ 0x10300c, 23, 0x1f, 0x924},
+	{ 0x103088, 2, 0x1f, 0x1fff},
+	{ 0x103094, 1, 0x1f, 0x1fff},
+	{ 0x103098, 1, 0x1e, 0x924},
+	{ 0x10309c, 2, 0x1e, 0xfff},
+	{ 0x1030a4, 2, 0x1e, 0x924},
+	{ 0x1030ac, 2, 0x1c, 0x924},
+	{ 0x1030b4, 1, 0x4, 0x924},
+	{ 0x1030b8, 2, 0x1c, 0xfff},
+	{ 0x1030c0, 3, 0x1c, 0x924},
+	{ 0x1030cc, 1, 0x1c, 0xfff},
+	{ 0x1030d0, 1, 0x1c, 0x924},
+	{ 0x1030d8, 2, 0x1c, 0x924},
+	{ 0x1030e0, 1, 0x1c, 0xfff},
+	{ 0x1030e4, 5, 0x1c, 0x924},
+	{ 0x103400, 136, 0x1c, 0x1fff},
+	{ 0x103800, 8, 0x1f, 0x924},
+	{ 0x104000, 1, 0x1f, 0x924},
+	{ 0x104004, 1, 0x1f, 0xfff},
+	{ 0x104008, 4, 0x1f, 0x924},
+	{ 0x104018, 1, 0x1f, 0xfff},
+	{ 0x10401c, 1, 0x1f, 0x924},
+	{ 0x104020, 1, 0x1f, 0xfff},
+	{ 0x104024, 6, 0x1f, 0x924},
+	{ 0x10403c, 1, 0x1f, 0xfff},
+	{ 0x104040, 47, 0x1f, 0x924},
+	{ 0x10410c, 2, 0x1f, 0x1fff},
+	{ 0x104118, 1, 0x1f, 0x1fff},
+	{ 0x10411c, 16, 0x1c, 0x924},
+	{ 0x104200, 17, 0x1f, 0x924},
+	{ 0x104400, 1, 0x1f, 0x1fff},
+	{ 0x104404, 63, 0x1f, 0xfff},
+	{ 0x104500, 192, 0x1f, 0xdb6},
+	{ 0x104800, 1, 0x1f, 0x1fff},
+	{ 0x104804, 63, 0x1f, 0xfff},
+	{ 0x104900, 192, 0x1f, 0xdb6},
+	{ 0x105000, 4, 0x1f, 0x1fff},
+	{ 0x105010, 252, 0x1f, 0xfff},
+	{ 0x105400, 768, 0x1f, 0xdb6},
+	{ 0x107000, 7, 0x1c, 0x924},
+	{ 0x10701c, 1, 0x18, 0x924},
+	{ 0x108000, 33, 0x3, 0x924},
+	{ 0x1080ac, 5, 0x2, 0x924},
+	{ 0x108100, 5, 0x3, 0x924},
+	{ 0x108120, 5, 0x3, 0x924},
+	{ 0x108200, 74, 0x3, 0x924},
+	{ 0x108400, 74, 0x3, 0x924},
+	{ 0x108800, 152, 0x3, 0x924},
+	{ 0x110000, 111, 0x1c, 0x924},
+	{ 0x1101cc, 2, 0x1c, 0x1fff},
+	{ 0x1101d8, 1, 0x1c, 0x1fff},
+	{ 0x1101dc, 1, 0x18, 0x924},
+	{ 0x110200, 4, 0x1c, 0x924},
+	{ 0x120000, 92, 0x1f, 0x924},
+	{ 0x120170, 2, 0x3, 0x924},
+	{ 0x120178, 14, 0x1f, 0x924},
+	{ 0x1201b0, 2, 0x1f, 0xfff},
+	{ 0x1201b8, 93, 0x1f, 0x924},
+	{ 0x12032c, 1, 0x1f, 0xfff},
+	{ 0x120330, 15, 0x1f, 0x924},
+	{ 0x12036c, 3, 0x1f, 0xfff},
+	{ 0x120378, 36, 0x1f, 0x924},
+	{ 0x120408, 2, 0x1f, 0xfff},
+	{ 0x120410, 1, 0x1f, 0x924},
+	{ 0x120414, 15, 0x1f, 0xfff},
+	{ 0x120450, 10, 0x1f, 0x924},
+	{ 0x120478, 2, 0x1f, 0xfff},
+	{ 0x120480, 43, 0x1f, 0x924},
+	{ 0x12052c, 1, 0x1f, 0xfff},
+	{ 0x120530, 5, 0x1f, 0x924},
+	{ 0x120544, 4, 0x3, 0x924},
+	{ 0x120554, 4, 0x1f, 0x924},
+	{ 0x120564, 2, 0x1f, 0xfff},
+	{ 0x12057c, 2, 0x1f, 0x1fff},
+	{ 0x120588, 3, 0x1f, 0x1fff},
+	{ 0x120598, 1, 0x1f, 0x1fff},
+	{ 0x12059c, 22, 0x1e, 0x924},
+	{ 0x1205f4, 1, 0x6, 0x924},
+	{ 0x1205f8, 4, 0x1c, 0x924},
+	{ 0x120618, 1, 0x1c, 0x924},
+	{ 0x12061c, 31, 0x1e, 0x924},
+	{ 0x120698, 3, 0x1c, 0x924},
+	{ 0x1206a4, 1, 0x4, 0x924},
+	{ 0x1206a8, 1, 0x1c, 0x924},
+	{ 0x1206b0, 38, 0x1c, 0x924},
+	{ 0x120748, 1, 0x1c, 0xfff},
+	{ 0x12074c, 11, 0x1c, 0x924},
+	{ 0x120778, 2, 0x1c, 0xfff},
+	{ 0x120780, 23, 0x1c, 0x924},
+	{ 0x1207dc, 1, 0x4, 0x924},
+	{ 0x1207fc, 1, 0x1c, 0x924},
+	{ 0x12080c, 2, 0x1f, 0xfff},
+	{ 0x120814, 1, 0x1f, 0x924},
+	{ 0x120818, 1, 0x1f, 0xfff},
+	{ 0x12081c, 1, 0x1f, 0x924},
+	{ 0x120820, 1, 0x1f, 0xfff},
+	{ 0x120824, 1, 0x1f, 0x924},
+	{ 0x120828, 1, 0x1f, 0xfff},
+	{ 0x12082c, 1, 0x1f, 0x924},
+	{ 0x120830, 1, 0x1f, 0xfff},
+	{ 0x120834, 1, 0x1f, 0x924},
+	{ 0x120838, 1, 0x1f, 0xfff},
+	{ 0x12083c, 1, 0x1f, 0x924},
+	{ 0x120840, 1, 0x1f, 0xfff},
+	{ 0x120844, 1, 0x1f, 0x924},
+	{ 0x120848, 1, 0x1f, 0xfff},
+	{ 0x12084c, 1, 0x1f, 0x924},
+	{ 0x120850, 1, 0x1f, 0xfff},
+	{ 0x120854, 1, 0x1f, 0x924},
+	{ 0x120858, 1, 0x1f, 0xfff},
+	{ 0x12085c, 1, 0x1f, 0x924},
+	{ 0x120860, 1, 0x1f, 0xfff},
+	{ 0x120864, 1, 0x1f, 0x924},
+	{ 0x120868, 1, 0x1f, 0xfff},
+	{ 0x12086c, 1, 0x1f, 0x924},
+	{ 0x120870, 1, 0x1f, 0xfff},
+	{ 0x120874, 1, 0x1f, 0x924},
+	{ 0x120878, 1, 0x1f, 0xfff},
+	{ 0x12087c, 1, 0x1f, 0x924},
+	{ 0x120880, 1, 0x1f, 0xfff},
+	{ 0x120884, 1, 0x1f, 0x924},
+	{ 0x120888, 1, 0x1f, 0xfff},
+	{ 0x12088c, 1, 0x1f, 0x924},
+	{ 0x120890, 1, 0x1f, 0xfff},
+	{ 0x120894, 1, 0x1f, 0x924},
+	{ 0x120898, 1, 0x1f, 0xfff},
+	{ 0x12089c, 1, 0x1f, 0x924},
+	{ 0x1208a0, 1, 0x1f, 0xfff},
+	{ 0x1208a4, 1, 0x1f, 0x924},
+	{ 0x1208a8, 1, 0x1f, 0xfff},
+	{ 0x1208ac, 1, 0x1f, 0x924},
+	{ 0x1208b0, 1, 0x1f, 0xfff},
+	{ 0x1208b4, 1, 0x1f, 0x924},
+	{ 0x1208b8, 1, 0x1f, 0xfff},
+	{ 0x1208bc, 1, 0x1f, 0x924},
+	{ 0x1208c0, 1, 0x1f, 0xfff},
+	{ 0x1208c4, 1, 0x1f, 0x924},
+	{ 0x1208c8, 1, 0x1f, 0xfff},
+	{ 0x1208cc, 1, 0x1f, 0x924},
+	{ 0x1208d0, 1, 0x1f, 0xfff},
+	{ 0x1208d4, 1, 0x1f, 0x924},
+	{ 0x1208d8, 1, 0x1f, 0xfff},
+	{ 0x1208dc, 1, 0x1f, 0x924},
+	{ 0x1208e0, 1, 0x1f, 0xfff},
+	{ 0x1208e4, 1, 0x1f, 0x924},
+	{ 0x1208e8, 1, 0x1f, 0xfff},
+	{ 0x1208ec, 1, 0x1f, 0x924},
+	{ 0x1208f0, 1, 0x1f, 0xfff},
+	{ 0x1208f4, 1, 0x1f, 0x924},
+	{ 0x1208f8, 1, 0x1f, 0xfff},
+	{ 0x1208fc, 1, 0x1f, 0x924},
+	{ 0x120900, 1, 0x1f, 0xfff},
+	{ 0x120904, 1, 0x1f, 0x924},
+	{ 0x120908, 1, 0x1f, 0xfff},
+	{ 0x12090c, 1, 0x1f, 0x924},
+	{ 0x120910, 7, 0x1c, 0x924},
+	{ 0x120930, 9, 0x1c, 0x924},
+	{ 0x12095c, 37, 0x18, 0x924},
+	{ 0x120a00, 2, 0x7, 0x924},
+	{ 0x120b00, 1, 0x18, 0x924},
+	{ 0x122000, 2, 0x1f, 0x924},
+	{ 0x122008, 2046, 0x1, 0x924},
+	{ 0x128000, 6144, 0x1e, 0x924},
+	{ 0x130000, 1, 0x1c, 0x1fff},
+	{ 0x130004, 11, 0x1c, 0x924},
+	{ 0x130030, 1, 0x1c, 0xfff},
+	{ 0x130034, 6, 0x1c, 0x924},
+	{ 0x13004c, 3, 0x1c, 0xfff},
+	{ 0x130058, 3, 0x1c, 0x924},
+	{ 0x130064, 2, 0x1c, 0xfff},
+	{ 0x13006c, 8, 0x1c, 0x924},
+	{ 0x13009c, 2, 0x1c, 0x1fff},
+	{ 0x1300a8, 1, 0x1c, 0x1fff},
+	{ 0x130100, 12, 0x1c, 0x924},
+	{ 0x130130, 1, 0x1c, 0xfff},
+	{ 0x130134, 14, 0x1c, 0x924},
+	{ 0x13016c, 1, 0x1c, 0xfff},
+	{ 0x130170, 1, 0x1c, 0x924},
+	{ 0x130180, 1, 0x1c, 0x924},
+	{ 0x130200, 1, 0x1c, 0x924},
+	{ 0x130280, 1, 0x1c, 0x924},
+	{ 0x130300, 1, 0x1c, 0xfff},
+	{ 0x130304, 4, 0x1c, 0x924},
+	{ 0x130380, 1, 0x1c, 0x924},
+	{ 0x130400, 1, 0x1c, 0x924},
+	{ 0x130480, 1, 0x1c, 0xfff},
+	{ 0x130484, 4, 0x1c, 0x924},
+	{ 0x130800, 72, 0x1c, 0x924},
+	{ 0x131000, 136, 0x1c, 0x924},
+	{ 0x132000, 148, 0x1c, 0x924},
+	{ 0x134000, 544, 0x1c, 0x924},
+	{ 0x140000, 1, 0x1f, 0x924},
+	{ 0x140004, 9, 0xf, 0x924},
+	{ 0x140028, 8, 0x1f, 0x924},
+	{ 0x140048, 5, 0xf, 0x924},
+	{ 0x14005c, 2, 0xf, 0xfff},
+	{ 0x140064, 3, 0xf, 0x924},
+	{ 0x140070, 1, 0x1f, 0x924},
+	{ 0x140074, 10, 0xf, 0x924},
+	{ 0x14009c, 1, 0x1f, 0x924},
+	{ 0x1400a0, 5, 0xf, 0x924},
+	{ 0x1400b4, 7, 0x1f, 0x924},
+	{ 0x1400d0, 2, 0xf, 0xfff},
+	{ 0x1400d8, 2, 0xf, 0x924},
+	{ 0x1400e0, 1, 0xf, 0xfff},
+	{ 0x1400e4, 5, 0xf, 0x924},
+	{ 0x1400f8, 2, 0x1f, 0x924},
+	{ 0x140100, 5, 0x3, 0x924},
+	{ 0x140114, 5, 0xf, 0x924},
+	{ 0x140128, 7, 0x1f, 0x924},
+	{ 0x140144, 9, 0xf, 0x924},
+	{ 0x140168, 8, 0x1f, 0x924},
+	{ 0x140188, 3, 0xf, 0x924},
+	{ 0x140194, 13, 0x1f, 0x924},
+	{ 0x1401d8, 2, 0x1f, 0x1fff},
+	{ 0x1401e4, 1, 0x1f, 0x1fff},
+	{ 0x140200, 6, 0xf, 0xfff},
+	{ 0x1402e0, 2, 0xc, 0x924},
+	{ 0x1402e8, 2, 0x1c, 0x924},
+	{ 0x1402f0, 9, 0xc, 0x924},
+	{ 0x140314, 9, 0x10, 0x924},
+	{ 0x140338, 7, 0x10, 0xfff},
+	{ 0x140354, 7, 0x10, 0x924},
+	{ 0x140370, 7, 0x10, 0xfff},
+	{ 0x14038c, 14, 0x10, 0x924},
+	{ 0x1404b0, 14, 0x10, 0x924},
+	{ 0x15c000, 2, 0x1e, 0x924},
+	{ 0x15c008, 5, 0x2, 0x924},
+	{ 0x15c020, 8, 0x1c, 0x924},
+	{ 0x15c040, 1, 0xc, 0x924},
+	{ 0x15c044, 2, 0x1c, 0x924},
+	{ 0x15c04c, 8, 0xc, 0x924},
+	{ 0x15c06c, 8, 0x1c, 0x924},
+	{ 0x15c090, 13, 0x1c, 0x924},
+	{ 0x15c0c8, 24, 0x1c, 0x924},
+	{ 0x15c128, 2, 0xc, 0x924},
+	{ 0x15c130, 1, 0x1c, 0x924},
+	{ 0x15c138, 6, 0x1c, 0x924},
+	{ 0x15c150, 2, 0x18, 0x924},
+	{ 0x15c158, 2, 0x8, 0x924},
+	{ 0x15c160, 23, 0x10, 0x924},
+	{ 0x15c1bc, 6, 0x10, 0xfff},
+	{ 0x15c1d4, 23, 0x10, 0x924},
+	{ 0x15c230, 7, 0x10, 0xfff},
+	{ 0x15c24c, 90, 0x10, 0x924},
+	{ 0x160004, 6, 0x18, 0x924},
+	{ 0x16003c, 1, 0x10, 0x924},
+	{ 0x160040, 6, 0x18, 0x924},
+	{ 0x16005c, 6, 0x18, 0x924},
+	{ 0x160074, 1, 0x10, 0x924},
+	{ 0x160078, 2, 0x18, 0x924},
+	{ 0x160300, 8, 0x18, 0x924},
+	{ 0x160330, 6, 0x18, 0x924},
+	{ 0x160404, 6, 0x18, 0x924},
+	{ 0x16043c, 1, 0x10, 0x924},
+	{ 0x160440, 6, 0x18, 0x924},
+	{ 0x16045c, 6, 0x18, 0x924},
+	{ 0x160474, 1, 0x10, 0x924},
+	{ 0x160478, 2, 0x18, 0x924},
+	{ 0x160700, 8, 0x18, 0x924},
+	{ 0x160730, 6, 0x18, 0x924},
+	{ 0x161000, 7, 0x1f, 0x924},
+	{ 0x16102c, 2, 0x1f, 0x1fff},
+	{ 0x161038, 1, 0x1f, 0x1fff},
+	{ 0x16103c, 2, 0x1c, 0x924},
+	{ 0x161800, 2, 0x1f, 0x924},
+	{ 0x162000, 54, 0x18, 0x924},
+	{ 0x162200, 60, 0x18, 0x924},
+	{ 0x162400, 54, 0x18, 0x924},
+	{ 0x162600, 60, 0x18, 0x924},
+	{ 0x162800, 54, 0x18, 0x924},
+	{ 0x162a00, 60, 0x18, 0x924},
+	{ 0x162c00, 54, 0x18, 0x924},
+	{ 0x162e00, 60, 0x18, 0x924},
+	{ 0x163000, 1, 0x18, 0x924},
+	{ 0x163008, 1, 0x18, 0x924},
+	{ 0x163010, 1, 0x18, 0x924},
+	{ 0x163018, 1, 0x18, 0x924},
+	{ 0x163020, 5, 0x18, 0x924},
+	{ 0x163038, 3, 0x18, 0x924},
+	{ 0x163048, 3, 0x18, 0x924},
+	{ 0x163058, 1, 0x18, 0x924},
+	{ 0x163060, 1, 0x18, 0x924},
+	{ 0x163068, 1, 0x18, 0x924},
+	{ 0x163070, 3, 0x18, 0x924},
+	{ 0x163080, 1, 0x18, 0x924},
+	{ 0x163088, 3, 0x18, 0x924},
+	{ 0x163098, 1, 0x18, 0x924},
+	{ 0x1630a0, 1, 0x18, 0x924},
+	{ 0x1630a8, 1, 0x18, 0x924},
+	{ 0x1630b0, 2, 0x10, 0x924},
+	{ 0x1630c0, 1, 0x18, 0x924},
+	{ 0x1630c8, 1, 0x18, 0x924},
+	{ 0x1630d0, 1, 0x18, 0x924},
+	{ 0x1630d8, 1, 0x18, 0x924},
+	{ 0x1630e0, 2, 0x18, 0x924},
+	{ 0x163110, 1, 0x18, 0x924},
+	{ 0x163120, 2, 0x18, 0x924},
+	{ 0x163420, 4, 0x18, 0x924},
+	{ 0x163438, 2, 0x18, 0x924},
+	{ 0x163488, 2, 0x18, 0x924},
+	{ 0x163520, 2, 0x18, 0x924},
+	{ 0x163800, 1, 0x18, 0x924},
+	{ 0x163808, 1, 0x18, 0x924},
+	{ 0x163810, 1, 0x18, 0x924},
+	{ 0x163818, 1, 0x18, 0x924},
+	{ 0x163820, 5, 0x18, 0x924},
+	{ 0x163838, 3, 0x18, 0x924},
+	{ 0x163848, 3, 0x18, 0x924},
+	{ 0x163858, 1, 0x18, 0x924},
+	{ 0x163860, 1, 0x18, 0x924},
+	{ 0x163868, 1, 0x18, 0x924},
+	{ 0x163870, 3, 0x18, 0x924},
+	{ 0x163880, 1, 0x18, 0x924},
+	{ 0x163888, 3, 0x18, 0x924},
+	{ 0x163898, 1, 0x18, 0x924},
+	{ 0x1638a0, 1, 0x18, 0x924},
+	{ 0x1638a8, 1, 0x18, 0x924},
+	{ 0x1638b0, 2, 0x10, 0x924},
+	{ 0x1638c0, 1, 0x18, 0x924},
+	{ 0x1638c8, 1, 0x18, 0x924},
+	{ 0x1638d0, 1, 0x18, 0x924},
+	{ 0x1638d8, 1, 0x18, 0x924},
+	{ 0x1638e0, 2, 0x18, 0x924},
+	{ 0x163910, 1, 0x18, 0x924},
+	{ 0x163920, 2, 0x18, 0x924},
+	{ 0x163c20, 4, 0x18, 0x924},
+	{ 0x163c38, 2, 0x18, 0x924},
+	{ 0x163c88, 2, 0x18, 0x924},
+	{ 0x163d20, 2, 0x18, 0x924},
+	{ 0x164000, 5, 0x1f, 0x924},
+	{ 0x164014, 2, 0x1f, 0xfff},
+	{ 0x16401c, 53, 0x1f, 0x924},
+	{ 0x164100, 2, 0x1f, 0x1fff},
+	{ 0x16410c, 1, 0x1f, 0x1fff},
+	{ 0x164110, 2, 0x1e, 0x924},
+	{ 0x164118, 15, 0x1c, 0x924},
+	{ 0x164200, 1, 0x1f, 0x924},
+	{ 0x164208, 1, 0x1f, 0x924},
+	{ 0x164210, 1, 0x1f, 0x924},
+	{ 0x164218, 1, 0x1f, 0x924},
+	{ 0x164220, 1, 0x1f, 0x924},
+	{ 0x164228, 1, 0x1f, 0x924},
+	{ 0x164230, 1, 0x1f, 0x924},
+	{ 0x164238, 1, 0x1f, 0x924},
+	{ 0x164240, 1, 0x1f, 0x924},
+	{ 0x164248, 1, 0x1f, 0x924},
+	{ 0x164250, 1, 0x1f, 0x924},
+	{ 0x164258, 1, 0x1f, 0x924},
+	{ 0x164260, 1, 0x1f, 0x924},
+	{ 0x164270, 2, 0x1f, 0x924},
+	{ 0x164280, 2, 0x1f, 0x924},
+	{ 0x164800, 2, 0x1f, 0x924},
+	{ 0x165000, 2, 0x1f, 0x924},
+	{ 0x166000, 164, 0x1f, 0x924},
+	{ 0x1662b0, 2, 0x1f, 0x1fff},
+	{ 0x1662bc, 1, 0x1f, 0x1fff},
+	{ 0x1662cc, 7, 0x1c, 0x924},
+	{ 0x166400, 49, 0x1f, 0x924},
+	{ 0x1664c8, 32, 0x1f, 0x924},
+	{ 0x166548, 1, 0x1f, 0xfff},
+	{ 0x16654c, 1, 0x1f, 0x924},
+	{ 0x166550, 1, 0x1f, 0xfff},
+	{ 0x166554, 1, 0x1f, 0x924},
+	{ 0x166558, 1, 0x1f, 0xfff},
+	{ 0x16655c, 1, 0x1f, 0x924},
+	{ 0x166568, 2, 0x1f, 0x924},
+	{ 0x166570, 5, 0x1c, 0x924},
+	{ 0x166800, 1, 0x1f, 0x924},
+	{ 0x168000, 1, 0x1f, 0xfff},
+	{ 0x168004, 1, 0x1f, 0x924},
+	{ 0x168008, 1, 0x1f, 0xfff},
+	{ 0x16800c, 1, 0x1f, 0x924},
+	{ 0x168010, 1, 0x1f, 0xfff},
+	{ 0x168014, 1, 0x1f, 0x924},
+	{ 0x168018, 1, 0x1f, 0xfff},
+	{ 0x16801c, 3, 0x1f, 0x924},
+	{ 0x168028, 2, 0x1f, 0xfff},
+	{ 0x168030, 10, 0x1f, 0x924},
+	{ 0x168058, 9, 0x1f, 0xfff},
+	{ 0x16807c, 106, 0x1f, 0x924},
+	{ 0x168224, 2, 0x3, 0x924},
+	{ 0x16822c, 3, 0x1f, 0x924},
+	{ 0x168238, 1, 0x1f, 0xfff},
+	{ 0x16823c, 25, 0x1f, 0x924},
+	{ 0x1682a0, 12, 0x3, 0x924},
+	{ 0x1682d0, 7, 0x1f, 0xfff},
+	{ 0x1682ec, 5, 0x1f, 0x924},
+	{ 0x168300, 2, 0x3, 0xfff},
+	{ 0x168308, 65, 0x1f, 0xfff},
+	{ 0x16840c, 1, 0x1f, 0x924},
+	{ 0x168410, 2, 0x1f, 0xfff},
+	{ 0x168418, 2, 0x3, 0x924},
+	{ 0x168420, 6, 0x1f, 0x924},
+	{ 0x168448, 2, 0x1f, 0x1fff},
+	{ 0x168454, 1, 0x1f, 0x1fff},
+	{ 0x168800, 19, 0x1f, 0x924},
+	{ 0x168900, 1, 0x1f, 0x924},
+	{ 0x168a00, 128, 0x1f, 0xfff},
+	{ 0x16a000, 1536, 0x1f, 0x924},
+	{ 0x16c000, 1536, 0x1f, 0x924},
+	{ 0x16e000, 16, 0x2, 0x924},
+	{ 0x16e040, 8, 0x1c, 0x924},
+	{ 0x16e100, 1, 0x2, 0x924},
+	{ 0x16e200, 2, 0x2, 0xfff},
+	{ 0x16e400, 1, 0x2, 0x924},
+	{ 0x16e404, 2, 0x2, 0xfff},
+	{ 0x16e40c, 94, 0x2, 0x924},
+	{ 0x16e584, 64, 0x2, 0xfff},
+	{ 0x16e684, 2, 0x1e, 0xfff},
+	{ 0x16e68c, 4, 0x2, 0xfff},
+	{ 0x16e69c, 8, 0x2, 0x924},
+	{ 0x16e6bc, 4, 0x1e, 0x924},
+	{ 0x16e6cc, 4, 0x2, 0x924},
+	{ 0x16e6e0, 2, 0x1c, 0x924},
+	{ 0x16e6e8, 5, 0xc, 0x924},
+	{ 0x16e6fc, 4, 0x1c, 0xfff},
+	{ 0x16e70c, 1, 0x1c, 0x924},
+	{ 0x16e768, 17, 0x1c, 0x924},
+	{ 0x16e7ac, 12, 0x10, 0xfff},
+	{ 0x170000, 24, 0x1f, 0x924},
+	{ 0x170060, 4, 0x3, 0x924},
+	{ 0x170070, 13, 0x1f, 0x924},
+	{ 0x1700a4, 1, 0x1f, 0xfff},
+	{ 0x1700a8, 1, 0x1f, 0x924},
+	{ 0x1700ac, 2, 0x1f, 0xfff},
+	{ 0x1700b4, 3, 0x1f, 0x924},
+	{ 0x1700c0, 1, 0x1f, 0xfff},
+	{ 0x1700c4, 44, 0x1f, 0x924},
+	{ 0x170184, 2, 0x1f, 0x1fff},
+	{ 0x170190, 1, 0x1f, 0x1fff},
+	{ 0x170194, 11, 0x1c, 0x924},
+	{ 0x1701c4, 1, 0x1c, 0x924},
+	{ 0x1701cc, 7, 0x1c, 0x924},
+	{ 0x1701e8, 1, 0x18, 0x924},
+	{ 0x1701ec, 1, 0x1c, 0x924},
+	{ 0x1701f4, 1, 0x1c, 0x924},
+	{ 0x170200, 4, 0x1f, 0x924},
+	{ 0x170214, 1, 0x1f, 0x924},
+	{ 0x170218, 77, 0x1c, 0x924},
+	{ 0x170400, 64, 0x1c, 0x924},
+	{ 0x178000, 1, 0x1f, 0x924},
+	{ 0x180000, 61, 0x1f, 0x924},
+	{ 0x180114, 2, 0x1f, 0x1fff},
+	{ 0x180120, 3, 0x1f, 0x1fff},
+	{ 0x180130, 1, 0x1f, 0x1fff},
+	{ 0x18013c, 2, 0x1e, 0x924},
+	{ 0x180200, 27, 0x1f, 0x924},
+	{ 0x18026c, 1, 0x1f, 0xfff},
+	{ 0x180270, 12, 0x1f, 0x924},
+	{ 0x1802a0, 1, 0x1f, 0xfff},
+	{ 0x1802a4, 17, 0x1f, 0x924},
+	{ 0x180340, 4, 0x1f, 0x924},
+	{ 0x180380, 1, 0x1c, 0x924},
+	{ 0x180388, 1, 0x1c, 0x924},
+	{ 0x180390, 1, 0x1c, 0x924},
+	{ 0x180398, 1, 0x1c, 0x924},
+	{ 0x1803a0, 5, 0x1c, 0x924},
+	{ 0x1803b4, 2, 0x18, 0x924},
+	{ 0x181000, 4, 0x1f, 0x93c},
+	{ 0x181010, 1020, 0x1f, 0x38},
+	{ 0x182000, 4, 0x18, 0x924},
+	{ 0x1a0000, 1, 0x1f, 0x92c},
+	{ 0x1a0004, 5631, 0x1f, 0x8},
+	{ 0x1a5800, 2560, 0x1e, 0x8},
+	{ 0x1a8000, 1, 0x1f, 0x92c},
+	{ 0x1a8004, 8191, 0x1e, 0x8},
+	{ 0x1b0000, 1, 0x1f, 0x92c},
+	{ 0x1b0004, 15, 0x2, 0x8},
+	{ 0x1b0040, 1, 0x1e, 0x92c},
+	{ 0x1b0044, 239, 0x2, 0x8},
+	{ 0x1b0400, 1, 0x1f, 0x92c},
+	{ 0x1b0404, 255, 0x2, 0x8},
+	{ 0x1b0800, 1, 0x1f, 0x924},
+	{ 0x1b0840, 1, 0x1e, 0x924},
+	{ 0x1b0c00, 1, 0x1f, 0x1fff},
+	{ 0x1b1000, 1, 0x1f, 0x1fff},
+	{ 0x1b1040, 1, 0x1e, 0x1fff},
+	{ 0x1b1400, 1, 0x1f, 0x924},
+	{ 0x1b1440, 1, 0x1e, 0x924},
+	{ 0x1b1480, 1, 0x1e, 0x924},
+	{ 0x1b14c0, 1, 0x1e, 0x924},
+	{ 0x1b1800, 128, 0x1f, 0x10},
+	{ 0x1b1c00, 128, 0x1f, 0x10},
+	{ 0x1b2000, 1, 0x1f, 0xdb6},
+	{ 0x1b2400, 1, 0x1e, 0x92c},
+	{ 0x1b2404, 5631, 0x1c, 0x8},
+	{ 0x1b8000, 1, 0x1f, 0xfff},
+	{ 0x1b8040, 1, 0x1f, 0xfff},
+	{ 0x1b8080, 1, 0x1f, 0xfff},
+	{ 0x1b80c0, 1, 0x1f, 0xfff},
+	{ 0x1b8100, 1, 0x1f, 0x924},
+	{ 0x1b8140, 1, 0x1f, 0x924},
+	{ 0x1b8180, 1, 0x1f, 0x924},
+	{ 0x1b81c0, 1, 0x1f, 0x924},
+	{ 0x1b8200, 1, 0x1f, 0x924},
+	{ 0x1b8240, 1, 0x1f, 0x924},
+	{ 0x1b8280, 1, 0x1f, 0x924},
+	{ 0x1b82c0, 1, 0x1f, 0x924},
+	{ 0x1b8300, 1, 0x1f, 0x924},
+	{ 0x1b8340, 1, 0x1f, 0x924},
+	{ 0x1b8380, 1, 0x1f, 0x924},
+	{ 0x1b83c0, 1, 0x1f, 0x924},
+	{ 0x1b8400, 1, 0x1f, 0x924},
+	{ 0x1b8440, 1, 0x1f, 0x924},
+	{ 0x1b8480, 1, 0x1f, 0x924},
+	{ 0x1b84c0, 1, 0x1f, 0x924},
+	{ 0x1b8500, 1, 0x1f, 0x924},
+	{ 0x1b8540, 1, 0x1f, 0x924},
+	{ 0x1b8580, 1, 0x1f, 0x924},
+	{ 0x1b85c0, 19, 0x1c, 0x924},
+	{ 0x1b8800, 1, 0x1f, 0x924},
+	{ 0x1b8840, 1, 0x1f, 0x924},
+	{ 0x1b8880, 1, 0x1f, 0x924},
+	{ 0x1b88c0, 1, 0x1f, 0x924},
+	{ 0x1b8900, 1, 0x1f, 0x924},
+	{ 0x1b8940, 1, 0x1f, 0x924},
+	{ 0x1b8980, 1, 0x1f, 0x924},
+	{ 0x1b89c0, 1, 0x1f, 0x924},
+	{ 0x1b8a00, 1, 0x1f, 0x934},
+	{ 0x1b8a40, 1, 0x1f, 0x924},
+	{ 0x1b8a80, 1, 0x1f, 0x492},
+	{ 0x1b8ac0, 1, 0x1f, 0x924},
+	{ 0x1b8b00, 1, 0x1f, 0x924},
+	{ 0x1b8b40, 1, 0x1f, 0x924},
+	{ 0x1b8b80, 1, 0x1f, 0x924},
+	{ 0x1b8bc0, 1, 0x1f, 0x924},
+	{ 0x1b8c00, 1, 0x1f, 0x924},
+	{ 0x1b8c40, 1, 0x1f, 0x924},
+	{ 0x1b8c80, 1, 0x1f, 0x924},
+	{ 0x1b8cc0, 1, 0x1f, 0x924},
+	{ 0x1b8cc4, 1, 0x1c, 0x924},
+	{ 0x1b8d00, 1, 0x1f, 0x924},
+	{ 0x1b8d40, 1, 0x1f, 0x924},
+	{ 0x1b8d80, 1, 0x1f, 0x924},
+	{ 0x1b8dc0, 1, 0x1f, 0x924},
+	{ 0x1b8e00, 1, 0x1f, 0x924},
+	{ 0x1b8e40, 1, 0x1f, 0x924},
+	{ 0x1b8e80, 1, 0x1f, 0x924},
+	{ 0x1b8e84, 1, 0x1c, 0x924},
+	{ 0x1b8ec0, 1, 0x1e, 0x924},
+	{ 0x1b8f00, 1, 0x1e, 0x924},
+	{ 0x1b8f40, 1, 0x1e, 0x924},
+	{ 0x1b8f80, 1, 0x1e, 0x924},
+	{ 0x1b8fc0, 1, 0x1e, 0x924},
+	{ 0x1b8fd4, 5, 0x1c, 0x924},
+	{ 0x1b8fe8, 2, 0x18, 0x924},
+	{ 0x1b9000, 1, 0x1c, 0x924},
+	{ 0x1b9040, 3, 0x1c, 0x924},
+	{ 0x1b905c, 1, 0x18, 0x924},
+	{ 0x1b9064, 1, 0x10, 0x924},
+	{ 0x1b9080, 10, 0x10, 0x924},
+	{ 0x1c0000, 2, 0x1f, 0x924},
+	{ 0x200000, 65, 0x1f, 0x924},
+	{ 0x200124, 2, 0x1f, 0x1fff},
+	{ 0x200130, 3, 0x1f, 0x1fff},
+	{ 0x200140, 1, 0x1f, 0x1fff},
+	{ 0x20014c, 2, 0x1e, 0x924},
+	{ 0x200200, 27, 0x1f, 0x924},
+	{ 0x20026c, 1, 0x1f, 0xfff},
+	{ 0x200270, 12, 0x1f, 0x924},
+	{ 0x2002a0, 1, 0x1f, 0xfff},
+	{ 0x2002a4, 17, 0x1f, 0x924},
+	{ 0x200340, 4, 0x1f, 0x924},
+	{ 0x200380, 1, 0x1c, 0x924},
+	{ 0x200388, 1, 0x1c, 0x924},
+	{ 0x200390, 1, 0x1c, 0x924},
+	{ 0x200398, 1, 0x1c, 0x924},
+	{ 0x2003a0, 1, 0x1c, 0x924},
+	{ 0x2003a8, 2, 0x1c, 0x924},
+	{ 0x202000, 4, 0x1f, 0x1927},
+	{ 0x202010, 2044, 0x1f, 0x1007},
+	{ 0x204000, 4, 0x18, 0x924},
+	{ 0x220000, 1, 0x1f, 0x925},
+	{ 0x220004, 5631, 0x1f, 0x1},
+	{ 0x225800, 2560, 0x1e, 0x1},
+	{ 0x228000, 1, 0x1f, 0x925},
+	{ 0x228004, 8191, 0x1e, 0x1},
+	{ 0x230000, 1, 0x1f, 0x925},
+	{ 0x230004, 15, 0x2, 0x1},
+	{ 0x230040, 1, 0x1e, 0x925},
+	{ 0x230044, 239, 0x2, 0x1},
+	{ 0x230400, 1, 0x1f, 0x925},
+	{ 0x230404, 255, 0x2, 0x1},
+	{ 0x230800, 1, 0x1f, 0x924},
+	{ 0x230840, 1, 0x1e, 0x924},
+	{ 0x230c00, 1, 0x1f, 0x924},
+	{ 0x231000, 1, 0x1f, 0x924},
+	{ 0x231040, 1, 0x1e, 0x924},
+	{ 0x231400, 1, 0x1f, 0x924},
+	{ 0x231440, 1, 0x1e, 0x924},
+	{ 0x231480, 1, 0x1e, 0x924},
+	{ 0x2314c0, 1, 0x1e, 0x924},
+	{ 0x231800, 128, 0x1f, 0x2},
+	{ 0x231c00, 128, 0x1f, 0x2},
+	{ 0x232000, 1, 0x1f, 0xdb6},
+	{ 0x232400, 1, 0x1e, 0x925},
+	{ 0x232404, 5631, 0x1c, 0x1},
+	{ 0x238000, 1, 0x1f, 0xfff},
+	{ 0x238040, 1, 0x1f, 0xfff},
+	{ 0x238080, 1, 0x1f, 0xfff},
+	{ 0x2380c0, 1, 0x1f, 0xfff},
+	{ 0x238100, 1, 0x1f, 0x924},
+	{ 0x238140, 1, 0x1f, 0x924},
+	{ 0x238180, 1, 0x1f, 0x924},
+	{ 0x2381c0, 1, 0x1f, 0x924},
+	{ 0x238200, 1, 0x1f, 0x924},
+	{ 0x238240, 1, 0x1f, 0x924},
+	{ 0x238280, 1, 0x1f, 0x924},
+	{ 0x2382c0, 1, 0x1f, 0x924},
+	{ 0x238300, 1, 0x1f, 0x924},
+	{ 0x238340, 1, 0x1f, 0x924},
+	{ 0x238380, 1, 0x1f, 0x924},
+	{ 0x2383c0, 1, 0x1f, 0x924},
+	{ 0x238400, 1, 0x1f, 0x924},
+	{ 0x238440, 1, 0x1f, 0x924},
+	{ 0x238480, 1, 0x1f, 0x924},
+	{ 0x2384c0, 1, 0x1f, 0x924},
+	{ 0x238500, 1, 0x1f, 0x924},
+	{ 0x238540, 1, 0x1f, 0x924},
+	{ 0x238580, 1, 0x1f, 0x924},
+	{ 0x2385c0, 19, 0x1c, 0x924},
+	{ 0x238800, 1, 0x1f, 0x924},
+	{ 0x238840, 1, 0x1f, 0x924},
+	{ 0x238880, 1, 0x1f, 0x924},
+	{ 0x2388c0, 1, 0x1f, 0x924},
+	{ 0x238900, 1, 0x1f, 0x924},
+	{ 0x238940, 1, 0x1f, 0x924},
+	{ 0x238980, 1, 0x1f, 0x924},
+	{ 0x2389c0, 1, 0x1f, 0x924},
+	{ 0x238a00, 1, 0x1f, 0x926},
+	{ 0x238a40, 1, 0x1f, 0x924},
+	{ 0x238a80, 1, 0x1f, 0x492},
+	{ 0x238ac0, 1, 0x1f, 0x924},
+	{ 0x238b00, 1, 0x1f, 0x924},
+	{ 0x238b40, 1, 0x1f, 0x924},
+	{ 0x238b80, 1, 0x1f, 0x924},
+	{ 0x238bc0, 1, 0x1f, 0x924},
+	{ 0x238c00, 1, 0x1f, 0x924},
+	{ 0x238c40, 1, 0x1f, 0x924},
+	{ 0x238c80, 1, 0x1f, 0x924},
+	{ 0x238cc0, 1, 0x1f, 0x924},
+	{ 0x238cc4, 1, 0x1c, 0x924},
+	{ 0x238d00, 1, 0x1f, 0x924},
+	{ 0x238d40, 1, 0x1f, 0x924},
+	{ 0x238d80, 1, 0x1f, 0x924},
+	{ 0x238dc0, 1, 0x1f, 0x924},
+	{ 0x238e00, 1, 0x1f, 0x924},
+	{ 0x238e40, 1, 0x1f, 0x924},
+	{ 0x238e80, 1, 0x1f, 0x924},
+	{ 0x238e84, 1, 0x1c, 0x924},
+	{ 0x238ec0, 1, 0x1e, 0x924},
+	{ 0x238f00, 1, 0x1e, 0x924},
+	{ 0x238f40, 1, 0x1e, 0x924},
+	{ 0x238f80, 1, 0x1e, 0x924},
+	{ 0x238fc0, 1, 0x1e, 0x924},
+	{ 0x238fd4, 5, 0x1c, 0x924},
+	{ 0x238fe8, 2, 0x18, 0x924},
+	{ 0x239000, 1, 0x1c, 0x924},
+	{ 0x239040, 3, 0x1c, 0x924},
+	{ 0x23905c, 1, 0x18, 0x924},
+	{ 0x239064, 1, 0x10, 0x924},
+	{ 0x239080, 10, 0x10, 0x924},
+	{ 0x240000, 2, 0x1f, 0x924},
+	{ 0x280000, 65, 0x1f, 0x924},
+	{ 0x280124, 2, 0x1f, 0x1fff},
+	{ 0x280130, 3, 0x1f, 0x1fff},
+	{ 0x280140, 1, 0x1f, 0x1fff},
+	{ 0x28014c, 2, 0x1e, 0x924},
+	{ 0x280200, 27, 0x1f, 0x924},
+	{ 0x28026c, 1, 0x1f, 0xfff},
+	{ 0x280270, 12, 0x1f, 0x924},
+	{ 0x2802a0, 1, 0x1f, 0xfff},
+	{ 0x2802a4, 17, 0x1f, 0x924},
+	{ 0x280340, 4, 0x1f, 0x924},
+	{ 0x280380, 1, 0x1c, 0x924},
+	{ 0x280388, 1, 0x1c, 0x924},
+	{ 0x280390, 1, 0x1c, 0x924},
+	{ 0x280398, 1, 0x1c, 0x924},
+	{ 0x2803a0, 1, 0x1c, 0x924},
+	{ 0x2803a8, 2, 0x1c, 0x924},
+	{ 0x282000, 4, 0x1f, 0x9e4},
+	{ 0x282010, 2044, 0x1f, 0x1c0},
+	{ 0x284000, 4, 0x18, 0x924},
+	{ 0x2a0000, 1, 0x1f, 0x964},
+	{ 0x2a0004, 5631, 0x1f, 0x40},
+	{ 0x2a5800, 2560, 0x1e, 0x40},
+	{ 0x2a8000, 1, 0x1f, 0x964},
+	{ 0x2a8004, 8191, 0x1e, 0x40},
+	{ 0x2b0000, 1, 0x1f, 0x964},
+	{ 0x2b0004, 15, 0x2, 0x40},
+	{ 0x2b0040, 1, 0x1e, 0x964},
+	{ 0x2b0044, 239, 0x2, 0x40},
+	{ 0x2b0400, 1, 0x1f, 0x964},
+	{ 0x2b0404, 255, 0x2, 0x40},
+	{ 0x2b0800, 1, 0x1f, 0x924},
+	{ 0x2b0840, 1, 0x1e, 0x924},
+	{ 0x2b0c00, 1, 0x1f, 0x924},
+	{ 0x2b1000, 1, 0x1f, 0x924},
+	{ 0x2b1040, 1, 0x1e, 0x924},
+	{ 0x2b1400, 1, 0x1f, 0x924},
+	{ 0x2b1440, 1, 0x1e, 0x924},
+	{ 0x2b1480, 1, 0x1e, 0x924},
+	{ 0x2b14c0, 1, 0x1e, 0x924},
+	{ 0x2b1800, 128, 0x1f, 0x80},
+	{ 0x2b1c00, 128, 0x1f, 0x80},
+	{ 0x2b2000, 1, 0x1f, 0xdb6},
+	{ 0x2b2400, 1, 0x1e, 0x964},
+	{ 0x2b2404, 5631, 0x1c, 0x40},
+	{ 0x2b8000, 1, 0x1f, 0xfff},
+	{ 0x2b8040, 1, 0x1f, 0xfff},
+	{ 0x2b8080, 1, 0x1f, 0xfff},
+	{ 0x2b80c0, 1, 0x1f, 0x924},
+	{ 0x2b8100, 1, 0x1f, 0x924},
+	{ 0x2b8140, 1, 0x1f, 0x924},
+	{ 0x2b8180, 1, 0x1f, 0x924},
+	{ 0x2b81c0, 1, 0x1f, 0x924},
+	{ 0x2b8200, 1, 0x1f, 0x924},
+	{ 0x2b8240, 1, 0x1f, 0x924},
+	{ 0x2b8280, 1, 0x1f, 0x924},
+	{ 0x2b82c0, 1, 0x1f, 0x924},
+	{ 0x2b8300, 1, 0x1f, 0x924},
+	{ 0x2b8340, 1, 0x1f, 0x924},
+	{ 0x2b8380, 1, 0x1f, 0x924},
+	{ 0x2b83c0, 1, 0x1f, 0x924},
+	{ 0x2b8400, 1, 0x1f, 0x924},
+	{ 0x2b8440, 1, 0x1f, 0x924},
+	{ 0x2b8480, 1, 0x1f, 0x924},
+	{ 0x2b84c0, 1, 0x1f, 0x924},
+	{ 0x2b8500, 1, 0x1f, 0x924},
+	{ 0x2b8540, 1, 0x1f, 0x924},
+	{ 0x2b8580, 1, 0x1f, 0x924},
+	{ 0x2b85c0, 19, 0x1c, 0x924},
+	{ 0x2b8800, 1, 0x1f, 0x924},
+	{ 0x2b8840, 1, 0x1f, 0x924},
+	{ 0x2b8880, 1, 0x1f, 0x924},
+	{ 0x2b88c0, 1, 0x1f, 0x924},
+	{ 0x2b8900, 1, 0x1f, 0x924},
+	{ 0x2b8940, 1, 0x1f, 0x924},
+	{ 0x2b8980, 1, 0x1f, 0x924},
+	{ 0x2b89c0, 1, 0x1f, 0x924},
+	{ 0x2b8a00, 1, 0x1f, 0x9a4},
+	{ 0x2b8a40, 1, 0x1f, 0x924},
+	{ 0x2b8a80, 1, 0x1f, 0x492},
+	{ 0x2b8ac0, 1, 0x1f, 0x924},
+	{ 0x2b8b00, 1, 0x1f, 0x924},
+	{ 0x2b8b40, 1, 0x1f, 0x924},
+	{ 0x2b8b80, 1, 0x1f, 0x924},
+	{ 0x2b8bc0, 1, 0x1f, 0x924},
+	{ 0x2b8c00, 1, 0x1f, 0x924},
+	{ 0x2b8c40, 1, 0x1f, 0x924},
+	{ 0x2b8c80, 1, 0x1f, 0x924},
+	{ 0x2b8cc0, 1, 0x1f, 0x924},
+	{ 0x2b8cc4, 1, 0x1c, 0x924},
+	{ 0x2b8d00, 1, 0x1f, 0x924},
+	{ 0x2b8d40, 1, 0x1f, 0x924},
+	{ 0x2b8d80, 1, 0x1f, 0x924},
+	{ 0x2b8dc0, 1, 0x1f, 0x924},
+	{ 0x2b8e00, 1, 0x1f, 0x924},
+	{ 0x2b8e40, 1, 0x1f, 0x924},
+	{ 0x2b8e80, 1, 0x1f, 0x924},
+	{ 0x2b8e84, 1, 0x1c, 0x924},
+	{ 0x2b8ec0, 1, 0x1e, 0x924},
+	{ 0x2b8f00, 1, 0x1e, 0x924},
+	{ 0x2b8f40, 1, 0x1e, 0x924},
+	{ 0x2b8f80, 1, 0x1e, 0x924},
+	{ 0x2b8fc0, 1, 0x1e, 0x924},
+	{ 0x2b8fd4, 5, 0x1c, 0x924},
+	{ 0x2b8fe8, 2, 0x18, 0x924},
+	{ 0x2b9000, 1, 0x1c, 0x924},
+	{ 0x2b9040, 3, 0x1c, 0x924},
+	{ 0x2b905c, 1, 0x18, 0x924},
+	{ 0x2b9064, 1, 0x10, 0x924},
+	{ 0x2b9080, 10, 0x10, 0x924},
+	{ 0x2c0000, 2, 0x1f, 0x1fff},
+	{ 0x300000, 65, 0x1f, 0x924},
+	{ 0x300124, 2, 0x1f, 0x1fff},
+	{ 0x300130, 3, 0x1f, 0x1fff},
+	{ 0x300140, 1, 0x1f, 0x1fff},
+	{ 0x30014c, 2, 0x1e, 0x924},
+	{ 0x300200, 27, 0x1f, 0x924},
+	{ 0x30026c, 1, 0x1f, 0xfff},
+	{ 0x300270, 12, 0x1f, 0x924},
+	{ 0x3002a0, 1, 0x1f, 0xfff},
+	{ 0x3002a4, 17, 0x1f, 0x924},
+	{ 0x300340, 4, 0x1f, 0x924},
+	{ 0x300380, 1, 0x1c, 0x924},
+	{ 0x300388, 1, 0x1c, 0x924},
+	{ 0x300390, 1, 0x1c, 0x924},
+	{ 0x300398, 1, 0x1c, 0x924},
+	{ 0x3003a0, 1, 0x1c, 0x924},
+	{ 0x3003a8, 2, 0x1c, 0x924},
+	{ 0x302000, 4, 0x1f, 0xf24},
+	{ 0x302010, 2044, 0x1f, 0xe00},
+	{ 0x304000, 4, 0x18, 0x924},
+	{ 0x320000, 1, 0x1f, 0xb24},
+	{ 0x320004, 5631, 0x1f, 0x200},
+	{ 0x325800, 2560, 0x1e, 0x200},
+	{ 0x328000, 1, 0x1f, 0xb24},
+	{ 0x328004, 8191, 0x1e, 0x200},
+	{ 0x330000, 1, 0x1f, 0xb24},
+	{ 0x330004, 15, 0x2, 0x200},
+	{ 0x330040, 1, 0x1e, 0xb24},
+	{ 0x330044, 239, 0x2, 0x200},
+	{ 0x330400, 1, 0x1f, 0xb24},
+	{ 0x330404, 255, 0x2, 0x200},
+	{ 0x330800, 1, 0x1f, 0x924},
+	{ 0x330840, 1, 0x1e, 0x924},
+	{ 0x330c00, 1, 0x1f, 0x924},
+	{ 0x331000, 1, 0x1f, 0x924},
+	{ 0x331040, 1, 0x1e, 0x924},
+	{ 0x331400, 1, 0x1f, 0x924},
+	{ 0x331440, 1, 0x1e, 0x924},
+	{ 0x331480, 1, 0x1e, 0x924},
+	{ 0x3314c0, 1, 0x1e, 0x924},
+	{ 0x331800, 128, 0x1f, 0x400},
+	{ 0x331c00, 128, 0x1f, 0x400},
+	{ 0x332000, 1, 0x1f, 0xdb6},
+	{ 0x332400, 1, 0x1e, 0xb24},
+	{ 0x332404, 5631, 0x1c, 0x200},
+	{ 0x338000, 1, 0x1f, 0xfff},
+	{ 0x338040, 1, 0x1f, 0xfff},
+	{ 0x338080, 1, 0x1f, 0xfff},
+	{ 0x3380c0, 1, 0x1f, 0xfff},
+	{ 0x338100, 1, 0x1f, 0x924},
+	{ 0x338140, 1, 0x1f, 0x924},
+	{ 0x338180, 1, 0x1f, 0x924},
+	{ 0x3381c0, 1, 0x1f, 0x924},
+	{ 0x338200, 1, 0x1f, 0x924},
+	{ 0x338240, 1, 0x1f, 0x924},
+	{ 0x338280, 1, 0x1f, 0x924},
+	{ 0x3382c0, 1, 0x1f, 0x924},
+	{ 0x338300, 1, 0x1f, 0x924},
+	{ 0x338340, 1, 0x1f, 0x924},
+	{ 0x338380, 1, 0x1f, 0x924},
+	{ 0x3383c0, 1, 0x1f, 0x924},
+	{ 0x338400, 1, 0x1f, 0x924},
+	{ 0x338440, 1, 0x1f, 0x924},
+	{ 0x338480, 1, 0x1f, 0x924},
+	{ 0x3384c0, 1, 0x1f, 0x924},
+	{ 0x338500, 1, 0x1f, 0x924},
+	{ 0x338540, 1, 0x1f, 0x924},
+	{ 0x338580, 1, 0x1f, 0x924},
+	{ 0x3385c0, 19, 0x1c, 0x924},
+	{ 0x338800, 1, 0x1f, 0x924},
+	{ 0x338840, 1, 0x1f, 0x924},
+	{ 0x338880, 1, 0x1f, 0x924},
+	{ 0x3388c0, 1, 0x1f, 0x924},
+	{ 0x338900, 1, 0x1f, 0x924},
+	{ 0x338940, 1, 0x1f, 0x924},
+	{ 0x338980, 1, 0x1f, 0x924},
+	{ 0x3389c0, 1, 0x1f, 0x924},
+	{ 0x338a00, 1, 0x1f, 0xd24},
+	{ 0x338a40, 1, 0x1f, 0x924},
+	{ 0x338a80, 1, 0x1f, 0x492},
+	{ 0x338ac0, 1, 0x1f, 0x924},
+	{ 0x338b00, 1, 0x1f, 0x924},
+	{ 0x338b40, 1, 0x1f, 0x924},
+	{ 0x338b80, 1, 0x1f, 0x924},
+	{ 0x338bc0, 1, 0x1f, 0x924},
+	{ 0x338c00, 1, 0x1f, 0x924},
+	{ 0x338c40, 1, 0x1f, 0x924},
+	{ 0x338c80, 1, 0x1f, 0x924},
+	{ 0x338cc0, 1, 0x1f, 0x924},
+	{ 0x338cc4, 1, 0x1c, 0x924},
+	{ 0x338d00, 1, 0x1f, 0x924},
+	{ 0x338d40, 1, 0x1f, 0x924},
+	{ 0x338d80, 1, 0x1f, 0x924},
+	{ 0x338dc0, 1, 0x1f, 0x924},
+	{ 0x338e00, 1, 0x1f, 0x924},
+	{ 0x338e40, 1, 0x1f, 0x924},
+	{ 0x338e80, 1, 0x1f, 0x924},
+	{ 0x338e84, 1, 0x1c, 0x924},
+	{ 0x338ec0, 1, 0x1e, 0x924},
+	{ 0x338f00, 1, 0x1e, 0x924},
+	{ 0x338f40, 1, 0x1e, 0x924},
+	{ 0x338f80, 1, 0x1e, 0x924},
+	{ 0x338fc0, 1, 0x1e, 0x924},
+	{ 0x338fd4, 5, 0x1c, 0x924},
+	{ 0x338fe8, 2, 0x18, 0x924},
+	{ 0x339000, 1, 0x1c, 0x924},
+	{ 0x339040, 3, 0x1c, 0x924},
+	{ 0x33905c, 1, 0x18, 0x924},
+	{ 0x339064, 1, 0x10, 0x924},
+	{ 0x339080, 10, 0x10, 0x924},
+	{ 0x340000, 2, 0x1f, 0x924},
+	{ 0x3a0000, 40960, 0x1c, 0x1000}
+};
+
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
+
+static const struct reg_addr idle_reg_addrs[] = {
+	{ 0x2104, 1, 0x1f, 0xfff},
+	{ 0x2110, 2, 0x1f, 0xfff},
+	{ 0x211c, 8, 0x1f, 0xfff},
+	{ 0x2814, 1, 0x1f, 0xfff},
+	{ 0x281c, 2, 0x1f, 0xfff},
+	{ 0x2854, 1, 0x1f, 0xfff},
+	{ 0x285c, 1, 0x1f, 0xfff},
+	{ 0x3040, 1, 0x1f, 0xfff},
+	{ 0x9010, 7, 0x1c, 0xfff},
+	{ 0x9030, 1, 0x1c, 0xfff},
+	{ 0x9068, 16, 0x1c, 0xfff},
+	{ 0x9230, 2, 0x1c, 0xfff},
+	{ 0x9244, 1, 0x1c, 0xfff},
+	{ 0x9298, 1, 0x1c, 0xfff},
+	{ 0x92a8, 1, 0x1c, 0x1fff},
+	{ 0xa38c, 1, 0x1f, 0x1fff},
+	{ 0xa3c4, 1, 0x1e, 0xfff},
+	{ 0xa404, 1, 0x1f, 0xfff},
+	{ 0xa408, 2, 0x1f, 0x1fff},
+	{ 0xa42c, 12, 0x1f, 0xfff},
+	{ 0xa580, 1, 0x1f, 0x1fff},
+	{ 0xa590, 1, 0x1f, 0x1fff},
+	{ 0xa600, 5, 0x1e, 0xfff},
+	{ 0xa618, 1, 0x1e, 0xfff},
+	{ 0xa714, 1, 0x1c, 0xfff},
+	{ 0xa720, 1, 0x1c, 0xfff},
+	{ 0xa750, 1, 0x1c, 0xfff},
+	{ 0xc09c, 1, 0x3, 0xfff},
+	{ 0x103b0, 1, 0x1f, 0xfff},
+	{ 0x103c0, 1, 0x1f, 0xfff},
+	{ 0x103d0, 1, 0x3, 0x1fff},
+	{ 0x10418, 1, 0x1f, 0xfff},
+	{ 0x10420, 1, 0x1f, 0xfff},
+	{ 0x10428, 1, 0x1f, 0xfff},
+	{ 0x10460, 1, 0x1f, 0xfff},
+	{ 0x10474, 1, 0x1f, 0xfff},
+	{ 0x104e0, 1, 0x1f, 0xfff},
+	{ 0x104ec, 1, 0x1f, 0xfff},
+	{ 0x104f8, 1, 0x1f, 0xfff},
+	{ 0x10508, 1, 0x1f, 0xfff},
+	{ 0x10530, 1, 0x1f, 0xfff},
+	{ 0x10538, 1, 0x1f, 0xfff},
+	{ 0x10548, 1, 0x1f, 0xfff},
+	{ 0x10558, 1, 0x1f, 0xfff},
+	{ 0x182a8, 1, 0x1c, 0xfff},
+	{ 0x182b8, 1, 0x1c, 0xfff},
+	{ 0x18308, 1, 0x1c, 0xfff},
+	{ 0x18318, 1, 0x1c, 0xfff},
+	{ 0x18338, 1, 0x1c, 0xfff},
+	{ 0x18348, 1, 0x1c, 0xfff},
+	{ 0x183bc, 1, 0x1c, 0x1fff},
+	{ 0x183cc, 1, 0x1c, 0x1fff},
+	{ 0x18570, 1, 0x18, 0xfff},
+	{ 0x18578, 1, 0x18, 0xfff},
+	{ 0x1858c, 1, 0x18, 0xfff},
+	{ 0x18594, 1, 0x18, 0xfff},
+	{ 0x1862c, 4, 0x10, 0xfff},
+	{ 0x2021c, 11, 0x1f, 0xfff},
+	{ 0x202a8, 1, 0x1f, 0xfff},
+	{ 0x202b8, 1, 0x1f, 0x1fff},
+	{ 0x20404, 1, 0x1f, 0xfff},
+	{ 0x2040c, 2, 0x1f, 0xfff},
+	{ 0x2041c, 2, 0x1f, 0xfff},
+	{ 0x40154, 14, 0x1f, 0xfff},
+	{ 0x40198, 1, 0x1f, 0x1fff},
+	{ 0x404ac, 1, 0x1f, 0xfff},
+	{ 0x404bc, 1, 0x1f, 0x1fff},
+	{ 0x42290, 1, 0x1f, 0xfff},
+	{ 0x422a0, 1, 0x1f, 0xfff},
+	{ 0x422b0, 1, 0x1f, 0x1fff},
+	{ 0x42548, 1, 0x1f, 0xfff},
+	{ 0x42550, 1, 0x1f, 0xfff},
+	{ 0x42558, 1, 0x1f, 0xfff},
+	{ 0x50160, 8, 0x1f, 0xfff},
+	{ 0x501d0, 1, 0x1f, 0xfff},
+	{ 0x501e0, 1, 0x1f, 0x1fff},
+	{ 0x50204, 1, 0x1f, 0xfff},
+	{ 0x5020c, 2, 0x1f, 0xfff},
+	{ 0x5021c, 1, 0x1f, 0xfff},
+	{ 0x60090, 1, 0x1f, 0xfff},
+	{ 0x6011c, 1, 0x1f, 0xfff},
+	{ 0x6012c, 1, 0x1f, 0x1fff},
+	{ 0xc101c, 1, 0x1f, 0xfff},
+	{ 0xc102c, 1, 0x1f, 0x1fff},
+	{ 0xc2290, 1, 0x1f, 0xfff},
+	{ 0xc22a0, 1, 0x1f, 0xfff},
+	{ 0xc22b0, 1, 0x1f, 0x1fff},
+	{ 0xc2548, 1, 0x1f, 0xfff},
+	{ 0xc2550, 1, 0x1f, 0xfff},
+	{ 0xc2558, 1, 0x1f, 0xfff},
+	{ 0xc4294, 1, 0x1f, 0xfff},
+	{ 0xc42a4, 1, 0x1f, 0xfff},
+	{ 0xc42b4, 1, 0x1f, 0x1fff},
+	{ 0xc4550, 1, 0x1f, 0xfff},
+	{ 0xc4558, 1, 0x1f, 0xfff},
+	{ 0xc4560, 1, 0x1f, 0xfff},
+	{ 0xd016c, 8, 0x1f, 0xfff},
+	{ 0xd01d8, 1, 0x1f, 0xfff},
+	{ 0xd01e8, 1, 0x1f, 0x1fff},
+	{ 0xd0204, 1, 0x1f, 0xfff},
+	{ 0xd020c, 3, 0x1f, 0xfff},
+	{ 0xe0154, 8, 0x1f, 0xfff},
+	{ 0xe01c8, 1, 0x1f, 0xfff},
+	{ 0xe01d8, 1, 0x1f, 0x1fff},
+	{ 0xe0204, 1, 0x1f, 0xfff},
+	{ 0xe020c, 2, 0x1f, 0xfff},
+	{ 0xe021c, 2, 0x1f, 0xfff},
+	{ 0x101014, 1, 0x1f, 0xfff},
+	{ 0x101030, 1, 0x1f, 0xfff},
+	{ 0x101040, 1, 0x1f, 0x1fff},
+	{ 0x102058, 1, 0x1f, 0x1fff},
+	{ 0x102080, 16, 0x1f, 0xfff},
+	{ 0x103004, 2, 0x1f, 0xfff},
+	{ 0x103068, 1, 0x1f, 0xfff},
+	{ 0x103078, 1, 0x1f, 0xfff},
+	{ 0x103088, 1, 0x1f, 0x1fff},
+	{ 0x10309c, 2, 0x1e, 0xfff},
+	{ 0x1030b8, 2, 0x1c, 0xfff},
+	{ 0x1030cc, 1, 0x1c, 0xfff},
+	{ 0x1030e0, 1, 0x1c, 0xfff},
+	{ 0x104004, 1, 0x1f, 0xfff},
+	{ 0x104018, 1, 0x1f, 0xfff},
+	{ 0x104020, 1, 0x1f, 0xfff},
+	{ 0x10403c, 1, 0x1f, 0xfff},
+	{ 0x1040fc, 1, 0x1f, 0xfff},
+	{ 0x10410c, 1, 0x1f, 0x1fff},
+	{ 0x104400, 1, 0x1f, 0x1fff},
+	{ 0x104404, 63, 0x1f, 0xfff},
+	{ 0x104800, 1, 0x1f, 0x1fff},
+	{ 0x104804, 63, 0x1f, 0xfff},
+	{ 0x105000, 4, 0x1f, 0x1fff},
+	{ 0x105010, 252, 0x1f, 0xfff},
+	{ 0x108094, 1, 0x3, 0xfff},
+	{ 0x1201b0, 2, 0x1f, 0xfff},
+	{ 0x12032c, 1, 0x1f, 0xfff},
+	{ 0x12036c, 3, 0x1f, 0xfff},
+	{ 0x120408, 2, 0x1f, 0xfff},
+	{ 0x120414, 15, 0x1f, 0xfff},
+	{ 0x120478, 2, 0x1f, 0xfff},
+	{ 0x12052c, 1, 0x1f, 0xfff},
+	{ 0x120564, 3, 0x1f, 0xfff},
+	{ 0x12057c, 1, 0x1f, 0x1fff},
+	{ 0x12058c, 1, 0x1f, 0x1fff},
+	{ 0x120608, 1, 0x1e, 0xfff},
+	{ 0x120748, 1, 0x1c, 0xfff},
+	{ 0x120778, 2, 0x1c, 0xfff},
+	{ 0x120808, 3, 0x1f, 0xfff},
+	{ 0x120818, 1, 0x1f, 0xfff},
+	{ 0x120820, 1, 0x1f, 0xfff},
+	{ 0x120828, 1, 0x1f, 0xfff},
+	{ 0x120830, 1, 0x1f, 0xfff},
+	{ 0x120838, 1, 0x1f, 0xfff},
+	{ 0x120840, 1, 0x1f, 0xfff},
+	{ 0x120848, 1, 0x1f, 0xfff},
+	{ 0x120850, 1, 0x1f, 0xfff},
+	{ 0x120858, 1, 0x1f, 0xfff},
+	{ 0x120860, 1, 0x1f, 0xfff},
+	{ 0x120868, 1, 0x1f, 0xfff},
+	{ 0x120870, 1, 0x1f, 0xfff},
+	{ 0x120878, 1, 0x1f, 0xfff},
+	{ 0x120880, 1, 0x1f, 0xfff},
+	{ 0x120888, 1, 0x1f, 0xfff},
+	{ 0x120890, 1, 0x1f, 0xfff},
+	{ 0x120898, 1, 0x1f, 0xfff},
+	{ 0x1208a0, 1, 0x1f, 0xfff},
+	{ 0x1208a8, 1, 0x1f, 0xfff},
+	{ 0x1208b0, 1, 0x1f, 0xfff},
+	{ 0x1208b8, 1, 0x1f, 0xfff},
+	{ 0x1208c0, 1, 0x1f, 0xfff},
+	{ 0x1208c8, 1, 0x1f, 0xfff},
+	{ 0x1208d0, 1, 0x1f, 0xfff},
+	{ 0x1208d8, 1, 0x1f, 0xfff},
+	{ 0x1208e0, 1, 0x1f, 0xfff},
+	{ 0x1208e8, 1, 0x1f, 0xfff},
+	{ 0x1208f0, 1, 0x1f, 0xfff},
+	{ 0x1208f8, 1, 0x1f, 0xfff},
+	{ 0x120900, 1, 0x1f, 0xfff},
+	{ 0x120908, 1, 0x1f, 0xfff},
+	{ 0x130030, 1, 0x1c, 0xfff},
+	{ 0x13004c, 3, 0x1c, 0xfff},
+	{ 0x130064, 2, 0x1c, 0xfff},
+	{ 0x13009c, 1, 0x1c, 0x1fff},
+	{ 0x130130, 1, 0x1c, 0xfff},
+	{ 0x13016c, 1, 0x1c, 0xfff},
+	{ 0x130300, 1, 0x1c, 0xfff},
+	{ 0x130480, 1, 0x1c, 0xfff},
+	{ 0x14005c, 2, 0xf, 0xfff},
+	{ 0x1400d0, 2, 0xf, 0xfff},
+	{ 0x1400e0, 1, 0xf, 0xfff},
+	{ 0x1401c8, 1, 0xf, 0xfff},
+	{ 0x140200, 6, 0xf, 0xfff},
+	{ 0x140338, 7, 0x10, 0xfff},
+	{ 0x140370, 7, 0x10, 0xfff},
+	{ 0x15c1bc, 6, 0x10, 0xfff},
+	{ 0x15c230, 7, 0x10, 0xfff},
+	{ 0x16101c, 1, 0x1f, 0xfff},
+	{ 0x16102c, 1, 0x1f, 0x1fff},
+	{ 0x164014, 2, 0x1f, 0xfff},
+	{ 0x1640f0, 1, 0x1f, 0xfff},
+	{ 0x166290, 1, 0x1f, 0xfff},
+	{ 0x1662a0, 1, 0x1f, 0xfff},
+	{ 0x1662b0, 1, 0x1f, 0x1fff},
+	{ 0x166548, 1, 0x1f, 0xfff},
+	{ 0x166550, 1, 0x1f, 0xfff},
+	{ 0x166558, 1, 0x1f, 0xfff},
+	{ 0x168000, 1, 0x1f, 0xfff},
+	{ 0x168008, 1, 0x1f, 0xfff},
+	{ 0x168010, 1, 0x1f, 0xfff},
+	{ 0x168018, 1, 0x1f, 0xfff},
+	{ 0x168028, 2, 0x1f, 0xfff},
+	{ 0x168058, 9, 0x1f, 0xfff},
+	{ 0x168238, 1, 0x1f, 0xfff},
+	{ 0x1682d0, 7, 0x1f, 0xfff},
+	{ 0x168300, 2, 0x3, 0xfff},
+	{ 0x168308, 65, 0x1f, 0xfff},
+	{ 0x168410, 2, 0x1f, 0xfff},
+	{ 0x168438, 1, 0x1f, 0xfff},
+	{ 0x168448, 1, 0x1f, 0x1fff},
+	{ 0x168a00, 128, 0x1f, 0xfff},
+	{ 0x16e200, 128, 0x2, 0xfff},
+	{ 0x16e404, 2, 0x2, 0xfff},
+	{ 0x16e584, 64, 0x2, 0xfff},
+	{ 0x16e684, 2, 0x1e, 0xfff},
+	{ 0x16e68c, 4, 0x2, 0xfff},
+	{ 0x16e6fc, 4, 0x1c, 0xfff},
+	{ 0x16e7ac, 12, 0x10, 0xfff},
+	{ 0x1700a4, 1, 0x1f, 0xfff},
+	{ 0x1700ac, 2, 0x1f, 0xfff},
+	{ 0x1700c0, 1, 0x1f, 0xfff},
+	{ 0x170174, 1, 0x1f, 0xfff},
+	{ 0x170184, 1, 0x1f, 0x1fff},
+	{ 0x1800f4, 1, 0x1f, 0xfff},
+	{ 0x180104, 1, 0x1f, 0xfff},
+	{ 0x180114, 1, 0x1f, 0x1fff},
+	{ 0x180124, 1, 0x1f, 0x1fff},
+	{ 0x18026c, 1, 0x1f, 0xfff},
+	{ 0x1802a0, 1, 0x1f, 0xfff},
+	{ 0x1b8000, 1, 0x1f, 0xfff},
+	{ 0x1b8040, 1, 0x1f, 0xfff},
+	{ 0x1b8080, 1, 0x1f, 0xfff},
+	{ 0x1b80c0, 1, 0x1f, 0xfff},
+	{ 0x200104, 1, 0x1f, 0xfff},
+	{ 0x200114, 1, 0x1f, 0xfff},
+	{ 0x200124, 1, 0x1f, 0x1fff},
+	{ 0x200134, 1, 0x1f, 0x1fff},
+	{ 0x20026c, 1, 0x1f, 0xfff},
+	{ 0x2002a0, 1, 0x1f, 0xfff},
+	{ 0x238000, 1, 0x1f, 0xfff},
+	{ 0x238040, 1, 0x1f, 0xfff},
+	{ 0x238080, 1, 0x1f, 0xfff},
+	{ 0x2380c0, 1, 0x1f, 0xfff},
+	{ 0x280104, 1, 0x1f, 0xfff},
+	{ 0x280114, 1, 0x1f, 0xfff},
+	{ 0x280124, 1, 0x1f, 0x1fff},
+	{ 0x280134, 1, 0x1f, 0x1fff},
+	{ 0x28026c, 1, 0x1f, 0xfff},
+	{ 0x2802a0, 1, 0x1f, 0xfff},
+	{ 0x2b8000, 1, 0x1f, 0xfff},
+	{ 0x2b8040, 1, 0x1f, 0xfff},
+	{ 0x2b8080, 1, 0x1f, 0xfff},
+	{ 0x300104, 1, 0x1f, 0xfff},
+	{ 0x300114, 1, 0x1f, 0xfff},
+	{ 0x300124, 1, 0x1f, 0x1fff},
+	{ 0x300134, 1, 0x1f, 0x1fff},
+	{ 0x30026c, 1, 0x1f, 0xfff},
+	{ 0x3002a0, 1, 0x1f, 0xfff},
+	{ 0x338000, 1, 0x1f, 0xfff},
+	{ 0x338040, 1, 0x1f, 0xfff},
+	{ 0x338080, 1, 0x1f, 0xfff},
+	{ 0x3380c0, 1, 0x1f, 0xfff}
+};
+
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
+
+static const u32 read_reg_e1[] = {
+	0x1b1000};
+
+static const struct wreg_addr wreg_addr_e1 = {
+	0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
+
+static const u32 read_reg_e1h[] = {
+	0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e1h = {
+	0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
+
+static const u32 read_reg_e2[] = {
+	0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e2 = {
+	0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3[] = {
+	0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+	0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+	0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+	0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+	{19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788,
+	 25223, 34631, 19074},
+	{31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780,
+	 25953, 39933, 35895},
+	{36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+	 25608, 41377, 43903},
+	{45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+	 25616, 42067, 43903},
+	{45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+	 25679, 42482, 43903}
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 0000000..d84efcd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,3669 @@
+/* bnx2x_ethtool.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+
+/* Note: in the format strings below %s is replaced by the queue-name which is
+ * either its index or 'fcoe' for the fcoe queue. Make sure the format string
+ * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
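+ * For example, queue 2's first entry is reported as "[2]: rx_bytes" and
+ * the FCoE queue's as "[fcoe]: rx_bytes".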
+ */
+#define MAX_QUEUE_NAME_LEN	4
+static const struct {
+	long offset;
+	int size;
+	char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+						8, "[%s]: rx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+						8, "[%s]: rx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+						8, "[%s]: rx_bcast_packets" },
+	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%s]: rx_discards" },
+	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
+					 4, "[%s]: rx_phy_ip_err_discards"},
+	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
+					 4, "[%s]: rx_skb_alloc_discard" },
+	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+						8, "[%s]: tx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+						8, "[%s]: tx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+						8, "[%s]: tx_bcast_packets" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+						8, "[%s]: tpa_aggregations" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+					8, "[%s]: tpa_aggregated_frames"},
+	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"},
+	{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
+					4, "[%s]: driver_filtered_tx_pkt" }
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+	long offset;
+	int size;
+	u32 flags;
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+	char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
+	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
+	{ STATS_OFFSET32(no_buff_discard_hi),
+				8, STATS_FLAGS_BOTH, "rx_discards" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(mf_tag_discard),
+				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+	{ STATS_OFFSET32(pfc_frames_received_hi),
+				8, STATS_FLAGS_PORT, "pfc_frames_received" },
+	{ STATS_OFFSET32(pfc_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "pfc_frames_sent" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+	{ STATS_OFFSET32(pause_frames_received_hi),
+				8, STATS_FLAGS_PORT, "rx_pause_frames" },
+	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(nig_timer_max),
+			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+	{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+				8, STATS_FLAGS_PORT, "tx_deferred" },
+	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+	{ STATS_OFFSET32(pause_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "tx_pause_frames" },
+	{ STATS_OFFSET32(total_tpa_aggregations_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+	{ STATS_OFFSET32(total_tpa_bytes_hi),
+			8, STATS_FLAGS_FUNC, "tpa_bytes"},
+	{ STATS_OFFSET32(recoverable_error),
+			4, STATS_FLAGS_FUNC, "recoverable_errors" },
+	{ STATS_OFFSET32(unrecoverable_error),
+			4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+	{ STATS_OFFSET32(driver_filtered_tx_pkt),
+			4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
+	{ STATS_OFFSET32(eee_tx_lpi),
+			4, STATS_FLAGS_PORT, "Tx LPI entry count"}
+};
+
+#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+	int port_type;
+	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+	switch (bp->link_params.phy[phy_idx].media_type) {
+	case ETH_PHY_SFPP_10G_FIBER:
+	case ETH_PHY_SFP_1G_FIBER:
+	case ETH_PHY_XFP_FIBER:
+	case ETH_PHY_KR:
+	case ETH_PHY_CX4:
+		port_type = PORT_FIBRE;
+		break;
+	case ETH_PHY_DA_TWINAX:
+		port_type = PORT_DA;
+		break;
+	case ETH_PHY_BASE_T:
+		port_type = PORT_TP;
+		break;
+	case ETH_PHY_NOT_PRESENT:
+		port_type = PORT_NONE;
+		break;
+	case ETH_PHY_UNSPECIFIED:
+	default:
+		port_type = PORT_OTHER;
+		break;
+	}
+	return port_type;
+}
+
+static int bnx2x_get_vf_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->state == BNX2X_STATE_OPEN) {
+		if (test_bit(BNX2X_LINK_REPORT_FD,
+			     &bp->vf_link_vars.link_report_flags))
+			cmd->duplex = DUPLEX_FULL;
+		else
+			cmd->duplex = DUPLEX_HALF;
+
+		ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+	} else {
+		cmd->duplex = DUPLEX_UNKNOWN;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+	}
+
+	cmd->port		= PORT_OTHER;
+	cmd->phy_address	= 0;
+	cmd->transceiver	= XCVR_INTERNAL;
+	cmd->autoneg		= AUTONEG_DISABLE;
+	cmd->maxtxpkt		= 0;
+	cmd->maxrxpkt		= 0;
+
+	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+	   "  supported 0x%x  advertising 0x%x  speed %u\n"
+	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising,
+	   ethtool_cmd_speed(cmd),
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	return 0;
+}
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	u32 media_type;
+
+	/* Dual Media boards present all available port types */
+	cmd->supported = bp->port.supported[cfg_idx] |
+		(bp->port.supported[cfg_idx ^ 1] &
+		 (SUPPORTED_TP | SUPPORTED_FIBRE));
+	cmd->advertising = bp->port.advertising[cfg_idx];
+	media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
+	if (media_type == ETH_PHY_SFP_1G_FIBER) {
+		cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+		cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+	}
+
+	if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
+	    !(bp->flags & MF_FUNC_DIS)) {
+		cmd->duplex = bp->link_vars.duplex;
+
+		if (IS_MF(bp) && !BP_NOMCP(bp))
+			ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+		else
+			ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+	} else {
+		cmd->duplex = DUPLEX_UNKNOWN;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+	}
+
+	cmd->port = bnx2x_get_port_type(bp);
+
+	cmd->phy_address = bp->mdio.prtad;
+	cmd->transceiver = XCVR_INTERNAL;
+
+	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
+		cmd->autoneg = AUTONEG_ENABLE;
+	else
+		cmd->autoneg = AUTONEG_DISABLE;
+
+	/* Publish LP advertised speeds and FC */
+	if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+		u32 status = bp->link_vars.link_status;
+
+		cmd->lp_advertising |= ADVERTISED_Autoneg;
+		if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
+			cmd->lp_advertising |= ADVERTISED_Pause;
+		if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+			cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+		if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+		if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+		if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+		if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+		if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+		if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
+			if (media_type == ETH_PHY_KR) {
+				cmd->lp_advertising |=
+					ADVERTISED_1000baseKX_Full;
+			} else {
+				cmd->lp_advertising |=
+					ADVERTISED_1000baseT_Full;
+			}
+		}
+		if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
+			if (media_type == ETH_PHY_KR) {
+				cmd->lp_advertising |=
+					ADVERTISED_10000baseKR_Full;
+			} else {
+				cmd->lp_advertising |=
+					ADVERTISED_10000baseT_Full;
+			}
+		}
+		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
+	}
+
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+
+	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+	   "  supported 0x%x  advertising 0x%x  speed %u\n"
+	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising,
+	   ethtool_cmd_speed(cmd),
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	return 0;
+}
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+	u32 speed, phy_idx;
+
+	if (IS_MF_SD(bp))
+		return 0;
+
+	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+	   "  supported 0x%x  advertising 0x%x  speed %u\n"
+	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising,
+	   ethtool_cmd_speed(cmd),
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	speed = ethtool_cmd_speed(cmd);
+
+	/* If we received a request for an unknown duplex, assume full */
+	if (cmd->duplex == DUPLEX_UNKNOWN)
+		cmd->duplex = DUPLEX_FULL;
+
+	if (IS_MF_SI(bp)) {
+		u32 part;
+		u32 line_speed = bp->link_vars.line_speed;
+
+		/* use 10G if no link detected */
+		if (!line_speed)
+			line_speed = 10000;
+
+		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "To set speed BC %X or higher is required, please upgrade BC\n",
+			   REQ_BC_VER_4_SET_MF_BW);
+			return -EINVAL;
+		}
+
+		part = (speed * 100) / line_speed;
+
+		if (line_speed < speed || !part) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
+			return -EINVAL;
+		}
+
+		if (bp->state != BNX2X_STATE_OPEN)
+			/* store value for following "load" */
+			bp->pending_max = part;
+		else
+			bnx2x_update_max_mf_config(bp, part);
+
+		return 0;
+	}
+
+	cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	old_multi_phy_config = bp->link_params.multi_phy_config;
+	if (cmd->port != bnx2x_get_port_type(bp)) {
+		switch (cmd->port) {
+		case PORT_TP:
+			if (!(bp->port.supported[0] & SUPPORTED_TP ||
+			      bp->port.supported[1] & SUPPORTED_TP)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			break;
+		case PORT_FIBRE:
+		case PORT_DA:
+		case PORT_NONE:
+			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			break;
+		default:
+			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
+			return -EINVAL;
+		}
+	}
+	/* Save the new config in case the command completes successfully */
+	new_multi_phy_config = bp->link_params.multi_phy_config;
+	/* Get the new cfg_idx */
+	cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	/* Restore the old config in case the command fails */
+	bp->link_params.multi_phy_config = old_multi_phy_config;
+	DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		u32 an_supported_speed = bp->port.supported[cfg_idx];
+		if (bp->link_params.phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+			an_supported_speed |= (SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full);
+		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+			DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		/* advertise the requested speed and duplex if supported */
+		if (cmd->advertising & ~an_supported_speed) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Advertisement parameters are not supported\n");
+			return -EINVAL;
+		}
+
+		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+					 cmd->advertising);
+		if (cmd->advertising) {
+
+			bp->link_params.speed_cap_mask[cfg_idx] = 0;
+			if (cmd->advertising & ADVERTISED_10baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_10baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+			}
+			if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+						ADVERTISED_1000baseKX_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+			if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+						ADVERTISED_10000baseKX4_Full |
+						ADVERTISED_10000baseKR_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+			if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
+		}
+	} else { /* forced speed */
+		/* advertise the requested speed and duplex if supported */
+		switch (speed) {
+		case SPEED_10:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported[cfg_idx] &
+				      SUPPORTED_10baseT_Full)) {
+					DP(BNX2X_MSG_ETHTOOL,
+					   "10M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported[cfg_idx] &
+				      SUPPORTED_10baseT_Half)) {
+					DP(BNX2X_MSG_ETHTOOL,
+					   "10M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_100:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported[cfg_idx] &
+						SUPPORTED_100baseT_Full)) {
+					DP(BNX2X_MSG_ETHTOOL,
+					   "100M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported[cfg_idx] &
+						SUPPORTED_100baseT_Half)) {
+					DP(BNX2X_MSG_ETHTOOL,
+					   "100M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_1000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "1G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (bp->port.supported[cfg_idx] &
+			     SUPPORTED_1000baseT_Full) {
+				advertising = (ADVERTISED_1000baseT_Full |
+					       ADVERTISED_TP);
+
+			} else if (bp->port.supported[cfg_idx] &
+				   SUPPORTED_1000baseKX_Full) {
+				advertising = ADVERTISED_1000baseKX_Full;
+			} else {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "1G full not supported\n");
+				return -EINVAL;
+			}
+
+			break;
+
+		case SPEED_2500:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "2.5G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported[cfg_idx]
+			      & SUPPORTED_2500baseX_Full)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "2.5G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_2500baseX_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_10000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "10G half not supported\n");
+				return -EINVAL;
+			}
+			phy_idx = bnx2x_get_cur_phy_idx(bp);
+			if ((bp->port.supported[cfg_idx] &
+			     SUPPORTED_10000baseT_Full) &&
+			    (bp->link_params.phy[phy_idx].media_type !=
+			     ETH_PHY_SFP_1G_FIBER)) {
+				advertising = (ADVERTISED_10000baseT_Full |
+					       ADVERTISED_FIBRE);
+			} else if (bp->port.supported[cfg_idx] &
+			       SUPPORTED_10000baseKR_Full) {
+				advertising = (ADVERTISED_10000baseKR_Full |
+					       ADVERTISED_FIBRE);
+			} else {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "10G full not supported\n");
+				return -EINVAL;
+			}
+
+			break;
+
+		default:
+			DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
+			return -EINVAL;
+		}
+
+		bp->link_params.req_line_speed[cfg_idx] = speed;
+		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+		bp->port.advertising[cfg_idx] = advertising;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
+	   "  req_duplex %d  advertising 0x%x\n",
+	   bp->link_params.req_line_speed[cfg_idx],
+	   bp->link_params.req_duplex[cfg_idx],
+	   bp->port.advertising[cfg_idx]);
+
+	/* Set new config */
+	bp->link_params.multi_phy_config = new_multi_phy_config;
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_force_link_reset(bp);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
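+/* Dump presets are numbered 1..13; DUMP_ALL_PRESETS has one bit set per
+ * preset (0x1FFF).
+ */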
+#define DUMP_ALL_PRESETS		0x1FFF
+#define DUMP_MAX_PRESETS		13
+
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
+{
+	if (CHIP_IS_E1(bp))
+		return dump_num_registers[0][preset-1];
+	else if (CHIP_IS_E1H(bp))
+		return dump_num_registers[1][preset-1];
+	else if (CHIP_IS_E2(bp))
+		return dump_num_registers[2][preset-1];
+	else if (CHIP_IS_E3A0(bp))
+		return dump_num_registers[3][preset-1];
+	else if (CHIP_IS_E3B0(bp))
+		return dump_num_registers[4][preset-1];
+	else
+		return 0;
+}
+
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+	u32 preset_idx;
+	int regdump_len = 0;
+
+	/* Calculate the total preset regs length */
+	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+		regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+	return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int regdump_len = 0;
+
+	if (IS_VF(bp))
+		return 0;
+
+	regdump_len = __bnx2x_get_regs_len(bp);
+	regdump_len *= 4;
+	regdump_len += sizeof(struct dump_header);
+
+	return regdump_len;
+}
+
+#define IS_E1_REG(chips)	((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips)	((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips)	((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips)	((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips)	((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
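+/* Preset indices are 1-based, so preset N maps to bit (N - 1) of the presets
+ * mask; e.g. preset 3 tests bit 0x4.
+ */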
+#define IS_REG_IN_PRESET(presets, idx)  \
+		((presets & (1 << (idx-1))) == (1 << (idx-1)))
+
+/******* Paged registers info selectors ********/
+static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_vals_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_vals_e3;
+	else
+		return NULL;
+}
+
+static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_MODE_VALUES_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_MODE_VALUES_E3;
+	else
+		return 0;
+}
+
+static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_write_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_write_regs_e3;
+	else
+		return NULL;
+}
+
+static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_WRITE_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_WRITE_REGS_E3;
+	else
+		return 0;
+}
+
+static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_read_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_read_regs_e3;
+	else
+		return NULL;
+}
+
+static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_READ_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_READ_REGS_E3;
+	else
+		return 0;
+}
+
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+				       const struct reg_addr *reg_info)
+{
+	if (CHIP_IS_E1(bp))
+		return IS_E1_REG(reg_info->chips);
+	else if (CHIP_IS_E1H(bp))
+		return IS_E1H_REG(reg_info->chips);
+	else if (CHIP_IS_E2(bp))
+		return IS_E2_REG(reg_info->chips);
+	else if (CHIP_IS_E3A0(bp))
+		return IS_E3A0_REG(reg_info->chips);
+	else if (CHIP_IS_E3B0(bp))
+		return IS_E3B0_REG(reg_info->chips);
+	else
+		return false;
+}
+
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+	const struct wreg_addr *wreg_info)
+{
+	if (CHIP_IS_E1(bp))
+		return IS_E1_REG(wreg_info->chips);
+	else if (CHIP_IS_E1H(bp))
+		return IS_E1H_REG(wreg_info->chips);
+	else if (CHIP_IS_E2(bp))
+		return IS_E2_REG(wreg_info->chips);
+	else if (CHIP_IS_E3A0(bp))
+		return IS_E3A0_REG(wreg_info->chips);
+	else if (CHIP_IS_E3B0(bp))
+		return IS_E3B0_REG(wreg_info->chips);
+	else
+		return false;
+}
+
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp:	device handle
+ * @p:		output buffer
+ * @preset:	preset index being dumped
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
+{
+	u32 i, j, k, n;
+
+	/* addresses of the paged registers */
+	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+	/* number of paged registers */
+	int num_pages = __bnx2x_get_page_reg_num(bp);
+	/* write addresses */
+	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+	/* number of write addresses */
+	int write_num = __bnx2x_get_page_write_num(bp);
+	/* read addresses info */
+	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+	/* number of read addresses */
+	int read_num = __bnx2x_get_page_read_num(bp);
+	u32 addr, size;
+
+	for (i = 0; i < num_pages; i++) {
+		for (j = 0; j < write_num; j++) {
+			REG_WR(bp, write_addr[j], page_addr[i]);
+
+			for (k = 0; k < read_num; k++) {
+				if (IS_REG_IN_PRESET(read_addr[k].presets,
+						     preset)) {
+					size = read_addr[k].size;
+					for (n = 0; n < size; n++) {
+						addr = read_addr[k].addr + n*4;
+						*p++ = REG_RD(bp, addr);
+					}
+				}
+			}
+		}
+	}
+}
+
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
+{
+	u32 i, j, addr;
+	const struct wreg_addr *wreg_addr_p = NULL;
+
+	if (CHIP_IS_E1(bp))
+		wreg_addr_p = &wreg_addr_e1;
+	else if (CHIP_IS_E1H(bp))
+		wreg_addr_p = &wreg_addr_e1h;
+	else if (CHIP_IS_E2(bp))
+		wreg_addr_p = &wreg_addr_e2;
+	else if (CHIP_IS_E3A0(bp))
+		wreg_addr_p = &wreg_addr_e3;
+	else if (CHIP_IS_E3B0(bp))
+		wreg_addr_p = &wreg_addr_e3b0;
+
+	/* Read the idle_chk registers */
+	for (i = 0; i < IDLE_REGS_COUNT; i++) {
+		if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+		    IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+			for (j = 0; j < idle_reg_addrs[i].size; j++)
+				*p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+		}
+	}
+
+	/* Read the regular registers */
+	for (i = 0; i < REGS_COUNT; i++) {
+		if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+		    IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
+			for (j = 0; j < reg_addrs[i].size; j++)
+				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+		}
+	}
+
+	/* Read the CAM registers */
+	if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+	    IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+		for (i = 0; i < wreg_addr_p->size; i++) {
+			*p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
+
+			/* In case of a wreg_addr register, read additional
+			 * registers from the read_regs array.
+			 */
+			for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+				addr = *(wreg_addr_p->read_regs);
+				*p++ = REG_RD(bp, addr + j*4);
+			}
+		}
+	}
+
+	/* Paged registers are supported in E2 & E3 only */
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+		/* Read "paged" registers */
+		bnx2x_read_pages_regs(bp, p, preset);
+	}
+
+	return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+	u32 preset_idx;
+
+	/* Read all registers by reading all preset registers */
+	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+		/* Skip presets with IOR */
+		if ((preset_idx == 2) ||
+		    (preset_idx == 5) ||
+		    (preset_idx == 8) ||
+		    (preset_idx == 11))
+			continue;
+		__bnx2x_get_preset_regs(bp, p, preset_idx);
+		p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+	}
+}
+
+static void bnx2x_get_regs(struct net_device *dev,
+			   struct ethtool_regs *regs, void *_p)
+{
+	u32 *p = _p;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct dump_header dump_hdr = {0};
+
+	regs->version = 2;
+	memset(p, 0, regs->len);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	/* Disable parity attentions, since the following dump may
+	 * cause false alarms by reading never-written registers. We
+	 * will re-enable parity attentions right after the dump.
+	 */
+
+	bnx2x_disable_blocks_parity(bp);
+
+	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+	dump_hdr.preset = DUMP_ALL_PRESETS;
+	dump_hdr.version = BNX2X_DUMP_VERSION;
+
+	/* dump_meta_data is the OR of the CHIP and PATH flags. */
+	if (CHIP_IS_E1(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+	} else if (CHIP_IS_E1H(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+	} else if (CHIP_IS_E2(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	} else if (CHIP_IS_E3A0(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	} else if (CHIP_IS_E3B0(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	}
+
+	memcpy(p, &dump_hdr, sizeof(struct dump_header));
+	p += dump_hdr.header_size + 1;
+
+	/* Actually read the registers */
+	__bnx2x_get_regs(bp, p);
+
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	bnx2x_enable_blocks_parity(bp);
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int regdump_len = 0;
+
+	regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+	regdump_len *= 4;
+	regdump_len += sizeof(struct dump_header);
+
+	return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Use the ethtool_dump "flag" field as the dump preset index */
+	if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
+		return -EINVAL;
+
+	bp->dump_preset_idx = val->flag;
+	return 0;
+}
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+			       struct ethtool_dump *dump)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	dump->version = BNX2X_DUMP_VERSION;
+	dump->flag = bp->dump_preset_idx;
+	/* Calculate the requested preset idx length */
+	dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+	DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+	   bp->dump_preset_idx, dump->len);
+	return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+			       struct ethtool_dump *dump,
+			       void *buffer)
+{
+	u32 *p = buffer;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct dump_header dump_hdr = {0};
+
+	/* Disable parity attentions, since the following dump may
+	 * cause false alarms by reading never-written registers. We
+	 * will re-enable parity attentions right after the dump.
+	 */
+
+	bnx2x_disable_blocks_parity(bp);
+
+	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+	dump_hdr.preset = bp->dump_preset_idx;
+	dump_hdr.version = BNX2X_DUMP_VERSION;
+
+	DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+	/* dump_meta_data is the OR of the CHIP and PATH flags. */
+	if (CHIP_IS_E1(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+	} else if (CHIP_IS_E1H(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+	} else if (CHIP_IS_E2(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	} else if (CHIP_IS_E3A0(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	} else if (CHIP_IS_E3B0(bp)) {
+		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+	}
+
+	memcpy(p, &dump_hdr, sizeof(struct dump_header));
+	p += dump_hdr.header_size + 1;
+
+	/* Actually read the registers */
+	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
+
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	bnx2x_enable_blocks_parity(bp);
+
+	return 0;
+}
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+	bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & NO_WOL_FLAG) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC) {
+		DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
+		return -EINVAL;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & NO_WOL_FLAG) {
+			DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
+			return -EINVAL;
+		}
+		bp->wol = 1;
+	} else
+		bp->wol = 0;
+
+	if (SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+	return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (capable(CAP_NET_ADMIN)) {
+		/* dump MCP trace */
+		if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
+			bnx2x_fw_dump_lvl(bp, KERN_INFO);
+		bp->msg_enable = level;
+	}
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bp->port.pmf)
+		return 0;
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_force_link_reset(bp);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
+		return 0;
+
+	if (IS_VF(bp))
+		return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+				 &bp->vf_link_vars.link_report_flags);
+
+	return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->common.flash_size;
+}
+
+/* The per-pf misc lock must be acquired before the per-port mcp lock.
+ * Otherwise, had we done things the other way around and two pfs from the
+ * same port attempted to access nvram at the same time, we could run into a
+ * scenario such as:
+ * pf A takes the port lock.
+ * pf B succeeds in taking the same lock since they are from the same port.
+ * pf A takes the per-pf misc lock. Performs eeprom access.
+ * pf A finishes. Unlocks the per-pf misc lock.
+ * pf B takes the lock and proceeds to perform its own access.
+ * pf A unlocks the per-port lock, while pf B is still working (!).
+ * mcp takes the per-port lock and corrupts pf B's access (and/or has its own
+ * access corrupted by pf B).
+ */
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val;
+
+	/* acquire HW lock: protect against other PFs in PF Direct Assignment */
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* request access to nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+			break;
+
+		udelay(5);
+	}
+
+	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot get access to nvram interface\n");
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val;
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* relinquish nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+			break;
+
+		udelay(5);
+	}
+
+	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot free access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	/* release HW lock: protect against other PFs in PF Direct Assignment */
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+	return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* enable both bits, even on read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
+		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* disable both bits, even after read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+				  u32 cmd_flags)
+{
+	int count, i, rc;
+	u32 val;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* address of the NVRAM to read from */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue a read command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	*ret_val = 0;
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* We read nvram data in cpu order, but ethtool
+			 * sees it as an array of bytes; converting to
+			 * big-endian does the work.
+			 */
+			*ret_val = cpu_to_be32(val);
+			rc = 0;
+			break;
+		}
+	}
+	if (rc == -EBUSY)
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "nvram read timeout expired\n");
+	return rc;
+}
+
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	__be32 val;
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	/* read the first word(s) */
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((buf_size > sizeof(u32)) && (rc == 0)) {
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		ret_buf += sizeof(u32);
+		buf_size -= sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	if (rc == 0) {
+		cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
+			      int buf_size)
+{
+	int rc;
+
+	rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
+
+	if (!rc) {
+		__be32 *be = (__be32 *)buf;
+
+		while ((buf_size -= 4) >= 0)
+			*buf++ = be32_to_cpu(*be++);
+	}
+
+	return rc;
+}
+
+static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
+{
+	int rc = 1;
+	u16 pm = 0;
+	struct net_device *dev = pci_get_drvdata(bp->pdev);
+
+	if (bp->pdev->pm_cap)
+		rc = pci_read_config_word(bp->pdev,
+					  bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
+
+	if ((rc && !netif_running(dev)) ||
+	    (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
+		return false;
+
+	return true;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL  | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -EAGAIN;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+}
+
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+				   struct ethtool_eeprom *ee,
+				   u8 *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = -EINVAL, phy_idx;
+	u8 *user_data = data;
+	unsigned int start_addr = ee->offset, xfer_size = 0;
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -EAGAIN;
+	}
+
+	phy_idx = bnx2x_get_cur_phy_idx(bp);
+
+	/* Read A0 section */
+	if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+		/* Limit transfer size to the A0 section boundary */
+		if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+			xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+		else
+			xfer_size = ee->len;
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+						  &bp->link_params,
+						  I2C_DEV_ADDR_A0,
+						  start_addr,
+						  xfer_size,
+						  user_data);
+		bnx2x_release_phy_lock(bp);
+		if (rc) {
+			DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+			return -EINVAL;
+		}
+		user_data += xfer_size;
+		start_addr += xfer_size;
+	}
+
+	/* Read A2 section */
+	if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+	    (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+		xfer_size = ee->len - xfer_size;
+		/* Limit transfer size to the A2 section boundary */
+		if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+			xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+		start_addr -= ETH_MODULE_SFF_8079_LEN;
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+						  &bp->link_params,
+						  I2C_DEV_ADDR_A2,
+						  start_addr,
+						  xfer_size,
+						  user_data);
+		bnx2x_release_phy_lock(bp);
+		if (rc) {
+			DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int phy_idx, rc;
+	u8 sff8472_comp, diag_type;
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -EAGAIN;
+	}
+	phy_idx = bnx2x_get_cur_phy_idx(bp);
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+					  &bp->link_params,
+					  I2C_DEV_ADDR_A0,
+					  SFP_EEPROM_SFF_8472_COMP_ADDR,
+					  SFP_EEPROM_SFF_8472_COMP_SIZE,
+					  &sff8472_comp);
+	bnx2x_release_phy_lock(bp);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+					  &bp->link_params,
+					  I2C_DEV_ADDR_A0,
+					  SFP_EEPROM_DIAG_TYPE_ADDR,
+					  SFP_EEPROM_DIAG_TYPE_SIZE,
+					  &diag_type);
+	bnx2x_release_phy_lock(bp);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+		return -EINVAL;
+	}
+
+	if (!sff8472_comp ||
+	    (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
+		modinfo->type = ETH_MODULE_SFF_8079;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+	} else {
+		modinfo->type = ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+	}
+	return 0;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+				   u32 cmd_flags)
+{
+	int count, i, rc;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* write the data */
+	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+	/* address of the NVRAM to write to */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue the write command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			rc = 0;
+			break;
+		}
+	}
+
+	if (rc == -EBUSY)
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "nvram write timeout expired\n");
+	return rc;
+}
+
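+/* Bit offset of a byte within its naturally aligned dword */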
+#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			      int buf_size)
+{
+	int rc;
+	u32 cmd_flags, align_offset, val;
+	__be32 val_be;
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+	align_offset = (offset & ~0x03);
+	rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
+
+	if (rc == 0) {
+		/* nvram data is returned as an array of bytes
+		 * convert it back to cpu order
+		 */
+		val = be32_to_cpu(val_be);
+
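+		/* Replace only the addressed byte within the dword; the
+		 * other three bytes keep their original NVRAM contents.
+		 */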
+		val &= ~le32_to_cpu((__force __le32)
+				    (0xff << BYTE_OFFSET(offset)));
+		val |= le32_to_cpu((__force __le32)
+				   (*data_buf << BYTE_OFFSET(offset)));
+
+		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+					     cmd_flags);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			     int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 val;
+	u32 written_so_far;
+
+	if (buf_size == 1)	/* ethtool */
+		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	written_so_far = 0;
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((written_so_far < buf_size) && (rc == 0)) {
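+		/* Mark the last dword of the buffer or of an NVRAM page with
+		 * LAST, and the first dword of a new page with FIRST.
+		 */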
+		if (written_so_far == (buf_size - sizeof(u32)))
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+		memcpy(&val, data_buf, 4);
+
+		/* Note that unlike bnx2x_nvram_read_dword(), this does not
+		 * convert val with be32_to_cpu(); doing so would flip the
+		 * data whenever the eeprom is read and then written back.
+		 * Existing tools rely on this behaviour and would break if
+		 * it were changed.
+		 */
+		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		data_buf += sizeof(u32);
+		written_so_far += sizeof(u32);
+
+		/* At the end of each 4Kb page, release the nvram lock to give
+		 * the MFW a chance to take it for its own use.
+		 */
+		if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+		    (written_so_far < buf_size)) {
+			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+			   "Releasing NVM lock after offset 0x%x\n",
+			   (u32)(offset - sizeof(u32)));
+			bnx2x_release_nvram_lock(bp);
+			usleep_range(1000, 2000);
+			rc = bnx2x_acquire_nvram_lock(bp);
+			if (rc)
+				return rc;
+		}
+
+		cmd_flags = 0;
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
+	int rc = 0;
+	u32 ext_phy_config;
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -EAGAIN;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	/* PHY eeprom can be accessed only by the PMF */
+	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+	    !bp->port.pmf) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "wrong magic or interface is not pmf\n");
+		return -EINVAL;
+	}
+
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+
+	if (eeprom->magic == 0x50485950) {
+		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+		bnx2x_acquire_phy_lock(bp);
+		rc |= bnx2x_link_reset(&bp->link_params,
+				       &bp->link_vars, 0);
+		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_HIGH, port);
+		bnx2x_release_phy_lock(bp);
+		bnx2x_link_report(bp);
+
+	} else if (eeprom->magic == 0x50485952) {
+		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
+		if (bp->state == BNX2X_STATE_OPEN) {
+			bnx2x_acquire_phy_lock(bp);
+			rc |= bnx2x_link_reset(&bp->link_params,
+					       &bp->link_vars, 1);
+
+			rc |= bnx2x_phy_init(&bp->link_params,
+					     &bp->link_vars);
+			bnx2x_release_phy_lock(bp);
+			bnx2x_calc_fc_adv(bp);
+		}
+	} else if (eeprom->magic == 0x53985943) {
+		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
+		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+
+			/* Remove DSP download mode */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_LOW, port);
+
+			bnx2x_acquire_phy_lock(bp);
+
+			bnx2x_sfx7101_sp_sw_reset(bp,
+						&bp->link_params.phy[EXT_PHY1]);
+
+			/* wait 0.5 sec to allow it to run */
+			msleep(500);
+			bnx2x_ext_phy_hw_reset(bp, port);
+			msleep(500);
+			bnx2x_release_phy_lock(bp);
+		}
+	} else
+		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+
+	return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	if (netif_running(dev))
+		bnx2x_update_coalesce(bp);
+
+	return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = MAX_RX_AVAIL;
+
+	if (bp->rx_ring_size)
+		ering->rx_pending = bp->rx_ring_size;
+	else
+		ering->rx_pending = MAX_RX_AVAIL;
+
+	ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+	   ering->rx_pending, ering->tx_pending);
+
+	if (pci_num_vf(bp->pdev)) {
+		DP(BNX2X_MSG_IOV,
+		   "VFs are enabled, can not change ring parameters\n");
+		return -EPERM;
+	}
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	if ((ering->rx_pending > MAX_RX_AVAIL) ||
+	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+						    MIN_RX_SIZE_TPA)) ||
+	    (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
+		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+		return -EINVAL;
+	}
+
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+
+	return bnx2x_reload_if_running(dev);
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	int cfg_reg;
+
+	epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+			   BNX2X_FLOW_CTRL_AUTO);
+
+	if (!epause->autoneg)
+		cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
+	else
+		cfg_reg = bp->link_params.req_fc_auto_adv;
+
+	epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
+			    BNX2X_FLOW_CTRL_RX);
+	epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
+			    BNX2X_FLOW_CTRL_TX);
+
+	DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
+	   "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	if (IS_MF(bp))
+		return 0;
+
+	DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
+	   "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+	bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
+
+	if (epause->rx_pause)
+		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
+
+	if (epause->tx_pause)
+		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
+
+	if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+		bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
+
+	if (epause->autoneg) {
+		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+			DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+			bp->link_params.req_flow_ctrl[cfg_idx] =
+				BNX2X_FLOW_CTRL_AUTO;
+		}
+		bp->link_params.req_fc_auto_adv = 0;
+		if (epause->rx_pause)
+			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
+
+		if (epause->tx_pause)
+			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
+
+		if (!bp->link_params.req_fc_auto_adv)
+			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_force_link_reset(bp);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
+	"register_test (offline)    ",
+	"memory_test (offline)      ",
+	"int_loopback_test (offline)",
+	"ext_loopback_test (offline)",
+	"nvram_test (online)        ",
+	"interrupt_test (online)    ",
+	"link_test (online)         "
+};
+
+enum {
+	BNX2X_PRI_FLAG_ISCSI,
+	BNX2X_PRI_FLAG_FCOE,
+	BNX2X_PRI_FLAG_STORAGE,
+	BNX2X_PRI_FLAG_LEN,
+};
+
+static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+	"iSCSI offload support",
+	"FCoE offload support",
+	"Storage only interface"
+};
+
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+	u32 modes = 0;
+
+	if (eee_adv & SHMEM_EEE_100M_ADV)
+		modes |= ADVERTISED_100baseT_Full;
+	if (eee_adv & SHMEM_EEE_1G_ADV)
+		modes |= ADVERTISED_1000baseT_Full;
+	if (eee_adv & SHMEM_EEE_10G_ADV)
+		modes |= ADVERTISED_10000baseT_Full;
+
+	return modes;
+}
+
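+/* Translate ethtool advertised link modes into SHMEM EEE advertisement bits,
+ * shifted into their position within the eee_status word.
+ */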
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+	u32 eee_adv = 0;
+	if (modes & ADVERTISED_100baseT_Full)
+		eee_adv |= SHMEM_EEE_100M_ADV;
+	if (modes & ADVERTISED_1000baseT_Full)
+		eee_adv |= SHMEM_EEE_1G_ADV;
+	if (modes & ADVERTISED_10000baseT_Full)
+		eee_adv |= SHMEM_EEE_10G_ADV;
+
+	return eee_adv << shift;
+}
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 eee_cfg;
+
+	if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+		DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+		return -EOPNOTSUPP;
+	}
+
+	eee_cfg = bp->link_vars.eee_status;
+
+	edata->supported =
+		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+				 SHMEM_EEE_SUPPORTED_SHIFT);
+
+	edata->advertised =
+		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+				 SHMEM_EEE_ADV_STATUS_SHIFT);
+	edata->lp_advertised =
+		bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+				 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+	/* SHMEM value is in 16u units --> Convert to 1u units. */
+	edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+	edata->eee_enabled    = (eee_cfg & SHMEM_EEE_REQUESTED_BIT)	? 1 : 0;
+	edata->eee_active     = (eee_cfg & SHMEM_EEE_ACTIVE_BIT)	? 1 : 0;
+	edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+	return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 eee_cfg;
+	u32 advertised;
+
+	if (IS_MF(bp))
+		return 0;
+
+	if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+		DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+		return -EOPNOTSUPP;
+	}
+
+	eee_cfg = bp->link_vars.eee_status;
+
+	if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+		DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+		return -EOPNOTSUPP;
+	}
+
+	advertised = bnx2x_adv_to_eee(edata->advertised,
+				      SHMEM_EEE_ADV_STATUS_SHIFT);
+	if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Direct manipulation of EEE advertisement is not supported\n");
+		return -EINVAL;
+	}
+
+	if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Maximal Tx Lpi timer supported is %x(u)\n",
+		   EEE_MODE_TIMER_MASK);
+		return -EINVAL;
+	}
+	if (edata->tx_lpi_enabled &&
+	    (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Minimal Tx Lpi timer supported is %d(u)\n",
+		   EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+		return -EINVAL;
+	}
+
+	/* All is well; apply changes */
+	if (edata->eee_enabled)
+		bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+	else
+		bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+	if (edata->tx_lpi_enabled)
+		bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+	else
+		bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+	bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+	bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+				    EEE_MODE_TIMER_MASK) |
+				    EEE_MODE_OVERRIDE_NVRAM |
+				    EEE_MODE_OUTPUT_TIME;
+
+	/* Restart link to propagate changes */
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_force_link_reset(bp);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+enum {
+	BNX2X_CHIP_E1_OFST = 0,
+	BNX2X_CHIP_E1H_OFST,
+	BNX2X_CHIP_E2_OFST,
+	BNX2X_CHIP_E3_OFST,
+	BNX2X_CHIP_E3B0_OFST,
+	BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+	int idx, i, rc = -ENODEV;
+	u32 wr_val = 0, hw;
+	int port = BP_PORT(bp);
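+	/* Each entry: mask of chips the test applies to, register offset for
+	 * port 0, per-port stride (the register tested is
+	 * offset0 + port * offset1) and the mask of bits exercised by the
+	 * read/write test.
+	 */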
+	static const struct {
+		u32 hw;
+		u32 offset0;
+		u32 offset1;
+		u32 mask;
+	} reg_tbl[] = {
+/* 0 */		{ BNX2X_CHIP_MASK_ALL,
+			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
+		{ BNX2X_CHIP_MASK_E1X,
+			HC_REG_AGG_INT_0,		4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_E3B0,
+			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
+/* 10 */	{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			QM_REG_CONNNUM_0,		4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
+/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
+/* 30 */	{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_VLAN_ID_0,	160, 0x00000fff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },
+
+		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
+	};
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return rc;
+	}
+
+	if (CHIP_IS_E1(bp))
+		hw = BNX2X_CHIP_MASK_E1;
+	else if (CHIP_IS_E1H(bp))
+		hw = BNX2X_CHIP_MASK_E1H;
+	else if (CHIP_IS_E2(bp))
+		hw = BNX2X_CHIP_MASK_E2;
+	else if (CHIP_IS_E3B0(bp))
+		hw = BNX2X_CHIP_MASK_E3B0;
+	else /* e3 A0 */
+		hw = BNX2X_CHIP_MASK_E3;
+
+	/* Repeat the test twice:
+	 * First by writing 0x00000000, second by writing 0xffffffff
+	 */
+	for (idx = 0; idx < 2; idx++) {
+
+		switch (idx) {
+		case 0:
+			wr_val = 0;
+			break;
+		case 1:
+			wr_val = 0xffffffff;
+			break;
+		}
+
+		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+			u32 offset, mask, save_val, val;
+			if (!(hw & reg_tbl[i].hw))
+				continue;
+
+			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+			mask = reg_tbl[i].mask;
+
+			save_val = REG_RD(bp, offset);
+
+			REG_WR(bp, offset, wr_val & mask);
+
+			val = REG_RD(bp, offset);
+
+			/* Restore the original register's value */
+			REG_WR(bp, offset, save_val);
+
+			/* verify value is as expected */
+			if ((val & mask) != (wr_val & mask)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+				   offset, val, wr_val, mask);
+				goto test_reg_exit;
+			}
+		}
+	}
+
+	rc = 0;
+
+test_reg_exit:
+	return rc;
+}
+
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+	int i, j, rc = -ENODEV;
+	u32 val, index;
+	static const struct {
+		u32 offset;
+		int size;
+	} mem_tbl[] = {
+		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
+		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
+		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
+		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
+		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
+		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
+
+		{ 0xffffffff, 0 }
+	};
+
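+	/* Parity status registers, each with a per-chip mask of status bits
+	 * that are tolerated; any other set bit fails the test.
+	 */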
+	static const struct {
+		char *name;
+		u32 offset;
+		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
+	} prty_tbl[] = {
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
+			{0x2,     0x2, 0, 0} },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+			{0,       0,   0, 0} },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
+			{0x3ffc1, 0,   0, 0} },
+
+		{ NULL, 0xffffffff, {0, 0, 0, 0} }
+	};
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return rc;
+	}
+
+	if (CHIP_IS_E1(bp))
+		index = BNX2X_CHIP_E1_OFST;
+	else if (CHIP_IS_E1H(bp))
+		index = BNX2X_CHIP_E1H_OFST;
+	else if (CHIP_IS_E2(bp))
+		index = BNX2X_CHIP_E2_OFST;
+	else /* e3 */
+		index = BNX2X_CHIP_E3_OFST;
+
+	/* Pre-check the parity status */
+	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+		val = REG_RD(bp, prty_tbl[i].offset);
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "%s is 0x%x\n", prty_tbl[i].name, val);
+			goto test_mem_exit;
+		}
+	}
+
+	/* Go through all the memories */
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+		for (j = 0; j < mem_tbl[i].size; j++)
+			REG_RD(bp, mem_tbl[i].offset + j*4);
+
+	/* Check the parity status */
+	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+		val = REG_RD(bp, prty_tbl[i].offset);
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "%s is 0x%x\n", prty_tbl[i].name, val);
+			goto test_mem_exit;
+		}
+	}
+
+	rc = 0;
+
+test_mem_exit:
+	return rc;
+}
+
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
+{
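+	/* Poll every 20 ms, for up to ~28 seconds (1400 iterations) */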
+	int cnt = 1400;
+
+	if (link_up) {
+		while (bnx2x_link_test(bp, is_serdes) && cnt--)
+			msleep(20);
+
+		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+			DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+		cnt = 1400;
+		while (!bp->link_vars.link_up && cnt--)
+			msleep(20);
+
+		if (cnt <= 0 && !bp->link_vars.link_up)
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Timeout waiting for link init\n");
+	}
+}
+
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
+{
+	unsigned int pkt_size, num_pkts, i;
+	struct sk_buff *skb;
+	unsigned char *packet;
+	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+	struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
+	u16 tx_start_idx, tx_idx;
+	u16 rx_start_idx, rx_idx;
+	u16 pkt_prod, bd_prod;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd;
+	dma_addr_t mapping;
+	union eth_rx_cqe *cqe;
+	u8 cqe_fp_flags, cqe_fp_type;
+	struct sw_rx_bd *rx_buf;
+	u16 len;
+	int rc = -ENODEV;
+	u8 *data;
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+						       txdata->txq_index);
+
+	/* check the loopback mode */
+	switch (loopback_mode) {
+	case BNX2X_PHY_LOOPBACK:
+		if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+			DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
+			return -EINVAL;
+		}
+		break;
+	case BNX2X_MAC_LOOPBACK:
+		if (CHIP_IS_E3(bp)) {
+			int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+			if (bp->port.supported[cfg_idx] &
+			    (SUPPORTED_10000baseT_Full |
+			     SUPPORTED_20000baseMLD2_Full |
+			     SUPPORTED_20000baseKR2_Full))
+				bp->link_params.loopback_mode = LOOPBACK_XMAC;
+			else
+				bp->link_params.loopback_mode = LOOPBACK_UMAC;
+		} else
+			bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		break;
+	case BNX2X_EXT_LOOPBACK:
+		if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't configure external loopback\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+		return -EINVAL;
+	}
+
+	/* prepare the loopback packet */
+	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
+	if (!skb) {
+		DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
+		rc = -ENOMEM;
+		goto test_loopback_exit;
+	}
+	packet = skb_put(skb, pkt_size);
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	eth_zero_addr(packet + ETH_ALEN);
+	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		packet[i] = (unsigned char) (i & 0xff);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		rc = -ENOMEM;
+		dev_kfree_skb(skb);
+		DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
+		goto test_loopback_exit;
+	}
+
+	/* send the loopback packet */
+	num_pkts = 0;
+	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
+	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	pkt_prod = txdata->tx_pkt_prod++;
+	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_buf->first_bd = txdata->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	bd_prod = TX_BD(txdata->tx_bd_prod);
+	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_HDR_NBDS,
+		 1);
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_PARSE_NBDS,
+		 0);
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	if (CHIP_IS_E1x(bp)) {
+		u16 global_data = 0;
+		struct eth_tx_parse_bd_e1x  *pbd_e1x =
+			&txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+		SET_FLAG(global_data,
+			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+		pbd_e1x->global_data = cpu_to_le16(global_data);
+	} else {
+		u32 parsing_data = 0;
+		struct eth_tx_parse_bd_e2  *pbd_e2 =
+			&txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+		SET_FLAG(parsing_data,
+			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+		pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+	}
+	wmb();
+
+	txdata->tx_db.data.prod += 2;
+	barrier();
+	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+	mmiowb();
+	barrier();
+
+	num_pkts++;
+	txdata->tx_bd_prod += 2; /* start + pbd */
+
+	udelay(100);
+
+	tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
+	if (tx_idx != tx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	/* Unlike the HC, the IGU won't generate an interrupt for status
+	 * block updates that have been performed while interrupts were
+	 * disabled.
+	 */
+	if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+		 * sch_direct_xmit() and bnx2x_run_loopback() (which calls
+		 * bnx2x_tx_int()), as both take netif_tx_lock().
+		 */
+		local_bh_disable();
+		bnx2x_tx_int(bp, txdata);
+		local_bh_enable();
+	}
+
+	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+		goto test_loopback_rx_exit;
+
+	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
+	if (len != pkt_size)
+		goto test_loopback_rx_exit;
+
+	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+	dma_sync_single_for_cpu(&bp->pdev->dev,
+				   dma_unmap_addr(rx_buf, mapping),
+				   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
+	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		if (*(data + i) != (unsigned char) (i & 0xff))
+			goto test_loopback_rx_exit;
+
+	rc = 0;
+
+test_loopback_rx_exit:
+
+	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+			     fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+	bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+	return rc;
+}
+
+static int bnx2x_test_loopback(struct bnx2x *bp)
+{
+	int rc = 0, res;
+
+	if (BP_NOMCP(bp))
+		return rc;
+
+	if (!netif_running(bp->dev))
+		return BNX2X_LOOPBACK_FAILED;
+
+	bnx2x_netif_stop(bp, 1);
+	bnx2x_acquire_phy_lock(bp);
+
+	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
+	if (res) {
+		DP(BNX2X_MSG_ETHTOOL, "  PHY loopback failed  (res %d)\n", res);
+		rc |= BNX2X_PHY_LOOPBACK_FAILED;
+	}
+
+	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
+	if (res) {
+		DP(BNX2X_MSG_ETHTOOL, "  MAC loopback failed  (res %d)\n", res);
+		rc |= BNX2X_MAC_LOOPBACK_FAILED;
+	}
+
+	bnx2x_release_phy_lock(bp);
+	bnx2x_netif_start(bp);
+
+	return rc;
+}
+
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+	int rc;
+	u8 is_serdes =
+		(bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+	if (BP_NOMCP(bp))
+		return -ENODEV;
+
+	if (!netif_running(bp->dev))
+		return BNX2X_EXT_LOOPBACK_FAILED;
+
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+	rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Can't perform self-test, nic_load (for external lb) failed\n");
+		return -ENODEV;
+	}
+	bnx2x_wait_for_link(bp, 1, is_serdes);
+
+	bnx2x_netif_stop(bp, 1);
+
+	rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+	if (rc)
+		DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed  (res %d)\n", rc);
+
+	bnx2x_netif_start(bp);
+
+	return rc;
+}
+
+struct code_entry {
+	u32 sram_start_addr;
+	u32 code_attribute;
+#define CODE_IMAGE_TYPE_MASK			0xf0800003
+#define CODE_IMAGE_VNTAG_PROFILES_DATA		0xd0000003
+#define CODE_IMAGE_LENGTH_MASK			0x007ffffc
+#define CODE_IMAGE_TYPE_EXTENDED_DIR		0xe0000000
+	u32 nvm_start_addr;
+};
+
+#define CODE_ENTRY_MAX			16
+#define CODE_ENTRY_EXTENDED_DIR_IDX	15
+#define MAX_IMAGES_IN_EXTENDED_DIR	64
+#define NVRAM_DIR_OFFSET		0x14
+
+#define EXTENDED_DIR_EXISTS(code)					  \
+	((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
+	 (code & CODE_IMAGE_LENGTH_MASK) != 0)
+
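+/* CRC32 residue left by crc32_le() (seed ~0, no final XOR) after processing
+ * an image together with its stored CRC; any other value indicates corruption.
+ */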
+#define CRC32_RESIDUAL			0xdebb20e3
+#define CRC_BUFF_SIZE			256
+
+static int bnx2x_nvram_crc(struct bnx2x *bp,
+			   int offset,
+			   int size,
+			   u8 *buff)
+{
+	u32 crc = ~0;
+	int rc = 0, done = 0;
+
+	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+	   "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
+
+	while (done < size) {
+		int count = min_t(int, size - done, CRC_BUFF_SIZE);
+
+		rc = bnx2x_nvram_read(bp, offset + done, buff, count);
+
+		if (rc)
+			return rc;
+
+		crc = crc32_le(crc, buff, count);
+		done += count;
+	}
+
+	if (crc != CRC32_RESIDUAL)
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int bnx2x_test_nvram_dir(struct bnx2x *bp,
+				struct code_entry *entry,
+				u8 *buff)
+{
+	size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
+	u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
+	int rc;
+
+	/* Zero-length images and AFEX profiles do not have CRC */
+	if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
+		return 0;
+
+	rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
+	if (rc)
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "image %x has failed crc test (rc %d)\n", type, rc);
+
+	return rc;
+}
+
+static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
+{
+	int rc;
+	struct code_entry entry;
+
+	rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
+	if (rc)
+		return rc;
+
+	return bnx2x_test_nvram_dir(bp, &entry, buff);
+}
+
+static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
+{
+	u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
+	struct code_entry entry;
+	int i;
+
+	rc = bnx2x_nvram_read32(bp,
+				dir_offset +
+				sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
+				(u32 *)&entry, sizeof(entry));
+	if (rc)
+		return rc;
+
+	if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
+		return 0;
+
+	rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
+				&cnt, sizeof(u32));
+	if (rc)
+		return rc;
+
+	dir_offset = entry.nvm_start_addr + 8;
+
+	for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
+		rc = bnx2x_test_dir_entry(bp, dir_offset +
+					      sizeof(struct code_entry) * i,
+					  buff);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
+{
+	u32 rc, dir_offset = NVRAM_DIR_OFFSET;
+	int i;
+
+	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
+
+	for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
+		rc = bnx2x_test_dir_entry(bp, dir_offset +
+					      sizeof(struct code_entry) * i,
+					  buff);
+		if (rc)
+			return rc;
+	}
+
+	return bnx2x_test_nvram_ext_dirs(bp, buff);
+}
+
+struct crc_pair {
+	int offset;
+	int size;
+};
+
+static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
+				const struct crc_pair *nvram_tbl, u8 *buf)
+{
+	int i;
+
+	for (i = 0; nvram_tbl[i].size; i++) {
+		int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
+					 nvram_tbl[i].size, buf);
+		if (rc) {
+			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+			   "nvram_tbl[%d] has failed crc test (rc %d)\n",
+			   i, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+	const struct crc_pair nvram_tbl[] = {
+		{     0,  0x14 }, /* bootstrap */
+		{  0x14,  0xec }, /* dir */
+		{ 0x100, 0x350 }, /* manuf_info */
+		{ 0x450,  0xf0 }, /* feature_info */
+		{ 0x640,  0x64 }, /* upgrade_key_info */
+		{ 0x708,  0x70 }, /* manuf_key_info */
+		{     0,     0 }
+	};
+	const struct crc_pair nvram_tbl2[] = {
+		{ 0x7e8, 0x350 }, /* manuf_info2 */
+		{ 0xb38,  0xf0 }, /* feature_info */
+		{     0,     0 }
+	};
+
+	u8 *buf;
+	int rc;
+	u32 magic;
+
+	if (BP_NOMCP(bp))
+		return 0;
+
+	buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
+		rc = -ENOMEM;
+		goto test_nvram_exit;
+	}
+
+	rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "failed to read magic value (rc %d)\n", rc);
+		goto test_nvram_exit;
+	}
+
+	if (magic != 0x669955aa) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "wrong magic value (0x%08x)\n", magic);
+		rc = -ENODEV;
+		goto test_nvram_exit;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
+	rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
+	if (rc)
+		goto test_nvram_exit;
+
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
+		u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+			   SHARED_HW_CFG_HIDE_PORT1;
+
+		if (!hide) {
+			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+			   "Port 1 CRC test-set\n");
+			rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
+			if (rc)
+				goto test_nvram_exit;
+		}
+	}
+
+	rc = bnx2x_test_nvram_dirs(bp, buf);
+
+test_nvram_exit:
+	kfree(buf);
+	return rc;
+}
+
+/* Send an EMPTY ramrod on the first queue */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+	struct bnx2x_queue_state_params params = {NULL};
+
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -ENODEV;
+	}
+
+	params.q_obj = &bp->sp_objs->q_obj;
+	params.cmd = BNX2X_Q_CMD_EMPTY;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	return bnx2x_queue_state_change(bp, &params);
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+			    struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 is_serdes, link_up;
+	int rc, cnt = 0;
+
+	if (pci_num_vf(bp->pdev)) {
+		DP(BNX2X_MSG_IOV,
+		   "VFs are enabled, can not perform self test\n");
+		return;
+	}
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev,
+			   "Handling parity error recovery. Try again later\n");
+		etest->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "Self-test command parameters: offline = %d, external_lb = %d\n",
+	   (etest->flags & ETH_TEST_FL_OFFLINE),
+	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
+
+	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+
+	if (bnx2x_test_nvram(bp) != 0) {
+		if (!IS_MF(bp))
+			buf[4] = 1;
+		else
+			buf[0] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (!netif_running(dev)) {
+		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
+		return;
+	}
+
+	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+	link_up = bp->link_vars.link_up;
+	/* offline tests are not supported in MF mode */
+	if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
+		int port = BP_PORT(bp);
+		u32 val;
+
+		/* save current value of input enable for TX port IF */
+		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+		/* disable input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+		rc = bnx2x_nic_load(bp, LOAD_DIAG);
+		if (rc) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't perform self-test, nic_load (for offline) failed\n");
+			return;
+		}
+
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, 1, is_serdes);
+
+		if (bnx2x_test_registers(bp) != 0) {
+			buf[0] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if (bnx2x_test_memory(bp) != 0) {
+			buf[1] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+
+		buf[2] = bnx2x_test_loopback(bp); /* internal LB */
+		if (buf[2] != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+			buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+			if (buf[3] != 0)
+				etest->flags |= ETH_TEST_FL_FAILED;
+			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+		}
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+
+		/* restore input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+		if (rc) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't perform self-test, nic_load (for online) failed\n");
+			return;
+		}
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, link_up, is_serdes);
+	}
+
+	if (bnx2x_test_intr(bp) != 0) {
+		if (!IS_MF(bp))
+			buf[5] = 1;
+		else
+			buf[1] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (link_up) {
+		cnt = 100;
+		while (bnx2x_link_test(bp, is_serdes) && --cnt)
+			msleep(20);
+	}
+
+	if (!cnt) {
+		if (!IS_MF(bp))
+			buf[6] = 1;
+		else
+			buf[2] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+#define IS_PORT_STAT(i) \
+	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
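+/* Port statistics are hidden on VFs, and on multi-function devices unless
+ * statistics debug messages are enabled.
+ */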
+#define HIDE_PORT_STAT(bp) \
+		((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+		 IS_VF(bp))
+
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+static int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+	return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, num_strings = 0;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			num_strings = bnx2x_num_stat_queues(bp) *
+				      BNX2X_NUM_Q_STATS;
+		} else
+			num_strings = 0;
+		if (HIDE_PORT_STAT(bp)) {
+			for (i = 0; i < BNX2X_NUM_STATS; i++)
+				if (IS_FUNC_STAT(i))
+					num_strings++;
+		} else
+			num_strings += BNX2X_NUM_STATS;
+
+		return num_strings;
+
+	case ETH_SS_TEST:
+		return BNX2X_NUM_TESTS(bp);
+
+	case ETH_SS_PRIV_FLAGS:
+		return BNX2X_PRI_FLAG_LEN;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static u32 bnx2x_get_private_flags(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 flags = 0;
+
+	flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
+	flags |= (!(bp->flags & NO_FCOE_FLAG)  ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
+	flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
+
+	return flags;
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, j, k, start;
+	char queue_name[MAX_QUEUE_NAME_LEN+1];
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		k = 0;
+		if (is_multi(bp)) {
+			for_each_eth_queue(bp, i) {
+				memset(queue_name, 0, sizeof(queue_name));
+				sprintf(queue_name, "%d", i);
+				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+						ETH_GSTRING_LEN,
+						bnx2x_q_stats_arr[j].string,
+						queue_name);
+				k += BNX2X_NUM_Q_STATS;
+			}
+		}
+
+		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+			if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
+				continue;
+			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+				   bnx2x_stats_arr[i].string);
+			j++;
+		}
+
+		break;
+
+	case ETH_SS_TEST:
+		/* First 4 tests cannot be done in MF mode */
+		if (!IS_MF(bp))
+			start = 0;
+		else
+			start = 4;
+		memcpy(buf, bnx2x_tests_str_arr + start,
+		       ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+		break;
+
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(buf, bnx2x_private_arr,
+		       ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+		break;
+	}
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 *hw_stats, *offset;
+	int i, j, k = 0;
+
+	if (is_multi(bp)) {
+		for_each_eth_queue(bp, i) {
+			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
+			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+				if (bnx2x_q_stats_arr[j].size == 0) {
+					/* skip this counter */
+					buf[k + j] = 0;
+					continue;
+				}
+				offset = (hw_stats +
+					  bnx2x_q_stats_arr[j].offset);
+				if (bnx2x_q_stats_arr[j].size == 4) {
+					/* 4-byte counter */
+					buf[k + j] = (u64) *offset;
+					continue;
+				}
+				/* 8-byte counter */
+				buf[k + j] = HILO_U64(*offset, *(offset + 1));
+			}
+			k += BNX2X_NUM_Q_STATS;
+		}
+	}
+
+	hw_stats = (u32 *)&bp->eth_stats;
+	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
+			continue;
+		if (bnx2x_stats_arr[i].size == 0) {
+			/* skip this counter */
+			buf[k + j] = 0;
+			j++;
+			continue;
+		}
+		offset = (hw_stats + bnx2x_stats_arr[i].offset);
+		if (bnx2x_stats_arr[i].size == 4) {
+			/* 4-byte counter */
+			buf[k + j] = (u64) *offset;
+			j++;
+			continue;
+		}
+		/* 8-byte counter */
+		buf[k + j] = HILO_U64(*offset, *(offset + 1));
+		j++;
+	}
+}
+
+static int bnx2x_set_phys_id(struct net_device *dev,
+			     enum ethtool_phys_id_state state)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bnx2x_is_nvm_accessible(bp)) {
+		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+		   "cannot access eeprom when the interface is down\n");
+		return -EAGAIN;
+	}
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_ON, SPEED_1000);
+		bnx2x_release_phy_lock(bp);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_FRONT_PANEL_OFF, 0);
+		bnx2x_release_phy_lock(bp);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_OPER,
+			      bp->link_vars.line_speed);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	return 0;
+}
+
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		info->data = RXH_IP_SRC | RXH_IP_DST |
+			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V4_FLOW:
+		if (bp->rss_conf_obj.udp_rss_v4)
+			info->data = RXH_IP_SRC | RXH_IP_DST |
+				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		else
+			info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case UDP_V6_FLOW:
+		if (bp->rss_conf_obj.udp_rss_v6)
+			info->data = RXH_IP_SRC | RXH_IP_DST |
+				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		else
+			info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		info->data = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   u32 *rules __always_unused)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = BNX2X_NUM_ETH_QUEUES(bp);
+		return 0;
+	case ETHTOOL_GRXFH:
+		return bnx2x_get_rss_flags(bp, info);
+	default:
+		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+	int udp_rss_requested;
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "Set rss flags command parameters: flow type = %d, data = %llu\n",
+	   info->flow_type, info->data);
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		/* For TCP, only the 4-tuple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		/* For UDP, either the 2-tuple or the 4-tuple hash is supported */
+		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+				   RXH_L4_B_0_1 | RXH_L4_B_2_3))
+			udp_rss_requested = 1;
+		else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+			udp_rss_requested = 0;
+		else
+			return -EINVAL;
+
+		if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+			return -EINVAL;
+		}
+
+		if ((info->flow_type == UDP_V4_FLOW) &&
+		    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+		} else if ((info->flow_type == UDP_V6_FLOW) &&
+			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+		}
+		return 0;
+
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		/* For IP, only the 2-tuple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IP_USER_FLOW:
+	case ETHER_FLOW:
+		/* RSS is not supported for these protocols */
+		if (info->data) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		return bnx2x_set_rss_flags(bp, info);
+	default:
+		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+	return T_ETH_INDIRECTION_TABLE_SIZE;
+}
+
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	size_t i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (!indir)
+		return 0;
+
+	/* Get the current configuration of the RSS indirection table */
+	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+	/*
+	 * We can't use a memcpy() because the internal storage of the
+	 * indirection table is a u8 array while indir points to an
+	 * array of u32.
+	 *
+	 * Indirection table contains the FW Client IDs, so we need to
+	 * align the returned table to the Client ID of the leading RSS
+	 * queue.
+	 */
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+		indir[i] = ind_table[i] - bp->fp->cl_id;
+
+	return 0;
+}
+
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t i;
+
+	/* We require at least one supported parameter to be changed and no
+	 * change in any of the unsupported parameters
+	 */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+
+	if (!indir)
+		return 0;
+
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		/*
+		 * The same as in bnx2x_get_rxfh: we can't use a memcpy()
+		 * because the internal storage of the indirection table is a
+		 * u8 array while indir points to an array of u32.
+		 *
+		 * Indirection table contains the FW Client IDs, so we need to
+		 * align the received table to the Client ID of the leading RSS
+		 * queue
+		 */
+		bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+	}
+
+	return bnx2x_config_rss_eth(bp, false);
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev:		net device
+ * @channels:		returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+			       struct ethtool_channels *channels)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+	channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp:			bnx2x private structure
+ * @num_rss:		number of RSS queues requested
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+	bnx2x_disable_msi(bp);
+	bp->num_ethernet_queues = num_rss;
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
+	bnx2x_set_int_mode(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev:		net device
+ * @channels:		includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+			      struct ethtool_channels *channels)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+	   channels->rx_count, channels->tx_count, channels->other_count,
+	   channels->combined_count);
+
+	if (pci_num_vf(bp->pdev)) {
+		DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
+		return -EPERM;
+	}
+
+	/* We don't support separate rx / tx channels.
+	 * We don't allow setting 'other' channels.
+	 */
+	if (channels->rx_count || channels->tx_count || channels->other_count
+	    || (channels->combined_count == 0) ||
+	    (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+		DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+		return -EINVAL;
+	}
+
+	/* Check if there was a change in the active parameters */
+	if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+		DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+		return 0;
+	}
+
+	/* Set the requested number of queues in bp context.
+	 * Note that the actual number of queues created during load may be
+	 * less than requested if memory is low.
+	 */
+	if (unlikely(!netif_running(dev))) {
+		bnx2x_change_num_queues(bp, channels->combined_count);
+		return 0;
+	}
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+	bnx2x_change_num_queues(bp, channels->combined_count);
+	return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+static int bnx2x_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & PTP_SUPPORTED) {
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (bp->ptp_clock)
+			info->phc_index = ptp_clock_index(bp->ptp_clock);
+		else
+			info->phc_index = -1;
+
+		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+		return 0;
+	}
+
+	return ethtool_op_get_ts_info(dev, info);
+}
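+
+/* Illustrative only (not part of the original source): the time-stamping
+ * capabilities filled in above are what userspace queries via, e.g.:
+ *
+ *	# ethtool -T eth0
+ */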
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+	.get_settings		= bnx2x_get_settings,
+	.set_settings		= bnx2x_set_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_regs_len		= bnx2x_get_regs_len,
+	.get_regs		= bnx2x_get_regs,
+	.get_dump_flag		= bnx2x_get_dump_flag,
+	.get_dump_data		= bnx2x_get_dump_data,
+	.set_dump		= bnx2x_set_dump,
+	.get_wol		= bnx2x_get_wol,
+	.set_wol		= bnx2x_set_wol,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.nway_reset		= bnx2x_nway_reset,
+	.get_link		= bnx2x_get_link,
+	.get_eeprom_len		= bnx2x_get_eeprom_len,
+	.get_eeprom		= bnx2x_get_eeprom,
+	.set_eeprom		= bnx2x_set_eeprom,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.set_coalesce		= bnx2x_set_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_pauseparam		= bnx2x_get_pauseparam,
+	.set_pauseparam		= bnx2x_set_pauseparam,
+	.self_test		= bnx2x_self_test,
+	.get_sset_count		= bnx2x_get_sset_count,
+	.get_priv_flags		= bnx2x_get_private_flags,
+	.get_strings		= bnx2x_get_strings,
+	.set_phys_id		= bnx2x_set_phys_id,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.set_rxnfc		= bnx2x_set_rxnfc,
+	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
+	.get_rxfh		= bnx2x_get_rxfh,
+	.set_rxfh		= bnx2x_set_rxfh,
+	.get_channels		= bnx2x_get_channels,
+	.set_channels		= bnx2x_set_channels,
+	.get_module_info	= bnx2x_get_module_info,
+	.get_module_eeprom	= bnx2x_get_module_eeprom,
+	.get_eee		= bnx2x_get_eee,
+	.set_eee		= bnx2x_set_eee,
+	.get_ts_info		= bnx2x_get_ts_info,
+};
+
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+	.get_settings		= bnx2x_get_vf_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.get_link		= bnx2x_get_link,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_sset_count		= bnx2x_get_sset_count,
+	.get_strings		= bnx2x_get_strings,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.set_rxnfc		= bnx2x_set_rxnfc,
+	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
+	.get_rxfh		= bnx2x_get_rxfh,
+	.set_rxfh		= bnx2x_set_rxfh,
+	.get_channels		= bnx2x_get_channels,
+	.set_channels		= bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
+{
+	netdev->ethtool_ops = (IS_PF(bp)) ?
+		&bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 0000000..226ab29
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,398 @@
+/* bnx2x_fw_defs.h: Qlogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_FW_DEFS_H
+#define BNX2X_FW_DEFS_H
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[152].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[151].base + ((assertListEntry) * IRO[151].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+	(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+	IRO[157].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+	(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+	IRO[158].m2))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+	(IRO[163].base + ((funcId) * IRO[163].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[153].base + ((funcId) * IRO[153].m1))
+#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
+	(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
+#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
+	(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+	* IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+	(IRO[323].base + ((pfId) * IRO[323].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+	(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+	(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+	(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+	(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+	(IRO[322].base + ((pfId) * IRO[322].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[314].base + ((pfId) * IRO[314].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[313].base + ((pfId) * IRO[313].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[312].base + ((pfId) * IRO[312].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[155].base + ((funcId) * IRO[155].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+	(IRO[146].base + ((pfId) * IRO[146].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+	(IRO[147].base + ((pfId) * IRO[147].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+	(IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+	(IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+	(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+	(IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+	(IRO[138].base + ((sbId) * IRO[138].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+	(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+	(IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+	(IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+	(IRO[159].base + ((vfId) * IRO[159].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+	(IRO[160].base + ((vfId) * IRO[160].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[154].base + ((funcId) * IRO[154].m1))
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+	(IRO[207].base + ((pfId) * IRO[207].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+	(IRO[205].base + ((pfId) * IRO[205].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[107].base + ((funcId) * IRO[107].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[278].base + ((pfId) * IRO[278].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+	(IRO[279].base + ((pfId) * IRO[279].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[277].base + ((pfId) * IRO[277].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[276].base + ((pfId) * IRO[276].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[275].base + ((pfId) * IRO[275].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+	(IRO[206].base + ((pfId) * IRO[206].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[109].base + ((funcId) * IRO[109].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+	(IRO[223].base + ((pfId) * IRO[223].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[108].base + ((funcId) * IRO[108].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
+#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[181].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[180].base + ((assertListEntry) * IRO[180].m1))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+	(IRO[187].base + ((portId) * IRO[187].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+	(IRO[325].base + ((pfId) * IRO[325].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[182].base + ((funcId) * IRO[182].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+	(IRO[289].base + ((pfId) * IRO[289].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[294].base + ((pfId) * IRO[294].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+	(IRO[291].base + ((pfId) * IRO[291].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[288].base + ((pfId) * IRO[288].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+	(IRO[292].base + ((pfId) * IRO[292].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+	(IRO[186].base + ((pfId) * IRO[186].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[184].base + ((funcId) * IRO[184].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+	(IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
+	IRO[215].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+	(IRO[216].base + ((qzoneId) * IRO[216].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
+#define USTORM_TPA_BTR_SIZE (IRO[213].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[183].base + ((funcId) * IRO[183].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+	(IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+	(IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+	(IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+	(IRO[305].base + ((pfId) * IRO[305].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+	(IRO[306].base + ((pfId) * IRO[306].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+	(IRO[307].base + ((pfId) * IRO[307].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+	(IRO[309].base + ((pfId) * IRO[309].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+	(IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+	(IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+	(IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+	(IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+	(IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+	(IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+	(IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+	(IRO[217].base + ((portId) * IRO[217].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+	(IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+	(IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
+	IRO[220].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[48].base + ((funcId) * IRO[48].m1))
+#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
+
+/* eth hsi version */
+#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT	0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
+#define U_ETH_SGL_SIZE 8
+	/* The FW will pad the buffer with this value, so the IP header
+	will be aligned to 4 bytes */
+#define IP_HEADER_ALIGNMENT_PADDING 2
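+	/* For reference: the Ethernet header is 14 bytes, so 2 bytes of pad
+	in front of it (2 + 14 = 16) place the IP header on a 4-byte
+	boundary. */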
+
+#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
+	(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK	(U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK	(TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/* The CRC32 seed used for the multicast address hash (reduction) */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE	(8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1 18
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H	56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1 32
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
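+/* For reference: 256 bins match the 8-bit CRC32 hash above
+ * (2 ^ ETH_CRC32_HASH_BIT_SIZE = 256), presumably one bin per hash value.
+ */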
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
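+/* For reference, with MAX_RAMRODS_PER_PORT = 8 (defined below):
+ * without TPA: 8 + 3 = 11 CQEs; with TPA: E1: 32 + 11 = 43,
+ * E1H/E2: 64 + 11 = 75.
+ */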
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/* This file defines HSI constants common to all microcode flows */
+
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page size 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8
+#define HC_SB_MAX_INDICES_E2 8
+
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2	136
+
+#define HC_SP_SB_ID 0xde
+
+#define HC_SB_MAX_SM 2
+
+#define HC_SB_MAX_DYNAMIC_INDICES 4
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
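+/* The resolutions above are presumably expressed in seconds, e.g.
+ * TIMERS_TICK_SIZE_CHIP = 1e-3 s (1 ms) and SDM_TIMER_TICK_RESUL_CHIP =
+ * 4e-6 s (4 us); this note is an editorial assumption, not from the
+ * original file.
+ */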
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+#define MAX_VLAN_PRIORITIES 8
+	/* used by the traffic_type_to_priority[] array to mark a traffic
+	type that is not mapped to a priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+#define C_ERES_PER_PAGE \
+	(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+#define STATS_QUERY_CMD_COUNT 16
+
+#define AFEX_LIST_TABLE_SIZE 4096
+
+#define INVALID_VNIC_ID	0xFF
+
+#define UNDEF_IRO 0x80000000
+
+/* used for defining the number of FCoE tasks supported for a PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
+
+#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 0000000..9e3b5a1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,40 @@
+/* bnx2x_fw_file_hdr.h: FW binary file header structure.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ * Based on the original idea of John Wright <john.wright@hp.com>.
+ */
+
+#ifndef BNX2X_INIT_FILE_HDR_H
+#define BNX2X_INIT_FILE_HDR_H
+
+struct bnx2x_fw_file_section {
+	__be32 len;
+	__be32 offset;
+};
+
+struct bnx2x_fw_file_hdr {
+	struct bnx2x_fw_file_section init_ops;
+	struct bnx2x_fw_file_section init_ops_offsets;
+	struct bnx2x_fw_file_section init_data;
+	struct bnx2x_fw_file_section tsem_int_table_data;
+	struct bnx2x_fw_file_section tsem_pram_data;
+	struct bnx2x_fw_file_section usem_int_table_data;
+	struct bnx2x_fw_file_section usem_pram_data;
+	struct bnx2x_fw_file_section csem_int_table_data;
+	struct bnx2x_fw_file_section csem_pram_data;
+	struct bnx2x_fw_file_section xsem_int_table_data;
+	struct bnx2x_fw_file_section xsem_pram_data;
+	struct bnx2x_fw_file_section iro_arr;
+	struct bnx2x_fw_file_section fw_version;
+};
+
+#endif /* BNX2X_INIT_FILE_HDR_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
new file mode 100644
index 0000000..cafd5de
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,6033 @@
+/* bnx2x_hsi.h: Qlogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef BNX2X_HSI_H
+#define BNX2X_HSI_H
+
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
+
+#define FW_ENCODE_32BIT_PATTERN         0x1e1e1e1e
+
+struct license_key {
+	u32 reserved[6];
+
+	u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16
+
+	u32 reserved_a;
+
+	u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT	16
+
+	u32 reserved_b[4];
+};
+
+/****************************************************************************
+ * Shared HW configuration                                                  *
+ ****************************************************************************/
+#define PIN_CFG_NA                          0x00000000
+#define PIN_CFG_GPIO0_P0                    0x00000001
+#define PIN_CFG_GPIO1_P0                    0x00000002
+#define PIN_CFG_GPIO2_P0                    0x00000003
+#define PIN_CFG_GPIO3_P0                    0x00000004
+#define PIN_CFG_GPIO0_P1                    0x00000005
+#define PIN_CFG_GPIO1_P1                    0x00000006
+#define PIN_CFG_GPIO2_P1                    0x00000007
+#define PIN_CFG_GPIO3_P1                    0x00000008
+#define PIN_CFG_EPIO0                       0x00000009
+#define PIN_CFG_EPIO1                       0x0000000a
+#define PIN_CFG_EPIO2                       0x0000000b
+#define PIN_CFG_EPIO3                       0x0000000c
+#define PIN_CFG_EPIO4                       0x0000000d
+#define PIN_CFG_EPIO5                       0x0000000e
+#define PIN_CFG_EPIO6                       0x0000000f
+#define PIN_CFG_EPIO7                       0x00000010
+#define PIN_CFG_EPIO8                       0x00000011
+#define PIN_CFG_EPIO9                       0x00000012
+#define PIN_CFG_EPIO10                      0x00000013
+#define PIN_CFG_EPIO11                      0x00000014
+#define PIN_CFG_EPIO12                      0x00000015
+#define PIN_CFG_EPIO13                      0x00000016
+#define PIN_CFG_EPIO14                      0x00000017
+#define PIN_CFG_EPIO15                      0x00000018
+#define PIN_CFG_EPIO16                      0x00000019
+#define PIN_CFG_EPIO17                      0x0000001a
+#define PIN_CFG_EPIO18                      0x0000001b
+#define PIN_CFG_EPIO19                      0x0000001c
+#define PIN_CFG_EPIO20                      0x0000001d
+#define PIN_CFG_EPIO21                      0x0000001e
+#define PIN_CFG_EPIO22                      0x0000001f
+#define PIN_CFG_EPIO23                      0x00000020
+#define PIN_CFG_EPIO24                      0x00000021
+#define PIN_CFG_EPIO25                      0x00000022
+#define PIN_CFG_EPIO26                      0x00000023
+#define PIN_CFG_EPIO27                      0x00000024
+#define PIN_CFG_EPIO28                      0x00000025
+#define PIN_CFG_EPIO29                      0x00000026
+#define PIN_CFG_EPIO30                      0x00000027
+#define PIN_CFG_EPIO31                      0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA                         0x00000000
+#define EPIO_CFG_EPIO0                      0x00000001
+#define EPIO_CFG_EPIO1                      0x00000002
+#define EPIO_CFG_EPIO2                      0x00000003
+#define EPIO_CFG_EPIO3                      0x00000004
+#define EPIO_CFG_EPIO4                      0x00000005
+#define EPIO_CFG_EPIO5                      0x00000006
+#define EPIO_CFG_EPIO6                      0x00000007
+#define EPIO_CFG_EPIO7                      0x00000008
+#define EPIO_CFG_EPIO8                      0x00000009
+#define EPIO_CFG_EPIO9                      0x0000000a
+#define EPIO_CFG_EPIO10                     0x0000000b
+#define EPIO_CFG_EPIO11                     0x0000000c
+#define EPIO_CFG_EPIO12                     0x0000000d
+#define EPIO_CFG_EPIO13                     0x0000000e
+#define EPIO_CFG_EPIO14                     0x0000000f
+#define EPIO_CFG_EPIO15                     0x00000010
+#define EPIO_CFG_EPIO16                     0x00000011
+#define EPIO_CFG_EPIO17                     0x00000012
+#define EPIO_CFG_EPIO18                     0x00000013
+#define EPIO_CFG_EPIO19                     0x00000014
+#define EPIO_CFG_EPIO20                     0x00000015
+#define EPIO_CFG_EPIO21                     0x00000016
+#define EPIO_CFG_EPIO22                     0x00000017
+#define EPIO_CFG_EPIO23                     0x00000018
+#define EPIO_CFG_EPIO24                     0x00000019
+#define EPIO_CFG_EPIO25                     0x0000001a
+#define EPIO_CFG_EPIO26                     0x0000001b
+#define EPIO_CFG_EPIO27                     0x0000001c
+#define EPIO_CFG_EPIO28                     0x0000001d
+#define EPIO_CFG_EPIO29                     0x0000001e
+#define EPIO_CFG_EPIO30                     0x0000001f
+#define EPIO_CFG_EPIO31                     0x00000020
+
+struct mac_addr {
+	u32 upper;
+	u32 lower;
+};
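+/* Presumably (an assumption, not stated in the original file) the 48-bit MAC
+ * address is stored with its two most-significant bytes in the low 16 bits
+ * of 'upper' and the remaining four bytes in 'lower'; the UPPERMAC masks
+ * below are consistent with that layout.
+ */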
+
+struct shared_hw_cfg {			 /* NVRAM Offset */
+	/* Up to 16 bytes of NULL-terminated string */
+	u8  part_num[16];		    /* 0x104 */
+
+	u32 config;			/* 0x114 */
+	#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK             0x00000001
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT             0
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V              0x00000000
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V              0x00000001
+	#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN        0x00000002
+
+	#define SHARED_HW_CFG_PORT_SWAP                     0x00000004
+
+	#define SHARED_HW_CFG_BEACON_WOL_EN                 0x00000008
+
+	#define SHARED_HW_CFG_PCIE_GEN3_DISABLED            0x00000000
+	#define SHARED_HW_CFG_PCIE_GEN3_ENABLED             0x00000010
+
+	#define SHARED_HW_CFG_MFW_SELECT_MASK               0x00000700
+		#define SHARED_HW_CFG_MFW_SELECT_SHIFT               8
+	/* Whichever MFW is found in NVM
+	   (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
+		#define SHARED_HW_CFG_MFW_SELECT_DEFAULT             0x00000000
+		#define SHARED_HW_CFG_MFW_SELECT_NC_SI               0x00000100
+		#define SHARED_HW_CFG_MFW_SELECT_UMP                 0x00000200
+		#define SHARED_HW_CFG_MFW_SELECT_IPMI                0x00000300
+	/* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI    0x00000400
+	/* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI      0x00000500
+	/* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP     0x00000600
+
+	#define SHARED_HW_CFG_LED_MODE_MASK                 0x000f0000
+		#define SHARED_HW_CFG_LED_MODE_SHIFT                 16
+		#define SHARED_HW_CFG_LED_MAC1                       0x00000000
+		#define SHARED_HW_CFG_LED_PHY1                       0x00010000
+		#define SHARED_HW_CFG_LED_PHY2                       0x00020000
+		#define SHARED_HW_CFG_LED_PHY3                       0x00030000
+		#define SHARED_HW_CFG_LED_MAC2                       0x00040000
+		#define SHARED_HW_CFG_LED_PHY4                       0x00050000
+		#define SHARED_HW_CFG_LED_PHY5                       0x00060000
+		#define SHARED_HW_CFG_LED_PHY6                       0x00070000
+		#define SHARED_HW_CFG_LED_MAC3                       0x00080000
+		#define SHARED_HW_CFG_LED_PHY7                       0x00090000
+		#define SHARED_HW_CFG_LED_PHY9                       0x000a0000
+		#define SHARED_HW_CFG_LED_PHY11                      0x000b0000
+		#define SHARED_HW_CFG_LED_MAC4                       0x000c0000
+		#define SHARED_HW_CFG_LED_PHY8                       0x000d0000
+		#define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+		#define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000
+
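+	/* Illustrative sketch (not from the original file): the MASK/SHIFT
+	 * pairs in this file are meant to be used as, e.g., for the LED mode
+	 * just above:
+	 *
+	 *	led_mode = (config & SHARED_HW_CFG_LED_MODE_MASK) >>
+	 *		   SHARED_HW_CFG_LED_MODE_SHIFT;
+	 */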
+
+	#define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
+		#define SHARED_HW_CFG_AN_ENABLE_SHIFT                24
+		#define SHARED_HW_CFG_AN_ENABLE_CL37                 0x01000000
+		#define SHARED_HW_CFG_AN_ENABLE_CL73                 0x02000000
+		#define SHARED_HW_CFG_AN_ENABLE_BAM                  0x04000000
+		#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION   0x08000000
+		#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT  0x10000000
+		#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY           0x20000000
+
+	#define SHARED_HW_CFG_SRIOV_MASK                    0x40000000
+		#define SHARED_HW_CFG_SRIOV_DISABLED                 0x00000000
+		#define SHARED_HW_CFG_SRIOV_ENABLED                  0x40000000
+
+	#define SHARED_HW_CFG_ATC_MASK                      0x80000000
+		#define SHARED_HW_CFG_ATC_DISABLED                   0x00000000
+		#define SHARED_HW_CFG_ATC_ENABLED                    0x80000000
+
+	u32 config2;			    /* 0x118 */
+	/* one time auto detect grace period (in sec) */
+	#define SHARED_HW_CFG_GRACE_PERIOD_MASK             0x000000ff
+	#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT                     0
+
+	#define SHARED_HW_CFG_PCIE_GEN2_ENABLED             0x00000100
+	#define SHARED_HW_CFG_PCIE_GEN2_DISABLED            0x00000000
+
+	/* The default value for the core clock is 250MHz and it is
+	   achieved by setting the clock change to 4 */
+	#define SHARED_HW_CFG_CLOCK_CHANGE_MASK             0x00000e00
+	#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT                     9
+
+	#define SHARED_HW_CFG_SMBUS_TIMING_MASK             0x00001000
+		#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ            0x00000000
+		#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ            0x00001000
+
+	#define SHARED_HW_CFG_HIDE_PORT1                    0x00002000
+
+	#define SHARED_HW_CFG_WOL_CAPABLE_MASK              0x00004000
+		#define SHARED_HW_CFG_WOL_CAPABLE_DISABLED           0x00000000
+		#define SHARED_HW_CFG_WOL_CAPABLE_ENABLED            0x00004000
+
+		/* Output low when PERST is asserted */
+	#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK       0x00008000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED    0x00000000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED     0x00008000
+
+	#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK    0x00070000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT    16
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW       0x00000000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB      0x00010000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB    0x00020000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB    0x00030000
+
+	/*  The fan failure mechanism is usually related to the PHY type
+	      since the power consumption of the board is determined by the PHY.
+	      Currently, a fan is required for most designs with SFX7101, BCM8727
+	      and BCM8481. If a fan is not required for a board which uses one
+	      of those PHYs, this field should be set to "Disabled". If a fan is
+	      required for a different PHY type, this option should be set to
+	      "Enabled". The fan failure indication is expected on SPIO5 */
+	#define SHARED_HW_CFG_FAN_FAILURE_MASK              0x00180000
+		#define SHARED_HW_CFG_FAN_FAILURE_SHIFT              19
+		#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE           0x00000000
+		#define SHARED_HW_CFG_FAN_FAILURE_DISABLED           0x00080000
+		#define SHARED_HW_CFG_FAN_FAILURE_ENABLED            0x00100000
+
+		/* ASPM Power Management support */
+	#define SHARED_HW_CFG_ASPM_SUPPORT_MASK             0x00600000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT             21
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED    0x00000000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED      0x00200000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED       0x00400000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED   0x00600000
+
+	/* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+	   tl_control_0 (register 0x2800) */
+	#define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK         0x00800000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED      0x00000000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED       0x00800000
+
+	#define SHARED_HW_CFG_PORT_MODE_MASK                0x01000000
+		#define SHARED_HW_CFG_PORT_MODE_2                    0x00000000
+		#define SHARED_HW_CFG_PORT_MODE_4                    0x01000000
+
+	#define SHARED_HW_CFG_PATH_SWAP_MASK                0x02000000
+		#define SHARED_HW_CFG_PATH_SWAP_DISABLED             0x00000000
+		#define SHARED_HW_CFG_PATH_SWAP_ENABLED              0x02000000
+
+	/*  Set the MDC/MDIO access for the first external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK         0x1C000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT         26
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0         0x04000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1         0x08000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH          0x0c000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED       0x10000000
+
+	/*  Set the MDC/MDIO access for the second external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK         0xE0000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT         29
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0         0x20000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1         0x40000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH          0x60000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED       0x80000000
+
+	u32 config_3;				/* 0x11C */
+	#define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK         0x00000F00
+		#define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT              8
+		#define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5        0x00000000
+		#define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0        0x00000100
+
+	u32 ump_nc_si_config;			/* 0x120 */
+	#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK       0x00000003
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT       0
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY         0x00000001
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII        0x00000002
+
+	#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK       0x00000f00
+		#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT       8
+
+	#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK   0x00ff0000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT   16
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE    0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+	u32 board;			/* 0x124 */
+	#define SHARED_HW_CFG_E3_I2C_MUX0_MASK              0x0000003F
+	#define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT                      0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_MASK              0x00000FC0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT                      6
+	/* Use the PIN_CFG_XXX defines on top */
+	#define SHARED_HW_CFG_BOARD_REV_MASK                0x00ff0000
+	#define SHARED_HW_CFG_BOARD_REV_SHIFT                        16
+
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK          0x0f000000
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT                  24
+
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK          0xf0000000
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT                  28
+
+	u32 wc_lane_config;				    /* 0x128 */
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_MASK            0x0000FFFF
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT            0
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32103210         0x00001b1b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32100123         0x00001be4
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01233210         0x0000e41b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01230123         0x0000e4e4
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK         0x000000FF
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                 0
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK         0x0000FF00
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                 8
+
+	/* TX lane Polarity swap */
+	#define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED     0x00010000
+	#define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED     0x00020000
+	#define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED     0x00040000
+	#define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED     0x00080000
+	/* TX lane Polarity swap */
+	#define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED     0x00100000
+	#define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED     0x00200000
+	#define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED     0x00400000
+	#define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED     0x00800000
+
+	/*  Selects the port layout of the board */
+	#define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK           0x0F000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT           24
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01           0x00000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10           0x01000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123         0x02000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032         0x03000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301         0x04000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210         0x05000000
+};
+
+
+/****************************************************************************
+ * Port HW configuration                                                    *
+ ****************************************************************************/
+struct port_hw_cfg {		    /* port 0: 0x12c  port 1: 0x2bc */
+
+	u32 pci_id;
+	#define PORT_HW_CFG_PCI_VENDOR_ID_MASK              0xffff0000
+	#define PORT_HW_CFG_PCI_DEVICE_ID_MASK              0x0000ffff
+
+	u32 pci_sub_id;
+	#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK       0xffff0000
+	#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK       0x0000ffff
+
+	u32 power_dissipated;
+	#define PORT_HW_CFG_POWER_DIS_D0_MASK               0x000000ff
+	#define PORT_HW_CFG_POWER_DIS_D0_SHIFT                       0
+	#define PORT_HW_CFG_POWER_DIS_D1_MASK               0x0000ff00
+	#define PORT_HW_CFG_POWER_DIS_D1_SHIFT                       8
+	#define PORT_HW_CFG_POWER_DIS_D2_MASK               0x00ff0000
+	#define PORT_HW_CFG_POWER_DIS_D2_SHIFT                       16
+	#define PORT_HW_CFG_POWER_DIS_D3_MASK               0xff000000
+	#define PORT_HW_CFG_POWER_DIS_D3_SHIFT                       24
+
+	u32 power_consumed;
+	#define PORT_HW_CFG_POWER_CONS_D0_MASK              0x000000ff
+	#define PORT_HW_CFG_POWER_CONS_D0_SHIFT                      0
+	#define PORT_HW_CFG_POWER_CONS_D1_MASK              0x0000ff00
+	#define PORT_HW_CFG_POWER_CONS_D1_SHIFT                      8
+	#define PORT_HW_CFG_POWER_CONS_D2_MASK              0x00ff0000
+	#define PORT_HW_CFG_POWER_CONS_D2_SHIFT                      16
+	#define PORT_HW_CFG_POWER_CONS_D3_MASK              0xff000000
+	#define PORT_HW_CFG_POWER_CONS_D3_SHIFT                      24
+
+	u32 mac_upper;
+	#define PORT_HW_CFG_UPPERMAC_MASK                   0x0000ffff
+	#define PORT_HW_CFG_UPPERMAC_SHIFT                           0
+	u32 mac_lower;
+
+	u32 iscsi_mac_upper;  /* Upper 16 bits are always zeroes */
+	u32 iscsi_mac_lower;
+
+	u32 rdma_mac_upper;   /* Upper 16 bits are always zeroes */
+	u32 rdma_mac_lower;
+
+	u32 serdes_config;
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT         0
+
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK    0xffff0000
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT            16
+
+
+	/*  Default values: 2P-64, 4P-32 */
+	u32 pf_config;					    /* 0x158 */
+	#define PORT_HW_CFG_PF_NUM_VF_MASK                  0x0000007F
+	#define PORT_HW_CFG_PF_NUM_VF_SHIFT                          0
+
+	/*  Default values: 17 */
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK        0x00007F00
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT                8
+
+	#define PORT_HW_CFG_ENABLE_FLR_MASK                 0x00010000
+	#define PORT_HW_CFG_FLR_ENABLED                     0x00010000
+
+	u32 vf_config;					    /* 0x15C */
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK        0x0000007F
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT                0
+
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK           0xFFFF0000
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT                   16
+
+	u32 mf_pci_id;					    /* 0x160 */
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK           0x0000FFFF
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT                   0
+
+	/*  Controls the TX laser of the SFP+ module */
+	u32 sfp_ctrl;					    /* 0x164 */
+	#define PORT_HW_CFG_TX_LASER_MASK                   0x000000FF
+		#define PORT_HW_CFG_TX_LASER_SHIFT                   0
+		#define PORT_HW_CFG_TX_LASER_MDIO                    0x00000000
+		#define PORT_HW_CFG_TX_LASER_GPIO0                   0x00000001
+		#define PORT_HW_CFG_TX_LASER_GPIO1                   0x00000002
+		#define PORT_HW_CFG_TX_LASER_GPIO2                   0x00000003
+		#define PORT_HW_CFG_TX_LASER_GPIO3                   0x00000004
+
+	/*  Controls the fault module LED of the SFP+ */
+	#define PORT_HW_CFG_FAULT_MODULE_LED_MASK           0x0000FF00
+		#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT           8
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0           0x00000000
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1           0x00000100
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2           0x00000200
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3           0x00000300
+		#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED        0x00000400
+
+	/*  The output pin TX_DIS that controls the TX laser of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	u32 e3_sfp_ctrl;				    /* 0x168 */
+	#define PORT_HW_CFG_E3_TX_LASER_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_LASER_SHIFT                        0
+
+	/*  The output pin for SFPP_TYPE which turns on the Fault module LED */
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK           0x0000FF00
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT                   8
+
+	/*  The input pin MOD_ABS that indicates whether SFP+ module is
+	  present or not. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_MOD_ABS_MASK                 0x00FF0000
+	#define PORT_HW_CFG_E3_MOD_ABS_SHIFT                         16
+
+	/*  The output pin PWRDIS_SFP_X which disables the power of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_PWR_DIS_MASK                 0xFF000000
+	#define PORT_HW_CFG_E3_PWR_DIS_SHIFT                         24
+
+	/*
+	 * The input pin which signals module transmit fault. Use the
+	 * PIN_CFG_XXX defines on top
+	 */
+	u32 e3_cmn_pin_cfg;				    /* 0x16C */
+	#define PORT_HW_CFG_E3_TX_FAULT_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_FAULT_SHIFT                        0
+
+	/*  The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+	 top */
+	#define PORT_HW_CFG_E3_PHY_RESET_MASK               0x0000FF00
+	#define PORT_HW_CFG_E3_PHY_RESET_SHIFT                       8
+
+	/*
+	 * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+	 * defines on top
+	 */
+	#define PORT_HW_CFG_E3_PWR_DOWN_MASK                0x00FF0000
+	#define PORT_HW_CFG_E3_PWR_DOWN_SHIFT                        16
+
+	/*  The output pin values BSC_SEL which select the I2C for this port
+	  in the I2C Mux */
+	#define PORT_HW_CFG_E3_I2C_MUX0_MASK                0x01000000
+	#define PORT_HW_CFG_E3_I2C_MUX1_MASK                0x02000000
+
+
+	/*
+	 * The input pin I_FAULT which indicates that an over-current has
+	 * occurred.
+	 * Use the PIN_CFG_XXX defines on top
+	 */
+	u32 e3_cmn_pin_cfg1;				    /* 0x170 */
+	#define PORT_HW_CFG_E3_OVER_CURRENT_MASK            0x000000FF
+	#define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT                    0
+
+	/*  pause on host ring */
+	u32 generic_features;                               /* 0x174 */
+	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK                   0x00000001
+	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT                  0
+	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED               0x00000000
+	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED                0x00000001
+
+	/* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2.
+	 * LOM recommended and tested value is 0xBEB2. Using a different
+	 * value means using a value not tested by BRCM
+	 */
+	u32 sfi_tap_values;                                 /* 0x178 */
+	#define PORT_HW_CFG_TX_EQUALIZATION_MASK                      0x0000FFFF
+	#define PORT_HW_CFG_TX_EQUALIZATION_SHIFT                     0
+
+	/* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+	 * value is 0x2. LOM recommended and tested value is 0x2. Using a
+	 * different value means using a value not tested by BRCM
+	 */
+	#define PORT_HW_CFG_TX_DRV_BROADCAST_MASK                     0x000F0000
+	#define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT                    16
+	/*  Set non-default values for TXFIR in SFP mode. */
+	#define PORT_HW_CFG_TX_DRV_IFIR_MASK                          0x00F00000
+	#define PORT_HW_CFG_TX_DRV_IFIR_SHIFT                         20
+
+	/*  Set non-default values for IPREDRIVER in SFP mode. */
+	#define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK                    0x0F000000
+	#define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT                   24
+
+	/*  Set non-default values for POST2 in SFP mode. */
+	#define PORT_HW_CFG_TX_DRV_POST2_MASK                         0xF0000000
+	#define PORT_HW_CFG_TX_DRV_POST2_SHIFT                        28
+
+	u32 reserved0[5];				    /* 0x17c */
+
+	u32 aeu_int_mask;				    /* 0x190 */
+
+	u32 media_type;					    /* 0x194 */
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK            0x000000FF
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT                    0
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK            0x0000FF00
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT                    8
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK            0x00FF0000
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT                    16
+
+	/*  4 times 16 bits for all 4 lanes. In case an external PHY is present
+	      (not direct mode), those values will not take effect on the 4 XGXS
+	      lanes. For some external PHYs (such as 8706 and 8726) the values
+	      will be used to configure the external PHY; in those cases, not
+	      all 4 values are needed. */
+	u16 xgxs_config_rx[4];			/* 0x198 */
+	u16 xgxs_config_tx[4];			/* 0x1A0 */
+
+	/* For storing FCOE mac on shared memory */
+	u32 fcoe_fip_mac_upper;
+	#define PORT_HW_CFG_FCOE_UPPERMAC_MASK              0x0000ffff
+	#define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT                      0
+	u32 fcoe_fip_mac_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 Reserved1[49];				    /* 0x1C0 */
+
+	/*  Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+	      84833 only */
+	u32 xgbt_phy_cfg;				    /* 0x284 */
+	#define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK             0x000000FF
+	#define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT                     0
+
+		u32 default_cfg;			    /* 0x288 */
+	#define PORT_HW_CFG_GPIO0_CONFIG_MASK               0x00000003
+		#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT               0
+		#define PORT_HW_CFG_GPIO0_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO0_CONFIG_LOW                 0x00000001
+		#define PORT_HW_CFG_GPIO0_CONFIG_HIGH                0x00000002
+		#define PORT_HW_CFG_GPIO0_CONFIG_INPUT               0x00000003
+
+	#define PORT_HW_CFG_GPIO1_CONFIG_MASK               0x0000000C
+		#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT               2
+		#define PORT_HW_CFG_GPIO1_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO1_CONFIG_LOW                 0x00000004
+		#define PORT_HW_CFG_GPIO1_CONFIG_HIGH                0x00000008
+		#define PORT_HW_CFG_GPIO1_CONFIG_INPUT               0x0000000c
+
+	#define PORT_HW_CFG_GPIO2_CONFIG_MASK               0x00000030
+		#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT               4
+		#define PORT_HW_CFG_GPIO2_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO2_CONFIG_LOW                 0x00000010
+		#define PORT_HW_CFG_GPIO2_CONFIG_HIGH                0x00000020
+		#define PORT_HW_CFG_GPIO2_CONFIG_INPUT               0x00000030
+
+	#define PORT_HW_CFG_GPIO3_CONFIG_MASK               0x000000C0
+		#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT               6
+		#define PORT_HW_CFG_GPIO3_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO3_CONFIG_LOW                 0x00000040
+		#define PORT_HW_CFG_GPIO3_CONFIG_HIGH                0x00000080
+		#define PORT_HW_CFG_GPIO3_CONFIG_INPUT               0x000000c0
+
+	/*  When the KR link is required to be forced to a mode which is not
+	      KR-compliant, this parameter determines what triggers it. When a
+	      GPIO is selected, a low input will force the speed. Currently the
+	      default speed is 1G. In the future, it may be widened to select
+	      the forced speed with another parameter. Note that when force-1G
+	      is enabled, it overrides option 56: Link Speed option. */
+	#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK           0x00000F00
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT           8
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED      0x00000000
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0        0x00000100
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0        0x00000200
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0        0x00000300
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0        0x00000400
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1        0x00000500
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1        0x00000600
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1        0x00000700
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1        0x00000800
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED          0x00000900
+	/*  Enables selecting which GPIO is used to reset the external PHY */
+	#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK           0x000F0000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT           16
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE        0x00000000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0        0x00010000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0        0x00020000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0        0x00030000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0        0x00040000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1        0x00050000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1        0x00060000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1        0x00070000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1        0x00080000
+
+	/*  Enable BAM on KR */
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK           0x00100000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
+
+	/*  Enable Common Mode Sense */
+	#define PORT_HW_CFG_ENABLE_CMS_MASK                 0x00200000
+	#define PORT_HW_CFG_ENABLE_CMS_SHIFT                         21
+	#define PORT_HW_CFG_ENABLE_CMS_DISABLED                      0x00000000
+	#define PORT_HW_CFG_ENABLE_CMS_ENABLED                       0x00200000
+
+	/*  Determine the Serdes electrical interface   */
+	#define PORT_HW_CFG_NET_SERDES_IF_MASK              0x0F000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SHIFT                      24
+	#define PORT_HW_CFG_NET_SERDES_IF_SGMII                      0x00000000
+	#define PORT_HW_CFG_NET_SERDES_IF_XFI                        0x01000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SFI                        0x02000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR                         0x03000000
+	#define PORT_HW_CFG_NET_SERDES_IF_DXGXS                      0x04000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR2                        0x05000000
+
+
+	u32 speed_capability_mask2;			    /* 0x28C */
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK       0x0000FFFF
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT       0
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL    0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__           0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___          0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL   0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G          0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G    0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G         0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G         0x00000080
+
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK       0xFFFF0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT       16
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL    0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__           0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___          0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL   0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G          0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G    0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G         0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G         0x00800000
+
+
+	/*  In the case where two media types (e.g. copper and fiber) are
+	      present and electrically active at the same time, PHY Selection
+	      will determine which of the two PHYs will be designated as the
+	      Active PHY and used for a connection to the network.  */
+	u32 multi_phy_config;				    /* 0x290 */
+	#define PORT_HW_CFG_PHY_SELECTION_MASK              0x00000007
+		#define PORT_HW_CFG_PHY_SELECTION_SHIFT              0
+		#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT   0x00000000
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY          0x00000001
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY         0x00000002
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+	/*  When enabled, all second phy nvram parameters will be swapped
+	      with the first phy parameters */
+	#define PORT_HW_CFG_PHY_SWAPPED_MASK                0x00000008
+		#define PORT_HW_CFG_PHY_SWAPPED_SHIFT                3
+		#define PORT_HW_CFG_PHY_SWAPPED_DISABLED             0x00000000
+		#define PORT_HW_CFG_PHY_SWAPPED_ENABLED              0x00000008
+
+
+	/*  Address of the second external phy */
+	u32 external_phy_config2;			    /* 0x294 */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK         0x000000FF
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT                 0
+
+	/*  The second XGXS external PHY type */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK         0x0000FF00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT         8
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT        0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071       0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072       0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073       0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705       0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706       0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726       0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481       0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101       0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727       0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC   0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823      0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640      0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833      0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE    0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858      0x00001200
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
+
+
+	/*  4 times 16 bits for all 4 lanes. For some external PHYs (such as
+	      8706, 8726 and 8727) not all 4 values are needed. */
+	u16 xgxs_config2_rx[4];				    /* 0x296 */
+	u16 xgxs_config2_tx[4];				    /* 0x2A0 */
+
+	u32 lane_config;
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASK              0x0000ffff
+		#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT              0
+		/* AN and forced */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01230123           0x00001b1b
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01233210           0x00001be4
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_31203120           0x0000d8d8
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_32103210           0x0000e4e4
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK           0x000000ff
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                   0
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK           0x0000ff00
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                   8
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK       0x0000c000
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT               14
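+	/* Worked example (illustrative, not part of the original HSI): for
+	 * the 01230123 mapping lane_config holds 0x00001b1b, so
+	 *	tx_map = (0x1b1b & PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+	 *		 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT  = 0x1b
+	 *	rx_map = (0x1b1b & PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+	 *		 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT  = 0x1b
+	 * with each 2-bit field of the map selecting one lane.
+	 */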
+
+	/*  Indicate whether to swap the external phy polarity */
+	#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK          0x00010000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED       0x00000000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED        0x00010000
+
+
+	u32 external_phy_config;
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK          0x000000ff
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT                  0
+
+	#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK          0x0000ff00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT          8
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT         0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071        0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072        0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073        0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705        0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706        0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726        0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481        0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101        0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727        0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC    0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823       0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640       0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833       0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE     0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858       0x00001200
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
+
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK        0x00ff0000
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT                16
+
+	#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK        0xff000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT        24
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT       0x00000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482      0x01000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD    0x02000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN     0xff000000
+
+	u32 speed_capability_mask;
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK        0x0000ffff
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT        0
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL     0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF     0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF    0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL    0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G           0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G         0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G          0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G          0x00000080
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED     0x0000f000
+
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK        0xffff0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT        16
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL     0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF     0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF    0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL    0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G           0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G         0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G          0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G          0x00800000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED     0xf0000000
+
+	/*  A place to hold the original MAC address as a backup */
+	u32 backup_mac_upper;			/* 0x2B4 */
+	u32 backup_mac_lower;			/* 0x2B8 */
+
+};
+
+
+/****************************************************************************
+ * Shared Feature configuration                                             *
+ ****************************************************************************/
+struct shared_feat_cfg {		 /* NVRAM Offset */
+
+	u32 config;			/* 0x450 */
+	#define SHARED_FEATURE_BMC_ECHO_MODE_EN             0x00000001
+
+	/* Use NVRAM values instead of HW default values */
+	#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+							    0x00000002
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+								     0x00000000
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+								     0x00000002
+
+	#define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK         0x00000008
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO          0x00000000
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM         0x00000008
+
+	#define SHARED_FEAT_CFG_NCSI_ID_MASK                0x00000030
+	#define SHARED_FEAT_CFG_NCSI_ID_SHIFT                        4
+
+	/*  Override the OTP back to single function mode. When using GPIO,
+	      high means only SF, 0 is according to CLP configuration */
+	#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK          0x00000700
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT          8
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED     0x00000000
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE        0x00000500
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE       0x00000600
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE  0x00000700
+
+	/* The interval in seconds between sending LLDP packets. Set to zero
+	   to disable the feature */
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK     0x00ff0000
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT             16
+
+	/* The assigned device type ID for LLDP usage */
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK    0xff000000
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT            24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration                                               *
+ ****************************************************************************/
+struct port_feat_cfg {		    /* port 0: 0x454  port 1: 0x4c8 */
+
+	u32 config;
+	#define PORT_FEATURE_BAR1_SIZE_MASK                 0x0000000f
+		#define PORT_FEATURE_BAR1_SIZE_SHIFT                 0
+		#define PORT_FEATURE_BAR1_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR1_SIZE_64K                   0x00000001
+		#define PORT_FEATURE_BAR1_SIZE_128K                  0x00000002
+		#define PORT_FEATURE_BAR1_SIZE_256K                  0x00000003
+		#define PORT_FEATURE_BAR1_SIZE_512K                  0x00000004
+		#define PORT_FEATURE_BAR1_SIZE_1M                    0x00000005
+		#define PORT_FEATURE_BAR1_SIZE_2M                    0x00000006
+		#define PORT_FEATURE_BAR1_SIZE_4M                    0x00000007
+		#define PORT_FEATURE_BAR1_SIZE_8M                    0x00000008
+		#define PORT_FEATURE_BAR1_SIZE_16M                   0x00000009
+		#define PORT_FEATURE_BAR1_SIZE_32M                   0x0000000a
+		#define PORT_FEATURE_BAR1_SIZE_64M                   0x0000000b
+		#define PORT_FEATURE_BAR1_SIZE_128M                  0x0000000c
+		#define PORT_FEATURE_BAR1_SIZE_256M                  0x0000000d
+		#define PORT_FEATURE_BAR1_SIZE_512M                  0x0000000e
+		#define PORT_FEATURE_BAR1_SIZE_1G                    0x0000000f
+	#define PORT_FEATURE_BAR2_SIZE_MASK                 0x000000f0
+		#define PORT_FEATURE_BAR2_SIZE_SHIFT                 4
+		#define PORT_FEATURE_BAR2_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR2_SIZE_64K                   0x00000010
+		#define PORT_FEATURE_BAR2_SIZE_128K                  0x00000020
+		#define PORT_FEATURE_BAR2_SIZE_256K                  0x00000030
+		#define PORT_FEATURE_BAR2_SIZE_512K                  0x00000040
+		#define PORT_FEATURE_BAR2_SIZE_1M                    0x00000050
+		#define PORT_FEATURE_BAR2_SIZE_2M                    0x00000060
+		#define PORT_FEATURE_BAR2_SIZE_4M                    0x00000070
+		#define PORT_FEATURE_BAR2_SIZE_8M                    0x00000080
+		#define PORT_FEATURE_BAR2_SIZE_16M                   0x00000090
+		#define PORT_FEATURE_BAR2_SIZE_32M                   0x000000a0
+		#define PORT_FEATURE_BAR2_SIZE_64M                   0x000000b0
+		#define PORT_FEATURE_BAR2_SIZE_128M                  0x000000c0
+		#define PORT_FEATURE_BAR2_SIZE_256M                  0x000000d0
+		#define PORT_FEATURE_BAR2_SIZE_512M                  0x000000e0
+		#define PORT_FEATURE_BAR2_SIZE_1G                    0x000000f0
+
+	#define PORT_FEAT_CFG_DCBX_MASK                     0x00000100
+		#define PORT_FEAT_CFG_DCBX_DISABLED                  0x00000000
+		#define PORT_FEAT_CFG_DCBX_ENABLED                   0x00000100
+
+		#define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK        0x00000C00
+		#define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE        0x00000400
+		#define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI       0x00000800
+
+	#define PORT_FEATURE_EN_SIZE_MASK                   0x0f000000
+	#define PORT_FEATURE_EN_SIZE_SHIFT                           24
+	#define PORT_FEATURE_WOL_ENABLED                             0x01000000
+	#define PORT_FEATURE_MBA_ENABLED                             0x02000000
+	#define PORT_FEATURE_MFW_ENABLED                             0x04000000
+
+	/* Advertise expansion ROM even if MBA is disabled */
+	#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK        0x08000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED     0x00000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED      0x08000000
+
+	/* Check the optic vendor via i2c against a list of approved modules
+	   in a separate nvram image */
+	#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK         0xe0000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT         29
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+								     0x00000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+								     0x20000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG   0x40000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN    0x60000000
+
+	u32 wol_config;
+	/* Default is used when driver sets to "auto" mode */
+	#define PORT_FEATURE_WOL_DEFAULT_MASK               0x00000003
+		#define PORT_FEATURE_WOL_DEFAULT_SHIFT               0
+		#define PORT_FEATURE_WOL_DEFAULT_DISABLE             0x00000000
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC               0x00000001
+		#define PORT_FEATURE_WOL_DEFAULT_ACPI                0x00000002
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI      0x00000003
+	#define PORT_FEATURE_WOL_RES_PAUSE_CAP              0x00000004
+	#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP         0x00000008
+	#define PORT_FEATURE_WOL_ACPI_UPON_MGMT             0x00000010
+
+	u32 mba_config;
+	#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK       0x00000007
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT       0
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE         0x00000000
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL         0x00000001
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP       0x00000002
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB      0x00000003
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT   0x00000004
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE        0x00000007
+
+	#define PORT_FEATURE_MBA_BOOT_RETRY_MASK            0x00000038
+	#define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT                    3
+
+	#define PORT_FEATURE_MBA_RES_PAUSE_CAP              0x00000100
+	#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP         0x00000200
+	#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE        0x00000400
+	#define PORT_FEATURE_MBA_HOTKEY_MASK                0x00000800
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_S               0x00000000
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_B               0x00000800
+	#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK          0x000ff000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT          12
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED       0x00000000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K             0x00001000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K             0x00002000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K             0x00003000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K            0x00004000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K            0x00005000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K            0x00006000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K           0x00007000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K           0x00008000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K           0x00009000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M             0x0000a000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M             0x0000b000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M             0x0000c000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M             0x0000d000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M            0x0000e000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M            0x0000f000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK           0x00f00000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT                   20
+	#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK        0x03000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT        24
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO         0x00000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS          0x01000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H       0x02000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H       0x03000000
+	#define PORT_FEATURE_MBA_LINK_SPEED_MASK            0x3c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT            26
+		#define PORT_FEATURE_MBA_LINK_SPEED_AUTO             0x00000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10HD             0x04000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10FD             0x08000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100HD            0x0c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100FD            0x10000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS            0x14000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS          0x18000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4       0x1c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_20GBPS           0x20000000
+	u32 bmc_config;
+	#define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK         0x00000001
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT       0x00000000
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN            0x00000001
+
+	u32 mba_vlan_cfg;
+	#define PORT_FEATURE_MBA_VLAN_TAG_MASK              0x0000ffff
+	#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT                      0
+	#define PORT_FEATURE_MBA_VLAN_EN                    0x00010000
+
+	u32 resource_cfg;
+	#define PORT_FEATURE_RESOURCE_CFG_VALID             0x00000001
+	#define PORT_FEATURE_RESOURCE_CFG_DIAG              0x00000002
+	#define PORT_FEATURE_RESOURCE_CFG_L2                0x00000004
+	#define PORT_FEATURE_RESOURCE_CFG_ISCSI             0x00000008
+	#define PORT_FEATURE_RESOURCE_CFG_RDMA              0x00000010
+
+	u32 smbus_config;
+	#define PORT_FEATURE_SMBUS_ADDR_MASK                0x000000fe
+	#define PORT_FEATURE_SMBUS_ADDR_SHIFT                        1
+
+	u32 vf_config;
+	#define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK             0x0000000f
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT             0
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED          0x00000000
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4K                0x00000001
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8K                0x00000002
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16K               0x00000003
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32K               0x00000004
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64K               0x00000005
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_128K              0x00000006
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_256K              0x00000007
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_512K              0x00000008
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_1M                0x00000009
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_2M                0x0000000a
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4M                0x0000000b
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8M                0x0000000c
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16M               0x0000000d
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32M               0x0000000e
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64M               0x0000000f
+
+	u32 link_config;    /* Used as HW defaults for the driver */
+	#define PORT_FEATURE_CONNECTED_SWITCH_MASK          0x03000000
+		#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT          24
+		/* (forced) low speed switch (< 10G) */
+		#define PORT_FEATURE_CON_SWITCH_1G_SWITCH            0x00000000
+		/* (forced) high speed switch (>= 10G) */
+		#define PORT_FEATURE_CON_SWITCH_10G_SWITCH           0x01000000
+		#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT          0x02000000
+		#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT      0x03000000
+
+	#define PORT_FEATURE_LINK_SPEED_MASK                0x000f0000
+		#define PORT_FEATURE_LINK_SPEED_SHIFT                16
+		#define PORT_FEATURE_LINK_SPEED_AUTO                 0x00000000
+		#define PORT_FEATURE_LINK_SPEED_10M_FULL             0x00010000
+		#define PORT_FEATURE_LINK_SPEED_10M_HALF             0x00020000
+		#define PORT_FEATURE_LINK_SPEED_100M_HALF            0x00030000
+		#define PORT_FEATURE_LINK_SPEED_100M_FULL            0x00040000
+		#define PORT_FEATURE_LINK_SPEED_1G                   0x00050000
+		#define PORT_FEATURE_LINK_SPEED_2_5G                 0x00060000
+		#define PORT_FEATURE_LINK_SPEED_10G_CX4              0x00070000
+		#define PORT_FEATURE_LINK_SPEED_20G                  0x00080000
+
+	#define PORT_FEATURE_FLOW_CONTROL_MASK              0x00000700
+		#define PORT_FEATURE_FLOW_CONTROL_SHIFT              8
+		#define PORT_FEATURE_FLOW_CONTROL_AUTO               0x00000000
+		#define PORT_FEATURE_FLOW_CONTROL_TX                 0x00000100
+		#define PORT_FEATURE_FLOW_CONTROL_RX                 0x00000200
+		#define PORT_FEATURE_FLOW_CONTROL_BOTH               0x00000300
+		#define PORT_FEATURE_FLOW_CONTROL_NONE               0x00000400
+
+	/* The default for MCP link configuration,
+	   uses the same defines as link_config */
+	u32 mfw_wol_link_cfg;
+
+	/* The default for the driver of the second external phy,
+	   uses the same defines as link_config */
+	u32 link_config2;				    /* 0x47C */
+
+	/* The default for MCP of the second external phy,
+	   uses the same defines as link_config */
+	u32 mfw_wol_link_cfg2;				    /* 0x480 */
+
+
+	/*  EEE power saving mode */
+	u32 eee_power_mode;                                 /* 0x484 */
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_MASK                     0x000000FF
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT                    0
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED                 0x00000000
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED                 0x00000001
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE               0x00000002
+	#define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY              0x00000003
+
+
+	u32 Reserved2[16];                                  /* 0x488 */
+};
+
+
+/****************************************************************************
+ * Device Information                                                       *
+ ****************************************************************************/
+struct shm_dev_info {				/* size */
+
+	u32    bc_rev; /* 8 bits each: major, minor, build */	       /* 4 */
+
+	struct shared_hw_cfg     shared_hw_config;	      /* 40 */
+
+	struct port_hw_cfg       port_hw_config[PORT_MAX];     /* 400*2=800 */
+
+	struct shared_feat_cfg   shared_feature_config;		   /* 4 */
+
+	struct port_feat_cfg     port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+	#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0              0
+#define FUNC_1              1
+#define FUNC_2              2
+#define FUNC_3              3
+#define FUNC_4              4
+#define FUNC_5              5
+#define FUNC_6              6
+#define FUNC_7              7
+#define E1_FUNC_MAX         2
+#define E1H_FUNC_MAX            8
+#define E2_FUNC_MAX         4   /* per path */
+
+#define VN_0                0
+#define VN_1                1
+#define VN_2                2
+#define VN_3                3
+#define E1VN_MAX            1
+#define E1HVN_MAX           4
+
+#define E2_VF_MAX           64  /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines the frequency of the driver
+ * issuing the PULSE message code.  The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS     250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the wait has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS      5000
+
+#define FW_ACK_POLL_TIME_MS     1
+
+#define FW_ACK_NUM_OF_POLL  (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
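+
+/* Usage sketch (illustrative, not part of the original HSI): a driver
+ * waiting for a firmware acknowledgement would typically poll the mailbox
+ * up to FW_ACK_NUM_OF_POLL times, sleeping FW_ACK_POLL_TIME_MS between
+ * reads, which bounds the wait by FW_ACK_TIME_OUT_MS overall, e.g.:
+ *
+ *	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
+ *		seq = read_fw_mb_header();	(hypothetical helper)
+ *		if ((seq & FW_MSG_CODE_MASK) == expected_code)
+ *			break;
+ *		msleep(FW_ACK_POLL_TIME_MS);
+ *	}
+ */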
+
+#define MFW_TRACE_SIGNATURE     0x54524342
+
+/****************************************************************************
+ * Driver <-> FW Mailbox                                                    *
+ ****************************************************************************/
+struct drv_port_mb {
+
+	u32 link_status;
+	/* Driver should update this field on any link change event */
+
+	#define LINK_STATUS_NONE				(0<<0)
+	#define LINK_STATUS_LINK_FLAG_MASK			0x00000001
+	#define LINK_STATUS_LINK_UP				0x00000001
+	#define LINK_STATUS_SPEED_AND_DUPLEX_MASK		0x0000001E
+	#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE	(0<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10THD		(1<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD		(2<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD		(3<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100T4		(4<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD		(5<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD		(6<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD		(8<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD		(11<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD		(11<<1)
+
+	#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK		0x00000020
+	#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED		0x00000020
+
+	#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE		0x00000040
+	#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK	0x00000080
+	#define LINK_STATUS_PARALLEL_DETECTION_USED		0x00000080
+
+	#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE	0x00000200
+	#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE	0x00000400
+	#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE		0x00000800
+	#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE	0x00001000
+	#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE	0x00002000
+	#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE		0x00004000
+	#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE		0x00008000
+
+	#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK		0x00010000
+	#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED		0x00010000
+
+	#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK		0x00020000
+	#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED		0x00020000
+
+	#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK	0x000C0000
+	#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE	(0<<18)
+	#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	(1<<18)
+	#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE	(2<<18)
+	#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE		(3<<18)
+
+	#define LINK_STATUS_SERDES_LINK				0x00100000
+
+	#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE	0x00200000
+	#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE	0x00400000
+	#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE		0x00800000
+	#define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE		0x10000000
+
+	#define LINK_STATUS_PFC_ENABLED				0x20000000
+
+	#define LINK_STATUS_PHYSICAL_LINK_FLAG			0x40000000
+	#define LINK_STATUS_SFP_TX_FAULT			0x80000000
+
+	u32 port_stx;
+
+	u32 stat_nig_timer;
+
+	/* MCP firmware does not use this field */
+	u32 ext_phy_fw_version;
+
+};
+
+
+struct drv_func_mb {
+
+	u32 drv_mb_header;
+	#define DRV_MSG_CODE_MASK                       0xffff0000
+	#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+	#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN          0x20000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS         0x20010000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP         0x20020000
+	#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+	#define DRV_MSG_CODE_DCC_OK                     0x30000000
+	#define DRV_MSG_CODE_DCC_FAILURE                0x31000000
+	#define DRV_MSG_CODE_DIAG_ENTER_REQ             0x50000000
+	#define DRV_MSG_CODE_DIAG_EXIT_REQ              0x60000000
+	#define DRV_MSG_CODE_VALIDATE_KEY               0x70000000
+	#define DRV_MSG_CODE_GET_CURR_KEY               0x80000000
+	#define DRV_MSG_CODE_GET_UPGRADE_KEY            0x81000000
+	#define DRV_MSG_CODE_GET_MANUF_KEY              0x82000000
+	#define DRV_MSG_CODE_LOAD_L2B_PRAM              0x90000000
+	#define DRV_MSG_CODE_OEM_OK			0x00010000
+	#define DRV_MSG_CODE_OEM_FAILURE		0x00020000
+	#define DRV_MSG_CODE_OEM_UPDATE_SVID_OK		0x00030000
+	#define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE	0x00040000
+	/*
+	 * The optic module verification command requires bootcode
+	 * v5.0.6 or later; the specific optic module verification command
+	 * requires bootcode v5.2.12 or later
+	 */
+	#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL     0xa0000000
+	#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
+	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
+	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
+	#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
+	#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
+	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+	#define REQ_BC_VER_4_MT_SUPPORTED               0x00070201
+	#define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
+	#define REQ_BC_VER_4_FCOE_FEATURES              0x00070209
+
+	#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
+	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
+	#define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF     0x00070401
+
+	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+	#define DRV_MSG_CODE_AFEX_DRIVER_SETMAC         0xd0000000
+	#define DRV_MSG_CODE_AFEX_LISTGET_ACK           0xd1000000
+	#define DRV_MSG_CODE_AFEX_LISTSET_ACK           0xd2000000
+	#define DRV_MSG_CODE_AFEX_STATSGET_ACK          0xd3000000
+	#define DRV_MSG_CODE_AFEX_VIFSET_ACK            0xd4000000
+
+	#define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
+	#define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
+
+	#define DRV_MSG_CODE_EEE_RESULTS_ACK            0xda000000
+
+	#define DRV_MSG_CODE_RMMOD                      0xdb000000
+	#define REQ_BC_VER_4_RMMOD_CMD                  0x0007080f
+
+	#define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
+	#define REQ_BC_VER_4_SET_MF_BW                  0x00060202
+	#define DRV_MSG_CODE_SET_MF_BW_ACK              0xe1000000
+
+	#define DRV_MSG_CODE_LINK_STATUS_CHANGED        0x01000000
+
+	#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+	#define REQ_BC_VER_4_INITIATE_FLR               0x00070213
+
+	#define BIOS_MSG_CODE_LIC_CHALLENGE             0xff010000
+	#define BIOS_MSG_CODE_LIC_RESPONSE              0xff020000
+	#define BIOS_MSG_CODE_VIRT_MAC_PRIM             0xff030000
+	#define BIOS_MSG_CODE_VIRT_MAC_ISCSI            0xff040000
+
+	#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+	u32 drv_mb_param;
+	#define DRV_MSG_CODE_SET_MF_BW_MIN_MASK         0x00ff0000
+	#define DRV_MSG_CODE_SET_MF_BW_MAX_MASK         0xff000000
+
+	#define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET     0x00000002
+
+	#define DRV_MSG_CODE_LOAD_REQ_WITH_LFA          0x0000100a
+	#define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA         0x00002000
+
+	u32 fw_mb_header;
+	#define FW_MSG_CODE_MASK                        0xffff0000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
+	#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+	#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+	/* Load common chip is supported from bc 6.0.0  */
+	#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP       0x00060000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP        0x10130000
+
+	#define FW_MSG_CODE_DRV_LOAD_REFUSED            0x10200000
+	#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+	#define FW_MSG_CODE_DRV_UNLOAD_COMMON           0x20100000
+	#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20110000
+	#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20120000
+	#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+	#define FW_MSG_CODE_DCC_DONE                    0x30100000
+	#define FW_MSG_CODE_LLDP_DONE                   0x40100000
+	#define FW_MSG_CODE_DIAG_ENTER_DONE             0x50100000
+	#define FW_MSG_CODE_DIAG_REFUSE                 0x50200000
+	#define FW_MSG_CODE_DIAG_EXIT_DONE              0x60100000
+	#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS        0x70100000
+	#define FW_MSG_CODE_VALIDATE_KEY_FAILURE        0x70200000
+	#define FW_MSG_CODE_GET_KEY_DONE                0x80100000
+	#define FW_MSG_CODE_NO_KEY                      0x80f00000
+	#define FW_MSG_CODE_LIC_INFO_NOT_READY          0x80f80000
+	#define FW_MSG_CODE_L2B_PRAM_LOADED             0x90100000
+	#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE     0x90210000
+	#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE     0x90220000
+	#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE     0x90230000
+	#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE     0x90240000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS        0xa0100000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
+	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+	#define FW_MSG_CODE_HW_SET_INVALID_IMAGE        0xb0100000
+
+	#define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE     0xd0100000
+	#define FW_MSG_CODE_AFEX_LISTGET_ACK            0xd1100000
+	#define FW_MSG_CODE_AFEX_LISTSET_ACK            0xd2100000
+	#define FW_MSG_CODE_AFEX_STATSGET_ACK           0xd3100000
+	#define FW_MSG_CODE_AFEX_VIFSET_ACK             0xd4100000
+
+	#define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
+	#define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
+
+	#define FW_MSG_CODE_EEE_RESULS_ACK              0xda100000
+
+	#define FW_MSG_CODE_RMMOD_ACK                   0xdb100000
+
+	#define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
+	#define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
+
+	#define FW_MSG_CODE_LINK_CHANGED_ACK            0x01100000
+
+	#define FW_MSG_CODE_LIC_CHALLENGE               0xff010000
+	#define FW_MSG_CODE_LIC_RESPONSE                0xff020000
+	#define FW_MSG_CODE_VIRT_MAC_PRIM               0xff030000
+	#define FW_MSG_CODE_VIRT_MAC_ISCSI              0xff040000
+
+	#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+	u32 fw_mb_param;
+
+	u32 drv_pulse_mb;
+	#define DRV_PULSE_SEQ_MASK                      0x00007fff
+	#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+	/*
+	 * The system time is in the format of
+	 * (year-2001)*12*32 + month*32 + day.
+	 */
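+	/* For example (illustrative), 14 March 2013 encodes as
+	 * (2013-2001)*12*32 + 3*32 + 14 = 4718 (0x126e). */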
+	#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+	/*
+	 * Indicates to the firmware not to go into
+	 * OS-absent mode when it is not getting the driver pulse.
+	 * This is used for debugging as well as for PXE (MBA).
+	 */
+
+	u32 mcp_pulse_mb;
+	#define MCP_PULSE_SEQ_MASK                      0x00007fff
+	#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+	/* Indicates to the driver not to assert due to lack
+	 * of MCP response */
+	#define MCP_EVENT_MASK                          0xffff0000
+	#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+	u32 iscsi_boot_signature;
+	u32 iscsi_boot_block_offset;
+
+	u32 drv_status;
+	#define DRV_STATUS_PMF                          0x00000001
+	#define DRV_STATUS_VF_DISABLED                  0x00000002
+	#define DRV_STATUS_SET_MF_BW                    0x00000004
+	#define DRV_STATUS_LINK_EVENT                   0x00000008
+
+	#define DRV_STATUS_OEM_EVENT_MASK               0x00000070
+	#define DRV_STATUS_OEM_DISABLE_ENABLE_PF        0x00000010
+	#define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION     0x00000020
+
+	#define DRV_STATUS_OEM_UPDATE_SVID              0x00000080
+
+	#define DRV_STATUS_DCC_EVENT_MASK               0x0000ff00
+	#define DRV_STATUS_DCC_DISABLE_ENABLE_PF        0x00000100
+	#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION     0x00000200
+	#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS       0x00000400
+	#define DRV_STATUS_DCC_RESERVED1                0x00000800
+	#define DRV_STATUS_DCC_SET_PROTOCOL             0x00001000
+	#define DRV_STATUS_DCC_SET_PRIORITY             0x00002000
+
+	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
+	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+	#define DRV_STATUS_AFEX_EVENT_MASK              0x03f00000
+	#define DRV_STATUS_AFEX_LISTGET_REQ             0x00100000
+	#define DRV_STATUS_AFEX_LISTSET_REQ             0x00200000
+	#define DRV_STATUS_AFEX_STATSGET_REQ            0x00400000
+	#define DRV_STATUS_AFEX_VIFSET_REQ              0x00800000
+
+	#define DRV_STATUS_DRV_INFO_REQ                 0x04000000
+
+	#define DRV_STATUS_EEE_NEGOTIATION_RESULTS      0x08000000
+
+	u32 virt_mac_upper;
+	#define VIRT_MAC_SIGN_MASK                      0xffff0000
+	#define VIRT_MAC_SIGNATURE                      0x564d0000
+	u32 virt_mac_lower;
+
+};
+
+
+/****************************************************************************
+ * Management firmware state                                                *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE                          110
+
+struct mgmtfw_state {
+	u32 opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration                                             *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+	u32 clp_mb;
+	#define SHARED_MF_CLP_SET_DEFAULT               0x00000000
+	/* set by CLP */
+	#define SHARED_MF_CLP_EXIT                      0x00000001
+	/* set by MCP */
+	#define SHARED_MF_CLP_EXIT_DONE                 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+	u32 dynamic_cfg;    /* device control channel */
+	#define PORT_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define PORT_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define PORT_MF_CFG_E1HOV_TAG_DEFAULT         PORT_MF_CFG_E1HOV_TAG_MASK
+
+	u32 reserved[1];
+
+};
+
+struct func_mf_cfg {
+
+	u32 config;
+	/* E/R/I/D */
+	/* function 0 of each port cannot be hidden */
+	#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+
+	#define FUNC_MF_CFG_PROTOCOL_MASK               0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000000
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000002
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+	#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+				FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+	#define FUNC_MF_CFG_FUNC_DISABLED               0x00000008
+	#define FUNC_MF_CFG_FUNC_DELETED                0x00000010
+
+	/* PRI */
+	/* 0 - low priority, 3 - high priority */
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK      0x00000300
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT     8
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT   0x00000000
+
+	/* MINBW, MAXBW */
+	/* value range - 0..100, in increments of 100Mbps */
+	#define FUNC_MF_CFG_MIN_BW_MASK                 0x00ff0000
+	#define FUNC_MF_CFG_MIN_BW_SHIFT                16
+	#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+	#define FUNC_MF_CFG_MAX_BW_MASK                 0xff000000
+	#define FUNC_MF_CFG_MAX_BW_SHIFT                24
+	#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x64000000
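+	/* For example, the default FUNC_MF_CFG_MAX_BW_DEFAULT of 0x64000000
+	 * decodes to (0x64000000 >> FUNC_MF_CFG_MAX_BW_SHIFT) = 100, i.e.
+	 * 100 * 100Mbps = 10Gbps. */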
+
+	u32 mac_upper;	    /* MAC */
+	#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+	#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+	#define FUNC_MF_CFG_UPPERMAC_DEFAULT           FUNC_MF_CFG_UPPERMAC_MASK
+	u32 mac_lower;
+	#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+	u32 e1hov_tag;	/* VNI */
+	#define FUNC_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
+
+	/* afex default VLAN ID - 12 bits */
+	#define FUNC_MF_CFG_AFEX_VLAN_MASK              0x0fff0000
+	#define FUNC_MF_CFG_AFEX_VLAN_SHIFT             16
+
+	u32 afex_config;
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_MASK                     0x000000ff
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT                    0
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK                    0x0000ff00
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT                   8
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL                     0x00000100
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK                      0x000f0000
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT                     16
+
+	u32 reserved;
+};
+
+enum mf_cfg_afex_vlan_mode {
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+	FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
+};
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+	u32 func_cfg;
+	#define MACP_FUNC_CFG_FLAGS_MASK                0x0000007F
+	#define MACP_FUNC_CFG_FLAGS_SHIFT               0
+	#define MACP_FUNC_CFG_FLAGS_ENABLED             0x00000001
+	#define MACP_FUNC_CFG_FLAGS_ETHERNET            0x00000002
+	#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD       0x00000004
+	#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD        0x00000008
+	#define MACP_FUNC_CFG_PAUSE_ON_HOST_RING        0x00000080
+
+	u32 iscsi_mac_addr_upper;
+	u32 iscsi_mac_addr_lower;
+
+	u32 fcoe_mac_addr_upper;
+	u32 fcoe_mac_addr_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 preserve_data;
+	#define MF_FUNC_CFG_PRESERVE_L2_MAC             (1<<0)
+	#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC          (1<<1)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_MAC           (1<<2)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P         (1<<3)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N         (1<<4)
+	#define MF_FUNC_CFG_PRESERVE_TX_BW              (1<<5)
+};
+
+struct mf_cfg {
+
+	struct shared_mf_cfg    shared_mf_config;       /* 0x4 */
+							/* 0x8*2*2=0x20 */
+	struct port_mf_cfg  port_mf_config[NVM_PATH_MAX][PORT_MAX];
+	/* for all chips, there are 8 mf functions */
+	struct func_mf_cfg  func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+	/*
+	 * Extended configuration per function  - this array does not exist and
+	 * should not be accessed on 57711
+	 */
+	struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
+
+/****************************************************************************
+ * Shared Memory Region                                                     *
+ ****************************************************************************/
+struct shmem_region {		       /*   SharedMem Offset (size) */
+
+	u32         validity_map[PORT_MAX];  /* 0x0 (4*2 = 0x8) */
+	#define SHR_MEM_FORMAT_REV_MASK                     0xff000000
+	#define SHR_MEM_FORMAT_REV_ID                       ('A'<<24)
+	/* validity bits */
+	#define SHR_MEM_VALIDITY_PCI_CFG                    0x00100000
+	#define SHR_MEM_VALIDITY_MB                         0x00200000
+	#define SHR_MEM_VALIDITY_DEV_INFO                   0x00400000
+	#define SHR_MEM_VALIDITY_RESERVED                   0x00000007
+	/* One licensing bit should be set */
+	#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
+	#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+	#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+	#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+	/* Active MFW */
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI            0x00000040
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP             0x00000080
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI            0x000000c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+	struct shm_dev_info dev_info;	     /* 0x8     (0x438) */
+
+	struct license_key       drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+	/* FW information (for internal FW use) */
+	u32         fw_info_fio_offset;		/* 0x4a8       (0x4) */
+	struct mgmtfw_state mgmtfw_state;	/* 0x4ac     (0x1b8) */
+
+	struct drv_port_mb  port_mb[PORT_MAX];	/* 0x664 (16*2=0x20) */
+
+#ifdef BMAPI
+	/* This is a variable length array */
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb func_mb[1];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb  func_mb[];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 64 bit: VF ack                                                           */
+/* 8 bit:  iov_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox HSI, we want to keep using */
+/* u32. The FW must have the VF right after the PF since this is how it      */
+/* accesses arrays (it always expects the VF to reside after the PF, which   */
+/* makes the calculation much easier for it).                                */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition    */
+/* above                                                                    */
+/****************************************************************************/
+struct fw_flr_ack {
+	u32         pf_ack;
+	u32         vf_ack[1];
+	u32         iov_dis_ack;
+};
+
+struct fw_flr_mb {
+	u32         aggint;
+	u32         opgen_addr;
+	struct fw_flr_ack ack;
+};
+
+struct eee_remote_vals {
+	u32         tx_tw;
+	u32         rx_tw;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The underlying array type is a 32 bit dword
+ * array with individual bit-field elements accessed using shifts and masks.
+ *
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb)		((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb)	((i)/(32/(eb)))
+
+/* The bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * Example: an array with 8 entries, each 4 bits wide. This array will fit into
+ * a single dword. The diagrams below show the array order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ *                |                |                |               |
+ *   0    |   1   |   2    |   3   |   4    |   5   |   6   |   7   |
+ *                |                |                |               |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ *                |                |                |               |
+ *   1   |   0    |   3    |   2   |   5    |   4   |   7   |   6   |
+ *                |                |                |               |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ *                |                |                |               |
+ *   3   |   2    |   1   |   0    |   7   |   6    |   5   |   4   |
+ *                |                |                |               |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb)	\
+	((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+	(((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb)					\
+	((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) &  \
+	SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val)				\
+do {									   \
+	a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) <<	   \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
+	a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) <<  \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
+} while (0)
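+
+/* Worked example (illustrative, not part of the original HSI): with 4-bit
+ * elements flipped per byte (eb = 4, fb = 8), element i = 2 resides in dword
+ * SHMEM_ARRAY_ENTRY(2, 4) = 0 at bit offset SHMEM_ARRAY_BITPOS(2, 4, 8) = 16,
+ * so SHMEM_ARRAY_GET(a, 2, 4, 8) evaluates to (a[0] >> 16) & 0xf, matching
+ * the per-byte flip diagram above.
+ */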
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES	8
+#define DCBX_PRI_PG_BITWIDTH		4
+#define DCBX_PRI_PG_FBITS		8
+#define DCBX_PRI_PG_GET(a, i)		\
+	SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val)	\
+	SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES	8
+#define DCBX_BW_PG_BITWIDTH		8
+#define DCBX_PG_BW_GET(a, i)		\
+	SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val)	\
+	SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG		15
+#define DCBX_MAX_APP_PROTOCOL		16
+#define FCOE_APP_IDX			0
+#define ISCSI_APP_IDX			1
+#define PREDEFINED_APP_IDX_MAX		2
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+	/*
+	 * For Admin MIB - is this feature supported by the
+	 * driver | For Local MIB - should this feature be enabled.
+	 */
+	u32 enabled;
+	u32  pg_bw_tbl[2];
+	u32  pri_pg_tbl[1];
+};
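+
+/* Usage sketch (illustrative, not part of the original HSI): the priority to
+ * priority-group mapping above is stored as eight 4-bit entries packed into
+ * pri_pg_tbl[], so the group of priority 'pri' can be read with
+ * DCBX_PRI_PG_GET(ets->pri_pg_tbl, pri) and updated with
+ * DCBX_PRI_PG_SET(ets->pri_pg_tbl, pri, pg).
+ */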
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+	u8 pri_en_bitmap;
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
+	u8 pfc_caps;
+	u8 reserved;
+	u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+	u8 enabled;
+	u8 reserved;
+	u8 pfc_caps;
+	u8 pri_en_bitmap;
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+	u16  app_id;
+	u8  pri_bitmap;
+	u8  appBitfield;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
+#elif defined(__LITTLE_ENDIAN)
+	u8 appBitfield;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
+	u8  pri_bitmap;
+	u16  app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+	u8 reserved;
+	u8 default_pri;
+	u8 tc_supported;
+	u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+	u8 enabled;
+	u8 tc_supported;
+	u8 default_pri;
+	u8 reserved;
+#endif
+	struct dcbx_app_priority_entry  app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+	/* PG feature */
+	struct dcbx_ets_feature ets;
+	/* PFC feature */
+	struct dcbx_pfc_feature pfc;
+	/* APP feature */
+	struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+	u8  msg_fast_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_tx_interval;
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  reserved1;
+	u8  tx_fast;
+	u8  tx_crd_max;
+	u8  tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  msg_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_fast_tx_interval;
+	u8  tx_crd;
+	u8  tx_crd_max;
+	u8  tx_fast;
+	u8  reserved1;
+#endif
+	#define REM_CHASSIS_ID_STAT_LEN 4
+	#define REM_PORT_ID_STAT_LEN 4
+	/* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+	u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+	/* Holds remote Port ID TLV header, subtype and 9B of payload. */
+	u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+	#define LOCAL_CHASSIS_ID_STAT_LEN 2
+	#define LOCAL_PORT_ID_STAT_LEN 2
+	/* Holds local Chassis ID 8B payload of constant subtype 4. */
+	u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+	/* Holds local Port ID 8B payload of constant subtype 3. */
+	u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+	/* Number of DCBX frames transmitted. */
+	u32 num_tx_dcbx_pkts;
+	/* Number of DCBX frames received. */
+	u32 num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+	u32     ver_cfg_flags;
+	#define DCBX_ETS_CONFIG_TX_ENABLED       0x00000001
+	#define DCBX_PFC_CONFIG_TX_ENABLED       0x00000002
+	#define DCBX_APP_CONFIG_TX_ENABLED       0x00000004
+	#define DCBX_ETS_RECO_TX_ENABLED         0x00000008
+	#define DCBX_ETS_RECO_VALID              0x00000010
+	#define DCBX_ETS_WILLING                 0x00000020
+	#define DCBX_PFC_WILLING                 0x00000040
+	#define DCBX_APP_WILLING                 0x00000080
+	#define DCBX_VERSION_CEE                 0x00000100
+	#define DCBX_VERSION_IEEE                0x00000200
+	#define DCBX_DCBX_ENABLED                0x00000400
+	#define DCBX_CEE_VERSION_MASK            0x0000f000
+	#define DCBX_CEE_VERSION_SHIFT           12
+	#define DCBX_CEE_MAX_VERSION_MASK        0x000f0000
+	#define DCBX_CEE_MAX_VERSION_SHIFT       16
+	struct dcbx_features     features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+	u32 prefix_seq_num;
+	u32 flags;
+	#define DCBX_ETS_TLV_RX                  0x00000001
+	#define DCBX_PFC_TLV_RX                  0x00000002
+	#define DCBX_APP_TLV_RX                  0x00000004
+	#define DCBX_ETS_RX_ERROR                0x00000010
+	#define DCBX_PFC_RX_ERROR                0x00000020
+	#define DCBX_APP_RX_ERROR                0x00000040
+	#define DCBX_ETS_REM_WILLING             0x00000100
+	#define DCBX_PFC_REM_WILLING             0x00000200
+	#define DCBX_APP_REM_WILLING             0x00000400
+	#define DCBX_REMOTE_ETS_RECO_VALID       0x00001000
+	#define DCBX_REMOTE_MIB_VALID            0x00002000
+	struct dcbx_features features;
+	u32 suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+	u32 prefix_seq_num;
+	/* Indicates if there is mismatch with negotiation results. */
+	u32 error;
+	#define DCBX_LOCAL_ETS_ERROR             0x00000001
+	#define DCBX_LOCAL_PFC_ERROR             0x00000002
+	#define DCBX_LOCAL_APP_ERROR             0x00000004
+	#define DCBX_LOCAL_PFC_MISMATCH          0x00000010
+	#define DCBX_LOCAL_APP_MISMATCH          0x00000020
+	#define DCBX_REMOTE_MIB_ERROR		 0x00000040
+	#define DCBX_REMOTE_ETS_TLV_NOT_FOUND    0x00000080
+	#define DCBX_REMOTE_PFC_TLV_NOT_FOUND    0x00000100
+	#define DCBX_REMOTE_APP_TLV_NOT_FOUND    0x00000200
+	struct dcbx_features   features;
+	u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+/***********************************************************/
+/*                         Elink section                   */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+	u32 req_duplex;
+	#define REQ_DUPLEX_PHY0_MASK        0x0000ffff
+	#define REQ_DUPLEX_PHY0_SHIFT       0
+	#define REQ_DUPLEX_PHY1_MASK        0xffff0000
+	#define REQ_DUPLEX_PHY1_SHIFT       16
+	u32 req_flow_ctrl;
+	#define REQ_FLOW_CTRL_PHY0_MASK     0x0000ffff
+	#define REQ_FLOW_CTRL_PHY0_SHIFT    0
+	#define REQ_FLOW_CTRL_PHY1_MASK     0xffff0000
+	#define REQ_FLOW_CTRL_PHY1_SHIFT    16
+	u32 req_line_speed; /* Also determines AutoNeg */
+	#define REQ_LINE_SPD_PHY0_MASK      0x0000ffff
+	#define REQ_LINE_SPD_PHY0_SHIFT     0
+	#define REQ_LINE_SPD_PHY1_MASK      0xffff0000
+	#define REQ_LINE_SPD_PHY1_SHIFT     16
+	u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+	u32 additional_config;
+	#define REQ_FC_AUTO_ADV_MASK        0x0000ffff
+	#define REQ_FC_AUTO_ADV0_SHIFT      0
+	#define NO_LFA_DUE_TO_DCC_MASK      0x00010000
+	u32 lfa_sts;
+	#define LFA_LINK_FLAP_REASON_OFFSET		0
+	#define LFA_LINK_FLAP_REASON_MASK		0x000000ff
+		#define LFA_LINK_DOWN			    0x1
+		#define LFA_LOOPBACK_ENABLED		0x2
+		#define LFA_DUPLEX_MISMATCH		    0x3
+		#define LFA_MFW_IS_TOO_OLD		    0x4
+		#define LFA_LINK_SPEED_MISMATCH		0x5
+		#define LFA_FLOW_CTRL_MISMATCH		0x6
+		#define LFA_SPEED_CAP_MISMATCH		0x7
+		#define LFA_DCC_LFA_DISABLED		0x8
+		#define LFA_EEE_MISMATCH		0x9
+
+	#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET	8
+	#define LINK_FLAP_AVOIDANCE_COUNT_MASK		0x0000ff00
+
+	#define LINK_FLAP_COUNT_OFFSET			16
+	#define LINK_FLAP_COUNT_MASK			0x00ff0000
+
+	#define LFA_FLAGS_MASK				0xff000000
+	#define SHMEM_LFA_DONT_CLEAR_STAT		(1<<24)
+};
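+
+/* Illustrative helper (not part of the original firmware interface): a
+ * minimal sketch of how the OFFSET/MASK pairs above are meant to be
+ * combined when decoding an lfa_sts word.
+ */
+static inline u32 shmem_lfa_flap_reason(u32 lfa_sts)
+{
+	/* bits 7:0 - reason for the last link flap (LFA_LINK_DOWN etc.) */
+	return (lfa_sts & LFA_LINK_FLAP_REASON_MASK) >>
+	       LFA_LINK_FLAP_REASON_OFFSET;
+}
+
+static inline u32 shmem_lfa_flap_count(u32 lfa_sts)
+{
+	/* bits 23:16 - number of link flaps observed */
+	return (lfa_sts & LINK_FLAP_COUNT_MASK) >> LINK_FLAP_COUNT_OFFSET;
+}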
+
+/* Used to support NCSI "get OS driver version":
+ * on driver load the version value will be set;
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED			0
+
+	/* personalities order is important */
+#define DRV_PERS_ETHERNET			0
+#define DRV_PERS_ISCSI				1
+#define DRV_PERS_FCOE				2
+
+	/* shmem2 struct is constant; can't add more personalities here */
+#define MAX_DRV_PERS				3
+	u32 versions[MAX_DRV_PERS];
+};
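+
+/* Illustrative sketch (not part of the original firmware interface): per the
+ * comment above, a personality slot is considered loaded when its entry
+ * differs from DRV_VER_NOT_LOADED.
+ */
+static inline int os_drv_ver_is_loaded(const struct os_drv_ver *ver, int pers)
+{
+	/* pers is one of DRV_PERS_ETHERNET / DRV_PERS_ISCSI / DRV_PERS_FCOE */
+	return ver->versions[pers] != DRV_VER_NOT_LOADED;
+}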
+
+struct ncsi_oem_fcoe_features {
+	u32 fcoe_features1;
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET        0
+
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_MASK             0xFFFF0000
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET           16
+
+	u32 fcoe_features2;
+	#define FCOE_FEATURES2_EXCHANGES_MASK                   0x0000FFFF
+	#define FCOE_FEATURES2_EXCHANGES_OFFSET                 0
+
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK           0xFFFF0000
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET         16
+
+	u32 fcoe_features3;
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK           0x0000FFFF
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET         0
+
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK        0xFFFF0000
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET      16
+
+	u32 fcoe_features4;
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_MASK            0x0000000F
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
+};
+
+enum curr_cfg_method_e {
+	CURR_CFG_MET_NONE = 0,  /* default config */
+	CURR_CFG_MET_OS = 1,
+	CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+	u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+	u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+	/* hdr used internally by the MFW */
+	u32 hdr;
+	u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+	struct bdn_fc_npiv_cfg fc_npiv_cfg;
+	struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+	u32 epoc;
+	u32 drv_ver;
+	u32 fw_ver;
+
+	u32 valid_dump;
+	#define FIRST_DUMP_VALID        (1 << 0)
+	#define SECOND_DUMP_VALID       (1 << 1)
+
+	u32 flags;
+	#define ENABLE_ALL_TRIGGERS     (0x7fffffff)
+	#define TRIGGER_MDUMP_ONCE      (1 << 31)
+};
+
+struct ncsi_oem_data {
+	u32 driver_version[4];
+	struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
+struct shmem2_region {
+
+	u32 size;					/* 0x0000 */
+
+	u32 dcc_support;				/* 0x0004 */
+	#define SHMEM_DCC_SUPPORT_NONE                      0x00000000
+	#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV     0x00000001
+	#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV  0x00000004
+	#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV    0x00000008
+	#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV          0x00000040
+	#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV          0x00000080
+
+	u32 ext_phy_fw_version2[PORT_MAX];		/* 0x0008 */
+	/*
+	 * For backwards compatibility, if the mf_cfg_addr does not exist
+	 * (the size field is smaller than 0xc) the mf_cfg resides at the
+	 * end of struct shmem_region
+	 */
+	u32 mf_cfg_addr;				/* 0x0010 */
+	#define SHMEM_MF_CFG_ADDR_NONE                  0x00000000
+
+	struct fw_flr_mb flr_mb;			/* 0x0014 */
+	u32 dcbx_lldp_params_offset;			/* 0x0028 */
+	#define SHMEM_LLDP_DCBX_PARAMS_NONE             0x00000000
+	u32 dcbx_neg_res_offset;			/* 0x002c */
+	#define SHMEM_DCBX_NEG_RES_NONE			0x00000000
+	u32 dcbx_remote_mib_offset;			/* 0x0030 */
+	#define SHMEM_DCBX_REMOTE_MIB_NONE              0x00000000
+	/*
+	 * The other shmemX_base_addr holds the other path's shmem address,
+	 * required for example in case of common phy init, or for path1 to know
+	 * the address of the mcp debug trace, which is located at an offset
+	 * from path0's shmem.
+	 */
+	u32 other_shmem_base_addr;			/* 0x0034 */
+	u32 other_shmem2_base_addr;			/* 0x0038 */
+	/*
+	 * mcp_vf_disabled is set by the MCP to inform the driver which VFs
+	 * were disabled/FLRed
+	 */
+	u32 mcp_vf_disabled[E2_VF_MAX / 32];		/* 0x003c */
+
+	/*
+	 * drv_ack_vf_disabled is set by the PF driver to acknowledge the
+	 * disabled VFs it has handled
+	 */
+	u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+	u32 dcbx_lldp_dcbx_stat_offset;			/* 0x0064 */
+	#define SHMEM_LLDP_DCBX_STAT_NONE               0x00000000
+
+	/*
+	 * edebug_driver_if field is used to transfer messages between the
+	 * edebug app and the driver through shmem2.
+	 *
+	 * message format:
+	 * bits 0-2 -  function number / instance of driver to perform request
+	 * bits 3-5 -  op code / is_ack?
+	 * bits 6-63 - data
+	 */
+	u32 edebug_driver_if[2];			/* 0x0068 */
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR  1
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR   2
+	#define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT   3
+
+	u32 nvm_retain_bitmap_addr;			/* 0x0070 */
+
+	/* afex support of that driver */
+	u32 afex_driver_support;			/* 0x0074 */
+	#define SHMEM_AFEX_VERSION_MASK                  0x100f
+	#define SHMEM_AFEX_SUPPORTED_VERSION_ONE         0x1001
+	#define SHMEM_AFEX_REDUCED_DRV_LOADED            0x8000
+
+	/* driver receives addr in scratchpad to which it should respond */
+	u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
+
+	/* generic params from MCP to driver (value depends on the msg sent
+	 * to the driver)
+	 */
+	u32 afex_param1_to_driver[E2_FUNC_MAX];		/* 0x0088 */
+	u32 afex_param2_to_driver[E2_FUNC_MAX];		/* 0x0098 */
+
+	u32 swim_base_addr;				/* 0x0108 */
+	u32 swim_funcs;
+	u32 swim_main_cb;
+
+	/* bitmap notifying which VIF profiles stored in nvram are enabled by
+	 * switch
+	 */
+	u32 afex_profiles_enabled[2];
+
+	/* generic flags controlled by the driver */
+	u32 drv_flags;
+	#define DRV_FLAGS_DCB_CONFIGURED		0x0
+	#define DRV_FLAGS_DCB_CONFIGURATION_ABORTED	0x1
+	#define DRV_FLAGS_DCB_MFW_CONFIGURED	0x2
+
+	#define DRV_FLAGS_PORT_MASK	((1 << DRV_FLAGS_DCB_CONFIGURED) | \
+			(1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
+			(1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
+	/* pointer to extended dev_info shared data copied from nvm image */
+	u32 extended_dev_info_shared_addr;
+	u32 ncsi_oem_data_addr;
+
+	u32 ocsd_host_addr; /* initialized by option ROM */
+	u32 ocbb_host_addr; /* initialized by option ROM */
+	u32 ocsd_req_update_interval; /* initialized by option ROM */
+	u32 temperature_in_half_celsius;
+	u32 glob_struct_in_host;
+
+	u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE			0x00000000
+
+	u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+#define DRV_FLAGS_MTU_MASK			0xffff0000
+#define DRV_FLAGS_MTU_SHIFT			16
+
+	u32 extended_dev_info_shared_cfg_size;
+
+	u32 dcbx_en[PORT_MAX];
+
+	/* The offset points to the multi threaded meta structure */
+	u32 multi_thread_data_offset;
+
+	/* address of a DMAable host buffer holding values from the drivers */
+	u32 drv_info_host_addr_lo;
+	u32 drv_info_host_addr_hi;
+
+	/* general values written by the MFW (such as current version) */
+	u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK          0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT         0
+#define DRV_INFO_CONTROL_OP_CODE_MASK      0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT     8
+	u32 ibft_host_addr; /* initialized by option ROM */
+	struct eee_remote_vals eee_remote_vals[PORT_MAX];
+	u32 reserved[E2_FUNC_MAX];
+
+
+	/* the status of EEE auto-negotiation
+	 * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+	 * bits 19:16 the supported modes for EEE.
+	 * bits 23:20 the speeds advertised for EEE.
+	 * bits 27:24 the speeds the Link partner advertised for EEE.
+	 * The supported/adv. modes in bits 27:19 originate from the
+	 * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+	 * bit 28 when 1'b1 EEE was requested.
+	 * bit 29 when 1'b1 tx lpi was requested.
+	 * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+	 * 30:29 are 2'b11.
+	 * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+	 * value. When 1'b1 those bits contain a value times 16 microseconds.
+	 */
+	u32 eee_status[PORT_MAX];
+	#define SHMEM_EEE_TIMER_MASK		   0x0000ffff
+	#define SHMEM_EEE_SUPPORTED_MASK	   0x000f0000
+	#define SHMEM_EEE_SUPPORTED_SHIFT	   16
+	#define SHMEM_EEE_ADV_STATUS_MASK	   0x00f00000
+		#define SHMEM_EEE_100M_ADV	   (1<<0)
+		#define SHMEM_EEE_1G_ADV	   (1<<1)
+		#define SHMEM_EEE_10G_ADV	   (1<<2)
+	#define SHMEM_EEE_ADV_STATUS_SHIFT	   20
+	#define	SHMEM_EEE_LP_ADV_STATUS_MASK	   0x0f000000
+	#define SHMEM_EEE_LP_ADV_STATUS_SHIFT	   24
+	#define SHMEM_EEE_REQUESTED_BIT		   0x10000000
+	#define SHMEM_EEE_LPI_REQUESTED_BIT	   0x20000000
+	#define SHMEM_EEE_ACTIVE_BIT		   0x40000000
+	#define SHMEM_EEE_TIME_OUTPUT_BIT	   0x80000000
+
+	u32 sizeof_port_stats;
+
+	/* Link Flap Avoidance */
+	u32 lfa_host_addr[PORT_MAX];
+	u32 reserved1;
+
+	u32 reserved2;				/* Offset 0x148 */
+	u32 reserved3;				/* Offset 0x14C */
+	u32 reserved4;				/* Offset 0x150 */
+	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
+	#define LINK_ATTR_SYNC_KR2_ENABLE	0x00000001
+	#define LINK_ATTR_84858			0x00000002
+	#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
+	#define LINK_SFP_EEPROM_COMP_CODE_SHIFT		 8
+	#define LINK_SFP_EEPROM_COMP_CODE_SR	0x00001000
+	#define LINK_SFP_EEPROM_COMP_CODE_LR	0x00002000
+	#define LINK_SFP_EEPROM_COMP_CODE_LRM	0x00004000
+
+	u32 reserved5[2];
+	u32 link_change_count[PORT_MAX];        /* Offset 0x160-0x164 */
+	#define LINK_CHANGE_COUNT_MASK 0xff     /* Offset 0x168 */
+	/* driver version for each personality */
+	struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+	/* Flag to the driver that PF's drv_info_host_addr buffer was read  */
+	u32 mfw_drv_indication;
+
+	/* We use an indication bit for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+	union { /* For various OEMs */			/* Offset 0x1a0 */
+		u8 storage_boot_prog[E2_FUNC_MAX];
+	#define STORAGE_BOOT_PROG_MASK				0x000000FF
+	#define STORAGE_BOOT_PROG_NONE				0x00000000
+	#define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED		0x00000002
+	#define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS	0x00000002
+	#define STORAGE_BOOT_PROG_TARGET_FOUND			0x00000004
+	#define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS		0x00000008
+	#define STORAGE_BOOT_PROG_FCOE_LUN_FOUND		0x00000008
+	#define STORAGE_BOOT_PROG_LOGGED_INTO_TGT		0x00000010
+	#define STORAGE_BOOT_PROG_IMG_DOWNLOADED		0x00000020
+	#define STORAGE_BOOT_PROG_OS_HANDOFF			0x00000040
+	#define STORAGE_BOOT_PROG_COMPLETED			0x00000080
+
+		u32 oem_i2c_data_addr;
+	};
+
+	/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+	/* For PCP values 0-3 use the map lower */
+	/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+	 * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+	 */
+	u32 c2s_pcp_map_lower[E2_FUNC_MAX];			/* 0x1a4 */
+
+	/* For PCP values 4-7 use the map upper */
+	/* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+	 * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+	 */
+	u32 c2s_pcp_map_upper[E2_FUNC_MAX];			/* 0x1b4 */
+
+	/* For PCP default value get the MSB byte of the map default */
+	u32 c2s_pcp_map_default[E2_FUNC_MAX];			/* 0x1c4 */
+
+	/* FC_NPIV table offset in NVRAM */
+	u32 fc_npiv_nvram_tbl_addr[PORT_MAX];			/* 0x1d4 */
+
+	/* Shows last method that changed configuration of this device */
+	enum curr_cfg_method_e curr_cfg;			/* 0x1dc */
+
+	/* Storm FW version, should be kept in the format 0xMMmmbbdd:
+	 * MM - Major, mm - Minor, bb - Build, dd - Drop
+	 */
+	u32 netproc_fw_ver;					/* 0x1e0 */
+
+	/* Option ROM SMASH CLP version */
+	u32 clp_ver;						/* 0x1e4 */
+
+	u32 pcie_bus_num;					/* 0x1e8 */
+
+	u32 sriov_switch_mode;					/* 0x1ec */
+	#define SRIOV_SWITCH_MODE_NONE		0x0
+	#define SRIOV_SWITCH_MODE_VEB		0x1
+	#define SRIOV_SWITCH_MODE_VEPA		0x2
+
+	u8  rsrv2[E2_FUNC_MAX];					/* 0x1f0 */
+
+	u32 img_inv_table_addr;	/* Address to INV_TABLE_P */	/* 0x1f4 */
+
+	u32 mtu_size[E2_FUNC_MAX];				/* 0x1f8 */
+
+	u32 os_driver_state[E2_FUNC_MAX];			/* 0x208 */
+	#define OS_DRIVER_STATE_NOT_LOADED	0 /* not installed */
+	#define OS_DRIVER_STATE_LOADING		1 /* transition state */
+	#define OS_DRIVER_STATE_DISABLED	2 /* installed but disabled */
+	#define OS_DRIVER_STATE_ACTIVE		3 /* installed and active */
+
+	/* mini dump driver info */
+	struct mdump_driver_info drv_info;			/* 0x218 */
+};
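+
+/* Illustrative sketch (not part of the original firmware interface) showing
+ * how the eee_status bit layout documented above can be decoded with the
+ * SHMEM_EEE_* masks and shifts.
+ */
+static inline u32 shmem2_eee_adv_speeds(u32 eee_status)
+{
+	/* bits 23:20 - the speeds advertised for EEE (SHMEM_EEE_XXX_ADV) */
+	return (eee_status & SHMEM_EEE_ADV_STATUS_MASK) >>
+	       SHMEM_EEE_ADV_STATUS_SHIFT;
+}
+
+static inline int shmem2_eee_tx_lpi_asserted(u32 eee_status)
+{
+	/* Tx LPI is asserted iff bits 30:29 are both set, per the comment */
+	return (eee_status &
+		(SHMEM_EEE_ACTIVE_BIT | SHMEM_EEE_LPI_REQUESTED_BIT)) ==
+	       (SHMEM_EEE_ACTIVE_BIT | SHMEM_EEE_LPI_REQUESTED_BIT);
+}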
+
+
+struct emac_stats {
+	u32     rx_stat_ifhcinoctets;
+	u32     rx_stat_ifhcinbadoctets;
+	u32     rx_stat_etherstatsfragments;
+	u32     rx_stat_ifhcinucastpkts;
+	u32     rx_stat_ifhcinmulticastpkts;
+	u32     rx_stat_ifhcinbroadcastpkts;
+	u32     rx_stat_dot3statsfcserrors;
+	u32     rx_stat_dot3statsalignmenterrors;
+	u32     rx_stat_dot3statscarriersenseerrors;
+	u32     rx_stat_xonpauseframesreceived;
+	u32     rx_stat_xoffpauseframesreceived;
+	u32     rx_stat_maccontrolframesreceived;
+	u32     rx_stat_xoffstateentered;
+	u32     rx_stat_dot3statsframestoolong;
+	u32     rx_stat_etherstatsjabbers;
+	u32     rx_stat_etherstatsundersizepkts;
+	u32     rx_stat_etherstatspkts64octets;
+	u32     rx_stat_etherstatspkts65octetsto127octets;
+	u32     rx_stat_etherstatspkts128octetsto255octets;
+	u32     rx_stat_etherstatspkts256octetsto511octets;
+	u32     rx_stat_etherstatspkts512octetsto1023octets;
+	u32     rx_stat_etherstatspkts1024octetsto1522octets;
+	u32     rx_stat_etherstatspktsover1522octets;
+
+	u32     rx_stat_falsecarriererrors;
+
+	u32     tx_stat_ifhcoutoctets;
+	u32     tx_stat_ifhcoutbadoctets;
+	u32     tx_stat_etherstatscollisions;
+	u32     tx_stat_outxonsent;
+	u32     tx_stat_outxoffsent;
+	u32     tx_stat_flowcontroldone;
+	u32     tx_stat_dot3statssinglecollisionframes;
+	u32     tx_stat_dot3statsmultiplecollisionframes;
+	u32     tx_stat_dot3statsdeferredtransmissions;
+	u32     tx_stat_dot3statsexcessivecollisions;
+	u32     tx_stat_dot3statslatecollisions;
+	u32     tx_stat_ifhcoutucastpkts;
+	u32     tx_stat_ifhcoutmulticastpkts;
+	u32     tx_stat_ifhcoutbroadcastpkts;
+	u32     tx_stat_etherstatspkts64octets;
+	u32     tx_stat_etherstatspkts65octetsto127octets;
+	u32     tx_stat_etherstatspkts128octetsto255octets;
+	u32     tx_stat_etherstatspkts256octetsto511octets;
+	u32     tx_stat_etherstatspkts512octetsto1023octets;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets;
+	u32     tx_stat_etherstatspktsover1522octets;
+	u32     tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+	u32	tx_stat_gtpkt_lo;
+	u32	tx_stat_gtpkt_hi;
+	u32	tx_stat_gtxpf_lo;
+	u32	tx_stat_gtxpf_hi;
+	u32	tx_stat_gtfcs_lo;
+	u32	tx_stat_gtfcs_hi;
+	u32	tx_stat_gtmca_lo;
+	u32	tx_stat_gtmca_hi;
+	u32	tx_stat_gtbca_lo;
+	u32	tx_stat_gtbca_hi;
+	u32	tx_stat_gtfrg_lo;
+	u32	tx_stat_gtfrg_hi;
+	u32	tx_stat_gtovr_lo;
+	u32	tx_stat_gtovr_hi;
+	u32	tx_stat_gt64_lo;
+	u32	tx_stat_gt64_hi;
+	u32	tx_stat_gt127_lo;
+	u32	tx_stat_gt127_hi;
+	u32	tx_stat_gt255_lo;
+	u32	tx_stat_gt255_hi;
+	u32	tx_stat_gt511_lo;
+	u32	tx_stat_gt511_hi;
+	u32	tx_stat_gt1023_lo;
+	u32	tx_stat_gt1023_hi;
+	u32	tx_stat_gt1518_lo;
+	u32	tx_stat_gt1518_hi;
+	u32	tx_stat_gt2047_lo;
+	u32	tx_stat_gt2047_hi;
+	u32	tx_stat_gt4095_lo;
+	u32	tx_stat_gt4095_hi;
+	u32	tx_stat_gt9216_lo;
+	u32	tx_stat_gt9216_hi;
+	u32	tx_stat_gt16383_lo;
+	u32	tx_stat_gt16383_hi;
+	u32	tx_stat_gtmax_lo;
+	u32	tx_stat_gtmax_hi;
+	u32	tx_stat_gtufl_lo;
+	u32	tx_stat_gtufl_hi;
+	u32	tx_stat_gterr_lo;
+	u32	tx_stat_gterr_hi;
+	u32	tx_stat_gtbyt_lo;
+	u32	tx_stat_gtbyt_hi;
+
+	u32	rx_stat_gr64_lo;
+	u32	rx_stat_gr64_hi;
+	u32	rx_stat_gr127_lo;
+	u32	rx_stat_gr127_hi;
+	u32	rx_stat_gr255_lo;
+	u32	rx_stat_gr255_hi;
+	u32	rx_stat_gr511_lo;
+	u32	rx_stat_gr511_hi;
+	u32	rx_stat_gr1023_lo;
+	u32	rx_stat_gr1023_hi;
+	u32	rx_stat_gr1518_lo;
+	u32	rx_stat_gr1518_hi;
+	u32	rx_stat_gr2047_lo;
+	u32	rx_stat_gr2047_hi;
+	u32	rx_stat_gr4095_lo;
+	u32	rx_stat_gr4095_hi;
+	u32	rx_stat_gr9216_lo;
+	u32	rx_stat_gr9216_hi;
+	u32	rx_stat_gr16383_lo;
+	u32	rx_stat_gr16383_hi;
+	u32	rx_stat_grmax_lo;
+	u32	rx_stat_grmax_hi;
+	u32	rx_stat_grpkt_lo;
+	u32	rx_stat_grpkt_hi;
+	u32	rx_stat_grfcs_lo;
+	u32	rx_stat_grfcs_hi;
+	u32	rx_stat_grmca_lo;
+	u32	rx_stat_grmca_hi;
+	u32	rx_stat_grbca_lo;
+	u32	rx_stat_grbca_hi;
+	u32	rx_stat_grxcf_lo;
+	u32	rx_stat_grxcf_hi;
+	u32	rx_stat_grxpf_lo;
+	u32	rx_stat_grxpf_hi;
+	u32	rx_stat_grxuo_lo;
+	u32	rx_stat_grxuo_hi;
+	u32	rx_stat_grjbr_lo;
+	u32	rx_stat_grjbr_hi;
+	u32	rx_stat_grovr_lo;
+	u32	rx_stat_grovr_hi;
+	u32	rx_stat_grflr_lo;
+	u32	rx_stat_grflr_hi;
+	u32	rx_stat_grmeg_lo;
+	u32	rx_stat_grmeg_hi;
+	u32	rx_stat_grmeb_lo;
+	u32	rx_stat_grmeb_hi;
+	u32	rx_stat_grbyt_lo;
+	u32	rx_stat_grbyt_hi;
+	u32	rx_stat_grund_lo;
+	u32	rx_stat_grund_hi;
+	u32	rx_stat_grfrg_lo;
+	u32	rx_stat_grfrg_hi;
+	u32	rx_stat_grerb_lo;
+	u32	rx_stat_grerb_hi;
+	u32	rx_stat_grfre_lo;
+	u32	rx_stat_grfre_hi;
+	u32	rx_stat_gripj_lo;
+	u32	rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+	u32	tx_stat_gtpk_lo; /* gtpok */
+	u32	tx_stat_gtpk_hi; /* gtpok */
+	u32	tx_stat_gtxpf_lo; /* gtpf */
+	u32	tx_stat_gtxpf_hi; /* gtpf */
+	u32	tx_stat_gtpp_lo; /* NEW BMAC2 */
+	u32	tx_stat_gtpp_hi; /* NEW BMAC2 */
+	u32	tx_stat_gtfcs_lo;
+	u32	tx_stat_gtfcs_hi;
+	u32	tx_stat_gtuca_lo; /* NEW BMAC2 */
+	u32	tx_stat_gtuca_hi; /* NEW BMAC2 */
+	u32	tx_stat_gtmca_lo;
+	u32	tx_stat_gtmca_hi;
+	u32	tx_stat_gtbca_lo;
+	u32	tx_stat_gtbca_hi;
+	u32	tx_stat_gtovr_lo;
+	u32	tx_stat_gtovr_hi;
+	u32	tx_stat_gtfrg_lo;
+	u32	tx_stat_gtfrg_hi;
+	u32	tx_stat_gtpkt1_lo; /* gtpkt */
+	u32	tx_stat_gtpkt1_hi; /* gtpkt */
+	u32	tx_stat_gt64_lo;
+	u32	tx_stat_gt64_hi;
+	u32	tx_stat_gt127_lo;
+	u32	tx_stat_gt127_hi;
+	u32	tx_stat_gt255_lo;
+	u32	tx_stat_gt255_hi;
+	u32	tx_stat_gt511_lo;
+	u32	tx_stat_gt511_hi;
+	u32	tx_stat_gt1023_lo;
+	u32	tx_stat_gt1023_hi;
+	u32	tx_stat_gt1518_lo;
+	u32	tx_stat_gt1518_hi;
+	u32	tx_stat_gt2047_lo;
+	u32	tx_stat_gt2047_hi;
+	u32	tx_stat_gt4095_lo;
+	u32	tx_stat_gt4095_hi;
+	u32	tx_stat_gt9216_lo;
+	u32	tx_stat_gt9216_hi;
+	u32	tx_stat_gt16383_lo;
+	u32	tx_stat_gt16383_hi;
+	u32	tx_stat_gtmax_lo;
+	u32	tx_stat_gtmax_hi;
+	u32	tx_stat_gtufl_lo;
+	u32	tx_stat_gtufl_hi;
+	u32	tx_stat_gterr_lo;
+	u32	tx_stat_gterr_hi;
+	u32	tx_stat_gtbyt_lo;
+	u32	tx_stat_gtbyt_hi;
+
+	u32	rx_stat_gr64_lo;
+	u32	rx_stat_gr64_hi;
+	u32	rx_stat_gr127_lo;
+	u32	rx_stat_gr127_hi;
+	u32	rx_stat_gr255_lo;
+	u32	rx_stat_gr255_hi;
+	u32	rx_stat_gr511_lo;
+	u32	rx_stat_gr511_hi;
+	u32	rx_stat_gr1023_lo;
+	u32	rx_stat_gr1023_hi;
+	u32	rx_stat_gr1518_lo;
+	u32	rx_stat_gr1518_hi;
+	u32	rx_stat_gr2047_lo;
+	u32	rx_stat_gr2047_hi;
+	u32	rx_stat_gr4095_lo;
+	u32	rx_stat_gr4095_hi;
+	u32	rx_stat_gr9216_lo;
+	u32	rx_stat_gr9216_hi;
+	u32	rx_stat_gr16383_lo;
+	u32	rx_stat_gr16383_hi;
+	u32	rx_stat_grmax_lo;
+	u32	rx_stat_grmax_hi;
+	u32	rx_stat_grpkt_lo;
+	u32	rx_stat_grpkt_hi;
+	u32	rx_stat_grfcs_lo;
+	u32	rx_stat_grfcs_hi;
+	u32	rx_stat_gruca_lo;
+	u32	rx_stat_gruca_hi;
+	u32	rx_stat_grmca_lo;
+	u32	rx_stat_grmca_hi;
+	u32	rx_stat_grbca_lo;
+	u32	rx_stat_grbca_hi;
+	u32	rx_stat_grxpf_lo; /* grpf */
+	u32	rx_stat_grxpf_hi; /* grpf */
+	u32	rx_stat_grpp_lo;
+	u32	rx_stat_grpp_hi;
+	u32	rx_stat_grxuo_lo; /* gruo */
+	u32	rx_stat_grxuo_hi; /* gruo */
+	u32	rx_stat_grjbr_lo;
+	u32	rx_stat_grjbr_hi;
+	u32	rx_stat_grovr_lo;
+	u32	rx_stat_grovr_hi;
+	u32	rx_stat_grxcf_lo; /* grcf */
+	u32	rx_stat_grxcf_hi; /* grcf */
+	u32	rx_stat_grflr_lo;
+	u32	rx_stat_grflr_hi;
+	u32	rx_stat_grpok_lo;
+	u32	rx_stat_grpok_hi;
+	u32	rx_stat_grmeg_lo;
+	u32	rx_stat_grmeg_hi;
+	u32	rx_stat_grmeb_lo;
+	u32	rx_stat_grmeb_hi;
+	u32	rx_stat_grbyt_lo;
+	u32	rx_stat_grbyt_hi;
+	u32	rx_stat_grund_lo;
+	u32	rx_stat_grund_hi;
+	u32	rx_stat_grfrg_lo;
+	u32	rx_stat_grfrg_hi;
+	u32	rx_stat_grerb_lo; /* grerrbyt */
+	u32	rx_stat_grerb_hi; /* grerrbyt */
+	u32	rx_stat_grfre_lo; /* grfrerr */
+	u32	rx_stat_grfre_hi; /* grfrerr */
+	u32	rx_stat_gripj_lo;
+	u32	rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+	struct {
+		/* NOTE: MSTAT on E3 has a bug where this register's contents are
+		 * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp
+		 */
+		u32 tx_gtxpok_lo;
+		u32 tx_gtxpok_hi;
+		u32 tx_gtxpf_lo;
+		u32 tx_gtxpf_hi;
+		u32 tx_gtxpp_lo;
+		u32 tx_gtxpp_hi;
+		u32 tx_gtfcs_lo;
+		u32 tx_gtfcs_hi;
+		u32 tx_gtuca_lo;
+		u32 tx_gtuca_hi;
+		u32 tx_gtmca_lo;
+		u32 tx_gtmca_hi;
+		u32 tx_gtgca_lo;
+		u32 tx_gtgca_hi;
+		u32 tx_gtpkt_lo;
+		u32 tx_gtpkt_hi;
+		u32 tx_gt64_lo;
+		u32 tx_gt64_hi;
+		u32 tx_gt127_lo;
+		u32 tx_gt127_hi;
+		u32 tx_gt255_lo;
+		u32 tx_gt255_hi;
+		u32 tx_gt511_lo;
+		u32 tx_gt511_hi;
+		u32 tx_gt1023_lo;
+		u32 tx_gt1023_hi;
+		u32 tx_gt1518_lo;
+		u32 tx_gt1518_hi;
+		u32 tx_gt2047_lo;
+		u32 tx_gt2047_hi;
+		u32 tx_gt4095_lo;
+		u32 tx_gt4095_hi;
+		u32 tx_gt9216_lo;
+		u32 tx_gt9216_hi;
+		u32 tx_gt16383_lo;
+		u32 tx_gt16383_hi;
+		u32 tx_gtufl_lo;
+		u32 tx_gtufl_hi;
+		u32 tx_gterr_lo;
+		u32 tx_gterr_hi;
+		u32 tx_gtbyt_lo;
+		u32 tx_gtbyt_hi;
+		u32 tx_collisions_lo;
+		u32 tx_collisions_hi;
+		u32 tx_singlecollision_lo;
+		u32 tx_singlecollision_hi;
+		u32 tx_multiplecollisions_lo;
+		u32 tx_multiplecollisions_hi;
+		u32 tx_deferred_lo;
+		u32 tx_deferred_hi;
+		u32 tx_excessivecollisions_lo;
+		u32 tx_excessivecollisions_hi;
+		u32 tx_latecollisions_lo;
+		u32 tx_latecollisions_hi;
+	} stats_tx;
+
+	struct {
+		u32 rx_gr64_lo;
+		u32 rx_gr64_hi;
+		u32 rx_gr127_lo;
+		u32 rx_gr127_hi;
+		u32 rx_gr255_lo;
+		u32 rx_gr255_hi;
+		u32 rx_gr511_lo;
+		u32 rx_gr511_hi;
+		u32 rx_gr1023_lo;
+		u32 rx_gr1023_hi;
+		u32 rx_gr1518_lo;
+		u32 rx_gr1518_hi;
+		u32 rx_gr2047_lo;
+		u32 rx_gr2047_hi;
+		u32 rx_gr4095_lo;
+		u32 rx_gr4095_hi;
+		u32 rx_gr9216_lo;
+		u32 rx_gr9216_hi;
+		u32 rx_gr16383_lo;
+		u32 rx_gr16383_hi;
+		u32 rx_grpkt_lo;
+		u32 rx_grpkt_hi;
+		u32 rx_grfcs_lo;
+		u32 rx_grfcs_hi;
+		u32 rx_gruca_lo;
+		u32 rx_gruca_hi;
+		u32 rx_grmca_lo;
+		u32 rx_grmca_hi;
+		u32 rx_grbca_lo;
+		u32 rx_grbca_hi;
+		u32 rx_grxpf_lo;
+		u32 rx_grxpf_hi;
+		u32 rx_grxpp_lo;
+		u32 rx_grxpp_hi;
+		u32 rx_grxuo_lo;
+		u32 rx_grxuo_hi;
+		u32 rx_grovr_lo;
+		u32 rx_grovr_hi;
+		u32 rx_grxcf_lo;
+		u32 rx_grxcf_hi;
+		u32 rx_grflr_lo;
+		u32 rx_grflr_hi;
+		u32 rx_grpok_lo;
+		u32 rx_grpok_hi;
+		u32 rx_grbyt_lo;
+		u32 rx_grbyt_hi;
+		u32 rx_grund_lo;
+		u32 rx_grund_hi;
+		u32 rx_grfrg_lo;
+		u32 rx_grfrg_hi;
+		u32 rx_grerb_lo;
+		u32 rx_grerb_hi;
+		u32 rx_grfre_lo;
+		u32 rx_grfre_hi;
+
+		u32 rx_alignmenterrors_lo;
+		u32 rx_alignmenterrors_hi;
+		u32 rx_falsecarrier_lo;
+		u32 rx_falsecarrier_hi;
+		u32 rx_llfcmsgcnt_lo;
+		u32 rx_llfcmsgcnt_hi;
+	} stats_rx;
+};
+
+union mac_stats {
+	struct emac_stats	emac_stats;
+	struct bmac1_stats	bmac1_stats;
+	struct bmac2_stats	bmac2_stats;
+	struct mstat_stats	mstat_stats;
+};
+
+
+struct mac_stx {
+	/* in_bad_octets */
+	u32     rx_stat_ifhcinbadoctets_hi;
+	u32     rx_stat_ifhcinbadoctets_lo;
+
+	/* out_bad_octets */
+	u32     tx_stat_ifhcoutbadoctets_hi;
+	u32     tx_stat_ifhcoutbadoctets_lo;
+
+	/* crc_receive_errors */
+	u32     rx_stat_dot3statsfcserrors_hi;
+	u32     rx_stat_dot3statsfcserrors_lo;
+	/* alignment_errors */
+	u32     rx_stat_dot3statsalignmenterrors_hi;
+	u32     rx_stat_dot3statsalignmenterrors_lo;
+	/* carrier_sense_errors */
+	u32     rx_stat_dot3statscarriersenseerrors_hi;
+	u32     rx_stat_dot3statscarriersenseerrors_lo;
+	/* false_carrier_detections */
+	u32     rx_stat_falsecarriererrors_hi;
+	u32     rx_stat_falsecarriererrors_lo;
+
+	/* runt_packets_received */
+	u32     rx_stat_etherstatsundersizepkts_hi;
+	u32     rx_stat_etherstatsundersizepkts_lo;
+	/* jabber_packets_received */
+	u32     rx_stat_dot3statsframestoolong_hi;
+	u32     rx_stat_dot3statsframestoolong_lo;
+
+	/* error_runt_packets_received */
+	u32     rx_stat_etherstatsfragments_hi;
+	u32     rx_stat_etherstatsfragments_lo;
+	/* error_jabber_packets_received */
+	u32     rx_stat_etherstatsjabbers_hi;
+	u32     rx_stat_etherstatsjabbers_lo;
+
+	/* control_frames_received */
+	u32     rx_stat_maccontrolframesreceived_hi;
+	u32     rx_stat_maccontrolframesreceived_lo;
+	u32     rx_stat_mac_xpf_hi;
+	u32     rx_stat_mac_xpf_lo;
+	u32     rx_stat_mac_xcf_hi;
+	u32     rx_stat_mac_xcf_lo;
+
+	/* xoff_state_entered */
+	u32     rx_stat_xoffstateentered_hi;
+	u32     rx_stat_xoffstateentered_lo;
+	/* pause_xon_frames_received */
+	u32     rx_stat_xonpauseframesreceived_hi;
+	u32     rx_stat_xonpauseframesreceived_lo;
+	/* pause_xoff_frames_received */
+	u32     rx_stat_xoffpauseframesreceived_hi;
+	u32     rx_stat_xoffpauseframesreceived_lo;
+	/* pause_xon_frames_transmitted */
+	u32     tx_stat_outxonsent_hi;
+	u32     tx_stat_outxonsent_lo;
+	/* pause_xoff_frames_transmitted */
+	u32     tx_stat_outxoffsent_hi;
+	u32     tx_stat_outxoffsent_lo;
+	/* flow_control_done */
+	u32     tx_stat_flowcontroldone_hi;
+	u32     tx_stat_flowcontroldone_lo;
+
+	/* ether_stats_collisions */
+	u32     tx_stat_etherstatscollisions_hi;
+	u32     tx_stat_etherstatscollisions_lo;
+	/* single_collision_transmit_frames */
+	u32     tx_stat_dot3statssinglecollisionframes_hi;
+	u32     tx_stat_dot3statssinglecollisionframes_lo;
+	/* multiple_collision_transmit_frames */
+	u32     tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32     tx_stat_dot3statsmultiplecollisionframes_lo;
+	/* deferred_transmissions */
+	u32     tx_stat_dot3statsdeferredtransmissions_hi;
+	u32     tx_stat_dot3statsdeferredtransmissions_lo;
+	/* excessive_collision_frames */
+	u32     tx_stat_dot3statsexcessivecollisions_hi;
+	u32     tx_stat_dot3statsexcessivecollisions_lo;
+	/* late_collision_frames */
+	u32     tx_stat_dot3statslatecollisions_hi;
+	u32     tx_stat_dot3statslatecollisions_lo;
+
+	/* frames_transmitted_64_bytes */
+	u32     tx_stat_etherstatspkts64octets_hi;
+	u32     tx_stat_etherstatspkts64octets_lo;
+	/* frames_transmitted_65_127_bytes */
+	u32     tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32     tx_stat_etherstatspkts65octetsto127octets_lo;
+	/* frames_transmitted_128_255_bytes */
+	u32     tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32     tx_stat_etherstatspkts128octetsto255octets_lo;
+	/* frames_transmitted_256_511_bytes */
+	u32     tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32     tx_stat_etherstatspkts256octetsto511octets_lo;
+	/* frames_transmitted_512_1023_bytes */
+	u32     tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32     tx_stat_etherstatspkts512octetsto1023octets_lo;
+	/* frames_transmitted_1024_1522_bytes */
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	/* frames_transmitted_1523_9022_bytes */
+	u32     tx_stat_etherstatspktsover1522octets_hi;
+	u32     tx_stat_etherstatspktsover1522octets_lo;
+	u32     tx_stat_mac_2047_hi;
+	u32     tx_stat_mac_2047_lo;
+	u32     tx_stat_mac_4095_hi;
+	u32     tx_stat_mac_4095_lo;
+	u32     tx_stat_mac_9216_hi;
+	u32     tx_stat_mac_9216_lo;
+	u32     tx_stat_mac_16383_hi;
+	u32     tx_stat_mac_16383_lo;
+
+	/* internal_mac_transmit_errors */
+	u32     tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32     tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+	/* if_out_discards */
+	u32     tx_stat_mac_ufl_hi;
+	u32     tx_stat_mac_ufl_lo;
+};
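+
+/* Illustrative helper (hypothetical name, not part of the original header):
+ * the *_hi/*_lo pairs above are the upper and lower 32-bit halves of 64-bit
+ * counters and can be recombined like this.
+ */
+static inline u64 mac_stx_hilo(u32 hi, u32 lo)
+{
+	return ((u64)hi << 32) | lo;
+}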
+
+
+#define MAC_STX_IDX_MAX                     2
+
+struct host_port_stats {
+	u32            host_port_stats_counter;
+
+	struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+	u32            brb_drop_hi;
+	u32            brb_drop_lo;
+
+	u32            not_used; /* obsolete */
+	u32            pfc_frames_tx_hi;
+	u32            pfc_frames_tx_lo;
+	u32            pfc_frames_rx_hi;
+	u32            pfc_frames_rx_lo;
+
+	u32            eee_lpi_count_hi;
+	u32            eee_lpi_count_lo;
+};
+
+
+struct host_func_stats {
+	u32     host_func_stats_start;
+
+	u32     total_bytes_received_hi;
+	u32     total_bytes_received_lo;
+
+	u32     total_bytes_transmitted_hi;
+	u32     total_bytes_transmitted_lo;
+
+	u32     total_unicast_packets_received_hi;
+	u32     total_unicast_packets_received_lo;
+
+	u32     total_multicast_packets_received_hi;
+	u32     total_multicast_packets_received_lo;
+
+	u32     total_broadcast_packets_received_hi;
+	u32     total_broadcast_packets_received_lo;
+
+	u32     total_unicast_packets_transmitted_hi;
+	u32     total_unicast_packets_transmitted_lo;
+
+	u32     total_multicast_packets_transmitted_hi;
+	u32     total_multicast_packets_transmitted_lo;
+
+	u32     total_broadcast_packets_transmitted_hi;
+	u32     total_broadcast_packets_transmitted_lo;
+
+	u32     valid_bytes_received_hi;
+	u32     valid_bytes_received_lo;
+
+	u32     host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+
+/* stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ *       Order must remain exactly as is unless the protocol changes!
+ */
+struct afex_stats {
+	u32 tx_unicast_frames_hi;
+	u32 tx_unicast_frames_lo;
+	u32 tx_unicast_bytes_hi;
+	u32 tx_unicast_bytes_lo;
+	u32 tx_multicast_frames_hi;
+	u32 tx_multicast_frames_lo;
+	u32 tx_multicast_bytes_hi;
+	u32 tx_multicast_bytes_lo;
+	u32 tx_broadcast_frames_hi;
+	u32 tx_broadcast_frames_lo;
+	u32 tx_broadcast_bytes_hi;
+	u32 tx_broadcast_bytes_lo;
+	u32 tx_frames_discarded_hi;
+	u32 tx_frames_discarded_lo;
+	u32 tx_frames_dropped_hi;
+	u32 tx_frames_dropped_lo;
+
+	u32 rx_unicast_frames_hi;
+	u32 rx_unicast_frames_lo;
+	u32 rx_unicast_bytes_hi;
+	u32 rx_unicast_bytes_lo;
+	u32 rx_multicast_frames_hi;
+	u32 rx_multicast_frames_lo;
+	u32 rx_multicast_bytes_hi;
+	u32 rx_multicast_bytes_lo;
+	u32 rx_broadcast_frames_hi;
+	u32 rx_broadcast_frames_lo;
+	u32 rx_broadcast_bytes_hi;
+	u32 rx_broadcast_bytes_lo;
+	u32 rx_frames_discarded_hi;
+	u32 rx_frames_discarded_lo;
+	u32 rx_frames_dropped_hi;
+	u32 rx_frames_dropped_lo;
+};
+
+#define BCM_5710_FW_MAJOR_VERSION			7
+#define BCM_5710_FW_MINOR_VERSION			12
+#define BCM_5710_FW_REVISION_VERSION		30
+#define BCM_5710_FW_ENGINEERING_VERSION		0
+#define BCM_5710_FW_COMPILE_FLAGS			1
+
+
+/*
+ * attention bits
+ */
+struct atten_sp_status_block {
+	__le32 attn_bits;
+	__le32 attn_bits_ack;
+	u8 status_block_id;
+	u8 reserved0;
+	__le16 attn_bits_index;
+	__le32 reserved1;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+	u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+	u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+	u32 src_addr_lo;
+	u32 src_addr_hi;
+	u32 dst_addr_lo;
+	u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+	u16 len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 len;
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+	u32 comp_addr_lo;
+	u32 comp_addr_hi;
+	u32 comp_val;
+	u32 crc32;
+	u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+	u16 crc16_c;
+	u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc16;
+	u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc_t10;
+	u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xsum8;
+	u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 xsum16;
+	u16 xsum8;
+#endif
+};
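+
+/* Illustrative sketch (not part of the original firmware interface): the
+ * DMAE_COMMAND_* mask/shift pairs above are combined in the usual
+ * "(value << SHIFT) & MASK" fashion when building an opcode word.
+ */
+static inline u32 dmae_opcode_set_endianity(u32 opcode, u32 endianity)
+{
+	opcode &= ~DMAE_COMMAND_ENDIANITY;
+	opcode |= (endianity << DMAE_COMMAND_ENDIANITY_SHIFT) &
+		  DMAE_COMMAND_ENDIANITY;
+	return opcode;
+}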
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr {
+	u8 header;
+#define DOORBELL_HDR_RX (0x1<<0)
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 npackets;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr hdr;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	u16 npackets;
+#endif
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e1x {
+	__le16 index_values[HC_SB_MAX_INDICES_E1X];
+	__le16 running_index[HC_SB_MAX_SM];
+	__le32 rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x {
+	struct hc_status_block_e1x sb;
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e2 {
+	__le16 index_values[HC_SB_MAX_INDICES_E2];
+	__le16 running_index[HC_SB_MAX_SM];
+	__le32 reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2 {
+	struct hc_status_block_e2 sb;
+};
+
+
+/*
+ * 5 lines. slow-path status block
+ */
+struct hc_sp_status_block {
+	__le16 index_values[HC_SP_SB_MAX_INDICES];
+	__le16 running_index;
+	__le16 rsrv;
+	u32 rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block {
+	struct atten_sp_status_block atten_status_block;
+	struct hc_sp_status_block sp_sb;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+struct igu_ack_register {
+#if defined(__BIG_ENDIAN)
+	u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+	u16 status_block_index;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_block_index;
+	u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible {
+	u32 sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+	u32 reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular {
+	u32 sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20)
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24)
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25)
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27)
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31)
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+	u32 reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg {
+	struct igu_regular regular;
+	struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * Igu control commands
+ */
+enum igu_ctrl_cmd {
+	IGU_CTRL_CMD_TYPE_RD,
+	IGU_CTRL_CMD_TYPE_WR,
+	MAX_IGU_CTRL_CMD
+};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+	u32 ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12)
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19)
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20)
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * Igu interrupt command
+ */
+enum igu_int_cmd {
+	IGU_INT_ENABLE,
+	IGU_INT_DISABLE,
+	IGU_INT_NOP,
+	IGU_INT_NOP2,
+	MAX_IGU_INT_CMD
+};
+
+
+/*
+ * Igu segments
+ */
+enum igu_seg_access {
+	IGU_SEG_ACCESS_NORM,
+	IGU_SEG_ACCESS_DEF,
+	IGU_SEG_ACCESS_ATTN,
+	MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags {
+	__le16 flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1)
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14)
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
+
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+	PRS_FLAG_PUREACK_PIGGY,
+	PRS_FLAG_PUREACK_PURE,
+	MAX_PRS_FLAGS_ACK_TYPE
+};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type {
+	PRS_FLAG_ETHTYPE_NON_UNICAST,
+	PRS_FLAG_ETHTYPE_UNICAST,
+	MAX_PRS_FLAGS_ETH_ADDR_TYPE
+};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+	PRS_FLAG_OVERETH_UNKNOWN,
+	PRS_FLAG_OVERETH_IPV4,
+	PRS_FLAG_OVERETH_IPV6,
+	PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+	MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+	PRS_FLAG_OVERIP_UNKNOWN,
+	PRS_FLAG_OVERIP_TCP,
+	PRS_FLAG_OVERIP_UDP,
+	MAX_PRS_FLAGS_OVER_IP
+};
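+
+/* Illustrative sketch (not part of the original firmware interface): the
+ * over-IP protocol field of struct parsing_flags is compared against the
+ * prs_flags_over_ip enum above, e.g. to detect TCP.  Assumes the kernel's
+ * le16_to_cpu() byte-order helper.
+ */
+static inline int parsing_flags_is_tcp(const struct parsing_flags *pf)
+{
+	u16 flags = le16_to_cpu(pf->flags);
+
+	return ((flags & PARSING_FLAGS_OVER_IP_PROTOCOL) >>
+		PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_TCP;
+}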
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+	__le32 command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context {
+	u32 __reserved_0;
+	u32 __reserved_1;
+	u32 __reserved_2;
+	u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context {
+	u32 __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context {
+	u32 __reserved0;
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 __reserved2;
+	u16 __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __reserved1;
+	u8 __reserved2;
+	u8 cdu_usage;
+#endif
+	u32 __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context {
+	u32 reserved0;
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 reserved2;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u8 reserved2;
+	u8 cdu_reserved;
+#endif
+	u32 reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 zero_fill2;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 zero_fill2;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+	u16 prod;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 prod;
+#endif
+};
+
+
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
+
+struct regpair_native {
+	u32 lo;
+	u32 hi;
+};
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule {
+	CLASSIFY_RULE_OPCODE_MAC,
+	CLASSIFY_RULE_OPCODE_VLAN,
+	CLASSIFY_RULE_OPCODE_PAIR,
+	CLASSIFY_RULE_OPCODE_VXLAN,
+	MAX_CLASSIFY_RULE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type {
+	CLASSIFY_RULE_REMOVE,
+	CLASSIFY_RULE_ADD,
+	MAX_CLASSIFY_RULE_ACTION_TYPE
+};
+
+
+/*
+ * client init ramrod data
+ */
+struct client_init_general_data {
+	u8 client_id;
+	u8 statistics_counter_id;
+	u8 statistics_en_flg;
+	u8 is_fcoe_flg;
+	u8 activate_flg;
+	u8 sp_client_id;
+	__le16 mtu;
+	u8 statistics_zero_flg;
+	u8 func_id;
+	u8 cos;
+	u8 traffic_type;
+	u8 fp_hsi_ver;
+	u8 reserved0[3];
+};
+
+
+/*
+ * client init rx data
+ */
+struct client_init_rx_data {
+	u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+	u8 vmqueue_mode_en_flg;
+	u8 extra_data_over_sgl_en_flg;
+	u8 cache_line_alignment_log_size;
+	u8 enable_dynamic_hc;
+	u8 max_sges_for_packet;
+	u8 client_qzone_id;
+	u8 drop_ip_cs_err_flg;
+	u8 drop_tcp_cs_err_flg;
+	u8 drop_ttl0_flg;
+	u8 drop_udp_cs_err_flg;
+	u8 inner_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 status_block_id;
+	u8 rx_sb_index_number;
+	u8 dont_verify_rings_pause_thr_flg;
+	u8 max_tpa_queues;
+	u8 silent_vlan_removal_flg;
+	__le16 max_bytes_on_bd;
+	__le16 sge_buff_size;
+	u8 approx_mcast_engine_id;
+	u8 rss_engine_id;
+	struct regpair bd_page_base;
+	struct regpair sge_page_base;
+	struct regpair cqe_page_base;
+	u8 is_leading_rss;
+	u8 is_approx_mcast;
+	__le16 max_agg_size;
+	__le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+	__le16 cqe_pause_thr_low;
+	__le16 cqe_pause_thr_high;
+	__le16 bd_pause_thr_low;
+	__le16 bd_pause_thr_high;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+	__le16 rx_cos_mask;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	u8 handle_ptp_pkts_flg;
+	u8 reserved6[3];
+	__le32 reserved7;
+};
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+	u8 enforce_security_flg;
+	u8 tx_status_block_id;
+	u8 tx_sb_index_number;
+	u8 tss_leading_client_id;
+	u8 tx_switching_flg;
+	u8 anti_spoofing_flg;
+	__le16 default_vlan;
+	struct regpair tx_bd_page_base;
+	__le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
+	u8 default_vlan_flg;
+	u8 force_default_pri_flg;
+	u8 tunnel_lso_inc_ip_id;
+	u8 refuse_outband_vlan_flg;
+	u8 tunnel_non_lso_pcsum_location;
+	u8 tunnel_non_lso_outer_ip_csum_location;
+};
+
+/*
+ * client init ramrod data
+ */
+struct client_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_rx_data rx;
+	struct client_init_tx_data tx;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+	u8 client_id;
+	u8 func_id;
+	u8 inner_vlan_removal_enable_flg;
+	u8 inner_vlan_removal_change_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_change_flg;
+	u8 anti_spoofing_enable_flg;
+	u8 anti_spoofing_change_flg;
+	u8 activate_flg;
+	u8 activate_change_flg;
+	__le16 default_vlan;
+	u8 default_vlan_enable_flg;
+	u8 default_vlan_change_flg;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	u8 silent_vlan_removal_flg;
+	u8 silent_vlan_change_flg;
+	u8 refuse_outband_vlan_flg;
+	u8 refuse_outband_vlan_change_flg;
+	u8 tx_switching_flg;
+	u8 tx_switching_change_flg;
+	u8 handle_ptp_pkts_flg;
+	u8 handle_ptp_pkts_change_flg;
+	__le16 reserved1;
+	__le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+	u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+	u32 regpair0_lo;
+	u32 regpair0_hi;
+	u32 regpair1_lo;
+	u32 regpair1_hi;
+};
+
+/* 2nd parse bd type used in ethernet tx BDs */
+enum eth_2nd_parse_bd_type {
+	ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
+	MAX_ETH_2ND_PARSE_BD_TYPE
+};
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type {
+	UNKNOWN_ADDRESS,
+	UNICAST_ADDRESS,
+	MULTICAST_ADDRESS,
+	BROADCAST_ADDRESS,
+	MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * header for eth classification command
+ */
+struct eth_classify_cmd_header {
+	u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+	u8 rule_cnt;
+	u8 reserved0;
+	__le16 reserved1;
+	__le32 echo;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+	struct eth_classify_cmd_header header;
+	__le16 reserved0;
+	__le16 inner_mac;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+	struct eth_classify_cmd_header header;
+	__le16 reserved0;
+	__le16 inner_mac;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le32 reserved1;
+	__le16 reserved2;
+	__le16 vlan;
+};
+
+/*
+ * Command for adding/removing a VXLAN classification rule
+ */
+struct eth_classify_vxlan_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 vni;
+	__le16 inner_mac_lsb;
+	__le16 inner_mac_mid;
+	__le16 inner_mac_msb;
+	__le16 reserved1;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+	struct eth_classify_mac_cmd mac;
+	struct eth_classify_vlan_cmd vlan;
+	struct eth_classify_pair_cmd pair;
+	struct eth_classify_vxlan_cmd vxlan;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contains the client ID needed for the ramrod
+ */
+struct eth_common_ramrod_data {
+	__le32 client_id;
+	__le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+	u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+	u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+	u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+	struct ustorm_eth_st_context ustorm_st_context;
+	struct tstorm_eth_st_context tstorm_st_context;
+	struct xstorm_eth_ag_context xstorm_ag_context;
+	struct tstorm_eth_ag_context tstorm_ag_context;
+	struct cstorm_eth_ag_context cstorm_ag_context;
+	struct ustorm_eth_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_eth_st_context xstorm_st_context;
+	struct cstorm_eth_st_context cstorm_st_context;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data {
+	__le16 sgl[8];
+	u32 raw_data[4];
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+	u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+	u8 reserved1;
+	u8 queue_index;
+	u8 reserved2;
+	__le32 timestamp_delta;
+	__le16 num_of_coalesced_segs;
+	__le16 pkt_len;
+	u8 pure_ack_count;
+	u8 reserved3;
+	__le16 reserved4;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	__le32 reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct
+ */
+struct eth_fast_path_rx_cqe {
+	u8 type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
+	u8 status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+	u8 queue_index;
+	u8 placement_offset;
+	__le32 rss_hash_result;
+	__le16 vlan_tag;
+	__le16 pkt_len_or_gro_seg_len;
+	__le16 len_on_bd;
+	struct parsing_flags pars_flags;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	u8 tunn_type;
+	u8 tunn_inner_hdrs_offset;
+	__le16 reserved1;
+	__le32 tunn_tenant_id;
+	__le32 padding[5];
+	u32 marker;
+};
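+
+/* Illustrative sketch (not part of the original firmware interface): checking
+ * the checksum-error flags of a fast path Rx CQE using the masks above.
+ */
+static inline int eth_fp_cqe_csum_err(const struct eth_fast_path_rx_cqe *cqe)
+{
+	return !!(cqe->type_error_flags &
+		  (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+		   ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));
+}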
+
+
+/*
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+	__le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+	__le16 reserved3;
+	struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+/* Hsi version */
+enum eth_fp_hsi_ver {
+	ETH_FP_HSI_VER_0,
+	ETH_FP_HSI_VER_1,
+	ETH_FP_HSI_VER_2,
+	MAX_ETH_FP_HSI_VER
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data {
+	__le32 client_id;
+	__le32 reserved0;
+};
+
+
+/*
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+	__le16 dst_mid;
+	__le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_lo;
+	__le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+	__le16 src_lo;
+	__le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_hi;
+	__le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+	__le16 src_hi;
+	__le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 src_mid;
+	__le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+	__le16 dst_lo;
+	__le16 dst_mid;
+	__le16 dst_hi;
+	__le16 fw_ip_hdr_csum;
+	__le16 pseudo_csum;
+	u8 ip_hdr_start_inner_w;
+	u8 flags;
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
+ */
+union eth_mac_addr_or_tunnel_data {
+	struct eth_mac_addresses mac_addr;
+	struct eth_tunnel_data tunnel_data;
+};
+
+/* Command for setting multicast classification for a client */
+struct eth_multicast_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+	u8 func_id;
+	u8 bin_id;
+	u8 engine_id;
+	__le32 reserved2;
+	struct regpair reserved3;
+};
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+/*
+ * Place holder for ramrods protocol specific data
+ */
+struct ramrod_data {
+	__le32 data_lo;
+	__le32 data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data {
+	struct ramrod_data general;
+};
+
+
+/*
+ * RSS Toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type {
+	DEFAULT_HASH_TYPE,
+	IPV4_HASH_TYPE,
+	TCP_IPV4_HASH_TYPE,
+	IPV6_HASH_TYPE,
+	TCP_IPV6_HASH_TYPE,
+	VLAN_PRI_HASH_TYPE,
+	E1HOV_PRI_HASH_TYPE,
+	DSCP_HASH_TYPE,
+	MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+	ETH_RSS_MODE_DISABLED,
+	ETH_RSS_MODE_REGULAR,
+	ETH_RSS_MODE_VLAN_PRI,
+	ETH_RSS_MODE_E1HOV_PRI,
+	ETH_RSS_MODE_IP_DSCP,
+	MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+	u8 rss_engine_id;
+	u8 rss_mode;
+	__le16 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
+	u8 rss_result_mask;
+	u8 reserved3;
+	__le16 reserved4;
+	u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	__le32 rss_key[T_ETH_RSS_KEY];
+	__le32 echo;
+	__le32 reserved5;
+};
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * Eth Rx Cqe structure - general structure for ramrods
+ */
+struct common_ramrod_eth_rx_cqe {
+	u8 ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+	u8 conn_type;
+	__le16 reserved1;
+	__le32 conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+	struct ramrod_data protocol_data;
+	__le32 echo;
+	__le32 reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe {
+	struct eth_fast_path_rx_cqe fast_path_cqe;
+	struct common_ramrod_eth_rx_cqe ramrod_cqe;
+	struct eth_rx_cqe_next_page next_page_cqe;
+	struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+	RX_ETH_CQE_TYPE_ETH_FASTPATH,
+	RX_ETH_CQE_TYPE_ETH_RAMROD,
+	RX_ETH_CQE_TYPE_ETH_START_AGG,
+	RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+	MAX_ETH_RX_CQE_TYPE
+};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+	ETH_FP_CQE_REGULAR,
+	ETH_FP_CQE_RAW,
+	MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct spe_hdr {
+	__le32 conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0)
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24)
+#define SPE_HDR_CMD_ID_SHIFT 24
+	__le16 type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0)
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+	__le16 reserved1;
+};
+
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data {
+	u8 protocol_data[8];
+	struct regpair client_update_ramrod_data;
+	struct regpair client_init_ramrod_init_data;
+	struct eth_halt_ramrod_data halt_ramrod_data;
+	struct regpair update_data_addr;
+	struct eth_common_ramrod_data common_ramrod_data;
+	struct regpair classify_cfg_addr;
+	struct regpair filter_cfg_addr;
+	struct regpair mcast_cfg_addr;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe {
+	struct spe_hdr hdr;
+	union eth_specific_data data;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id {
+	RAMROD_CMD_ID_ETH_UNUSED,
+	RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+	RAMROD_CMD_ID_ETH_HALT,
+	RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+	RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+	RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+	RAMROD_CMD_ID_ETH_EMPTY,
+	RAMROD_CMD_ID_ETH_TERMINATE,
+	RAMROD_CMD_ID_ETH_TPA_UPDATE,
+	RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+	RAMROD_CMD_ID_ETH_FILTER_RULES,
+	RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+	RAMROD_CMD_ID_ETH_RSS_UPDATE,
+	RAMROD_CMD_ID_ETH_SET_MAC,
+	MAX_ETH_SPQE_CMD_ID
+};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command {
+	TPA_UPDATE_NONE_COMMAND,
+	TPA_UPDATE_ENABLE_COMMAND,
+	TPA_UPDATE_DISABLE_COMMAND,
+	MAX_ETH_TPA_UPDATE_COMMAND
+};
+
+/* In case of LSO over IPv4 tunnel, whether to increment
+ * IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+	EXT_HEADER,
+	INT_HEADER,
+	MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case a tunnel exists and L4 checksum offload is used,
+ * the pseudo checksum location: on packet or on BD.
+ */
+enum eth_tunnel_non_lso_csum_location {
+	CSUM_ON_PKT,
+	CSUM_ON_BD,
+	MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
+};
+
+enum eth_tunn_type {
+	TUNN_TYPE_NONE,
+	TUNN_TYPE_VXLAN,
+	TUNN_TYPE_L2_GRE,
+	TUNN_TYPE_IPV4_GRE,
+	TUNN_TYPE_IPV6_GRE,
+	TUNN_TYPE_L2_GENEVE,
+	TUNN_TYPE_IPV4_GENEVE,
+	TUNN_TYPE_IPV6_GENEVE,
+	MAX_ETH_TUNN_TYPE
+};
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le16 total_pkt_bytes;
+	__le16 nbytes;
+	u8 reserved[4];
+};
+
+
+/*
+ * structure for easy accessibility to assembler
+ */
+struct eth_tx_bd_flags {
+	u8 as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
+
+/*
+ * The eth Tx Buffer Descriptor
+ */
+struct eth_tx_start_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le16 nbd;
+	__le16 nbytes;
+	__le16 vlan_or_ethertype;
+	struct eth_tx_bd_flags bd_flags;
+	u8 general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
+#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
+};
+
+/*
+ * Tx parsing BD structure for ETH E1/E1h
+ */
+struct eth_tx_parse_bd_e1x {
+	__le16 global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
+	u8 tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+	u8 ip_hlen_w;
+	__le16 total_hlen_w;
+	__le16 tcp_pseudo_csum;
+	__le16 lso_mss;
+	__le16 ip_id;
+	__le32 tcp_send_seq;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+	union eth_mac_addr_or_tunnel_data data;
+	__le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
+};
+
+/*
+ * Tx 2nd parsing BD structure for ETH packet
+ */
+struct eth_tx_parse_2nd_bd {
+	__le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+	u8 bd_type;
+#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
+	u8 reserved3;
+	u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+	u8 reserved4;
+	u8 tunnel_udp_hdr_start_w;
+	u8 fw_ip_hdr_to_payload_w;
+	__le16 fw_ip_csum_wo_len_flags_frag;
+	__le16 hw_ip_id;
+	__le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
+struct eth_tx_next_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	u8 reserved[8];
+};
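+
+/* Illustrative sketch (not part of the original header): the driver links
+ * Tx BD pages by writing the DMA address of the following page into the
+ * next-pointer BD at the end of the current page. The helper name and the
+ * use of lower_32_bits()/upper_32_bits() here are assumptions for
+ * illustration only.
+ */
+static inline void example_link_tx_bd_page(struct eth_tx_next_bd *next_bd,
+					   dma_addr_t next_page_mapping)
+{
+	next_bd->addr_lo = cpu_to_le32(lower_32_bits(next_page_mapping));
+	next_bd->addr_hi = cpu_to_le32(upper_32_bits(next_page_mapping));
+}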
+
+/*
+ * union for all Tx BD types
+ */
+union eth_tx_bd_types {
+	struct eth_tx_start_bd start_bd;
+	struct eth_tx_bd reg_bd;
+	struct eth_tx_parse_bd_e1x parse_bd_e1x;
+	struct eth_tx_parse_bd_e2 parse_bd_e2;
+	struct eth_tx_parse_2nd_bd parse_2nd_bd;
+	struct eth_tx_next_bd next_bd;
+};
+
+/*
+ * array of 13 BDs as it appears in the eth xstorm context
+ */
+struct eth_tx_bds_array {
+	union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type {
+	X_ETH_NO_VLAN,
+	X_ETH_OUTBAND_VLAN,
+	X_ETH_INBAND_VLAN,
+	X_ETH_FW_ADDED_VLAN,
+	MAX_ETH_TX_VLAN_TYPE
+};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode {
+	ETH_VLAN_FILTER_ANY_VLAN,
+	ETH_VLAN_FILTER_SPECIFIC_VLAN,
+	ETH_VLAN_FILTER_CLASSIFY,
+	MAX_ETH_VLAN_FILTER_MODE
+};
+
+
+/*
+ * MAC filtering configuration command header
+ */
+struct mac_configuration_hdr {
+	u8 length;
+	u8 offset;
+	__le16 client_id;
+	__le32 echo;
+};
+
+/*
+ * MAC address in list for ramrod
+ */
+struct mac_configuration_entry {
+	__le16 lsb_mac_addr;
+	__le16 middle_mac_addr;
+	__le16 msb_mac_addr;
+	__le16 vlan_id;
+	u8 pf_id;
+	u8 flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+	__le16 reserved0;
+	__le32 clients_bit_vector;
+};
+
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd {
+	struct mac_configuration_hdr hdr;
+	struct mac_configuration_entry config_table[64];
+};
+
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+	T_ETH_MAC_COMMAND_INVALIDATE,
+	T_ETH_MAC_COMMAND_SET,
+	MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode {
+	TPA_LRO,
+	TPA_GRO,
+	MAX_TPA_MODE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 client_id;
+	u8 max_tpa_queues;
+	u8 max_sges_for_packet;
+	u8 complete_on_both_clients;
+	u8 dont_verify_rings_pause_thr_flg;
+	u8 tpa_mode;
+	__le16 sge_buff_size;
+	__le16 max_agg_size;
+	__le32 sge_page_base_lo;
+	__le32 sge_page_base_hi;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering {
+	u32 mcast_add_hash_bit_array[8];
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm
+ */
+struct tstorm_eth_function_common_config {
+	__le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+	u8 rss_result_mask;
+	u8 reserved1;
+	__le16 vlan_id[2];
+};
+
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm
+ */
+struct tstorm_eth_mac_filter_config {
+	u32 ucast_drop_all;
+	u32 ucast_accept_all;
+	u32 mcast_drop_all;
+	u32 mcast_accept_all;
+	u32 bcast_accept_all;
+	u32 vlan_filter[2];
+	u32 unmatched_unicast;
+};
+
+
+/*
+ * tx only queue init ramrod data
+ */
+struct tx_queue_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_tx_data tx;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+struct ustorm_eth_rx_producers {
+#if defined(__BIG_ENDIAN)
+	u16 bd_prod;
+	u16 cqe_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cqe_prod;
+	u16 bd_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved;
+	u16 sge_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_prod;
+	u16 reserved;
+#endif
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+	__le32 fcoe_rx_pkt_cnt;
+	__le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+	__le32 fcoe_ver_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+	__le32 fc_crc_cnt;
+	__le32 eofa_del_cnt;
+	__le32 miss_frame_cnt;
+	__le32 seq_timeout_cnt;
+	__le32 drop_seq_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
+	__le32 fcp_rx_pkt_cnt;
+	__le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+	__le32 fcoe_tx_pkt_cnt;
+	__le32 fcoe_tx_byte_cnt;
+	__le32 fcp_tx_pkt_cnt;
+	__le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+	struct fcoe_tx_stat_params tx_stat;
+	struct fcoe_rx_stat_params_section0 rx_stat0;
+	struct fcoe_rx_stat_params_section1 rx_stat1;
+	struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * The data the afex vif list ramrod needs
+ */
+struct afex_vif_list_ramrod_data {
+	u8 afex_vif_list_command;
+	u8 func_bit_map;
+	__le16 vif_list_index;
+	u8 func_to_clear;
+	u8 echo;
+	__le16 reserved1;
+};
+
+struct c2s_pri_trans_table_entry {
+	u8 val[MAX_VLAN_PRIORITIES];
+};
+
+/*
+ * cfc delete event data
+ */
+struct cfc_del_event_data {
+	u32 cid;
+	u32 reserved0;
+	u32 reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port {
+	u32 cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+	u32 __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port {
+	u32 rs_periodic_timeout;
+	u32 rs_threshold;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port {
+	u32 upper_bound;
+	u32 fair_threshold;
+	u32 fairness_timeout;
+	u32 reserved0;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port {
+#if defined(__BIG_ENDIAN)
+	u16 __reserved1;
+	u8 __reserved0;
+	u8 safc_timeout_usec;
+#elif defined(__LITTLE_ENDIAN)
+	u8 safc_timeout_usec;
+	u8 __reserved0;
+	u16 __reserved1;
+#endif
+	u8 cos_to_traffic_types[MAX_COS_NUMBER];
+	u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port {
+	struct rate_shaping_vars_per_port rs_vars;
+	struct fairness_vars_per_port fair_vars;
+	struct safc_struct_per_port safc_vars;
+	struct cmng_flags_per_port flags;
+};
+
+/*
+ * a single rate shaping counter; can be used as a protocol or vnic counter
+ */
+struct rate_shaping_counter {
+	u32 quota;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved0;
+	u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rate;
+	u16 __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+	struct rate_shaping_counter vn_counter;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+	u32 cos_credit_delta[MAX_COS_NUMBER];
+	u32 vn_credit_delta;
+	u32 __reserved0;
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_vnic {
+	struct rate_shaping_vars_per_vn vnic_max_rate[4];
+	struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init {
+	struct cmng_struct_per_port port;
+	struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input {
+	u32 port_rate;
+	u16 vnic_min_rate[4];
+	u16 vnic_max_rate[4];
+	u16 cos_min_rate[MAX_COS_NUMBER];
+	u16 cos_to_pause_mask[MAX_COS_NUMBER];
+	struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id {
+	RAMROD_CMD_ID_COMMON_UNUSED,
+	RAMROD_CMD_ID_COMMON_FUNCTION_START,
+	RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+	RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
+	RAMROD_CMD_ID_COMMON_CFC_DEL,
+	RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+	RAMROD_CMD_ID_COMMON_STAT_QUERY,
+	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+	RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
+	MAX_COMMON_SPQE_CMD_ID
+};
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type {
+	ETH_CONNECTION_TYPE,
+	TOE_CONNECTION_TYPE,
+	RDMA_CONNECTION_TYPE,
+	ISCSI_CONNECTION_TYPE,
+	FCOE_CONNECTION_TYPE,
+	RESERVED_CONNECTION_TYPE_0,
+	RESERVED_CONNECTION_TYPE_1,
+	RESERVED_CONNECTION_TYPE_2,
+	NONE_CONNECTION_TYPE,
+	MAX_CONNECTION_TYPE
+};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode {
+	OVERRIDE_COS,
+	STATIC_COS,
+	FW_WRR,
+	MAX_COS_MODE
+};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter {
+	u32 val[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data {
+	struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
+	struct regpair reserved[2];
+};
+
+
+/*
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data {
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone {
+	struct vf_pf_channel_zone_data vf_pf_channel;
+};
+
+/*
+ * Vf-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger {
+	u8 addr_valid;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone {
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u8 reserved0;
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+	u8 reserved0;
+	u16 reserved1;
+#endif
+	u32 reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data {
+	struct non_trigger_vf_zone non_trigger;
+	struct trigger_vf_zone trigger;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config {
+	u32 threshold[3];
+	u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config {
+	struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
+};
+
+
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+	u8 cos;
+	u8 voq;
+	u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+	u8 pbf_queue;
+	u8 voq;
+	u8 cos;
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ramEn;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data
+ */
+struct eth_event_data {
+	u32 echo;
+	u32 reserved0;
+	u32 reserved1;
+};
+
+
+/*
+ * pf-vf event data
+ */
+struct vf_pf_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+};
+
+/*
+ * malicious VF event data
+ */
+struct malicious_vf_event_data {
+	u8 vf_id;
+	u8 err_id;
+	u16 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+};
+
+/*
+ * vif list event data
+ */
+struct vif_list_event_data {
+	u8 func_bit_map;
+	u8 echo;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le32 reserved2;
+};
+
+/* function update event data */
+struct function_update_event_data {
+	u8 echo;
+	u8 reserved;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le32 reserved2;
+};
+
+
+/* union for all event ring message types */
+union event_data {
+	struct vf_pf_event_data vf_pf_event;
+	struct eth_event_data eth_event;
+	struct cfc_del_event_data cfc_del_event;
+	struct vf_flr_event_data vf_flr_event;
+	struct malicious_vf_event_data malicious_vf_event;
+	struct vif_list_event_data vif_list_event;
+	struct function_update_event_data function_update_event;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data {
+	struct regpair_native base_addr;
+#if defined(__BIG_ENDIAN)
+	u8 index_id;
+	u8 sb_id;
+	u16 producer;
+#elif defined(__LITTLE_ENDIAN)
+	u16 producer;
+	u8 sb_id;
+	u8 index_id;
+#endif
+	u32 reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits)
+ */
+struct event_ring_msg {
+	u8 opcode;
+	u8 error;
+	u16 reserved1;
+	union event_data data;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next {
+	struct regpair addr;
+	u32 reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem {
+	struct event_ring_msg message;
+	struct event_ring_next next_page;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+	EVENT_RING_OPCODE_VF_PF_CHANNEL,
+	EVENT_RING_OPCODE_FUNCTION_START,
+	EVENT_RING_OPCODE_FUNCTION_STOP,
+	EVENT_RING_OPCODE_CFC_DEL,
+	EVENT_RING_OPCODE_CFC_DEL_WB,
+	EVENT_RING_OPCODE_STAT_QUERY,
+	EVENT_RING_OPCODE_STOP_TRAFFIC,
+	EVENT_RING_OPCODE_START_TRAFFIC,
+	EVENT_RING_OPCODE_VF_FLR,
+	EVENT_RING_OPCODE_MALICIOUS_VF,
+	EVENT_RING_OPCODE_FORWARD_SETUP,
+	EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+	EVENT_RING_OPCODE_FUNCTION_UPDATE,
+	EVENT_RING_OPCODE_AFEX_VIF_LISTS,
+	EVENT_RING_OPCODE_SET_MAC,
+	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+	EVENT_RING_OPCODE_FILTERS_RULES,
+	EVENT_RING_OPCODE_MULTICAST_RULES,
+	EVENT_RING_OPCODE_SET_TIMESYNC,
+	MAX_EVENT_RING_OPCODE
+};
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+	FAIRNESS_COS_WRR_MODE,
+	FAIRNESS_COS_ETS_MODE,
+	MAX_FAIRNESS_MODE
+};
+
+
+/*
+ * Priority and cos
+ */
+struct priority_cos {
+	u8 priority;
+	u8 cos;
+	__le16 reserved1;
+};
+
+/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+	u8 reserved1;
+	__le32 reserved2;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
+};
+
+
+/*
+ * The data for the function start ramrod
+ */
+struct function_start_data {
+	u8 function_mode;
+	u8 allow_npar_tx_switching;
+	__le16 sd_vlan_tag;
+	__le16 vif_id;
+	u8 path_id;
+	u8 network_cos_mode;
+	u8 dmae_cmd_id;
+	u8 no_added_tags;
+	__le16 reserved0;
+	__le32 reserved1;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
+	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
+	u8 sd_accept_mf_clss_fail;
+	u8 sd_accept_mf_clss_fail_match_ethtype;
+	__le16 sd_accept_mf_clss_fail_ethtype;
+	__le16 sd_vlan_eth_type;
+	u8 sd_vlan_force_pri_flg;
+	u8 sd_vlan_force_pri_val;
+	u8 c2s_pri_tt_valid;
+	u8 c2s_pri_default;
+	u8 reserved2[6];
+	struct c2s_pri_trans_table_entry c2s_pri_trans_table;
+};
+
+struct function_update_data {
+	u8 vif_id_change_flg;
+	u8 afex_default_vlan_change_flg;
+	u8 allowed_priorities_change_flg;
+	u8 network_cos_mode_change_flg;
+	__le16 vif_id;
+	__le16 afex_default_vlan;
+	u8 allowed_priorities;
+	u8 network_cos_mode;
+	u8 lb_mode_en_change_flg;
+	u8 lb_mode_en;
+	u8 tx_switch_suspend_change_flg;
+	u8 tx_switch_suspend;
+	u8 echo;
+	u8 update_tunn_cfg_flg;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
+	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
+	u8 sd_vlan_force_pri_change_flg;
+	u8 sd_vlan_force_pri_flg;
+	u8 sd_vlan_force_pri_val;
+	u8 sd_vlan_tag_change_flg;
+	u8 sd_vlan_eth_type_change_flg;
+	u8 reserved1;
+	__le16 sd_vlan_tag;
+	__le16 sd_vlan_eth_type;
+	__le16 reserved0;
+	__le32 reserved2;
+};
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version {
+#if defined(__BIG_ENDIAN)
+	u8 engineering;
+	u8 revision;
+	u8 minor;
+	u8 major;
+#elif defined(__LITTLE_ENDIAN)
+	u8 major;
+	u8 minor;
+	u8 revision;
+	u8 engineering;
+#endif
+	u32 flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0)
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIEN (0x1<<1)
+#define FW_VERSION_BIG_ENDIEN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2)
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters {
+	u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+
+/*
+ * 2 bytes. configuration/state parameters for a single protocol index
+ */
+struct hc_index_data {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+	u8 timeout;
+#elif defined(__LITTLE_ENDIAN)
+	u8 timeout;
+	u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm {
+#if defined(__BIG_ENDIAN)
+	u8 igu_seg_id;
+	u8 igu_sb_id;
+	u8 timer_value;
+	u8 __flags;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __flags;
+	u8 timer_value;
+	u8 igu_sb_id;
+	u8 igu_seg_id;
+#endif
+	u32 time_to_expire;
+};
+
+/*
+ * holds PCI identification variables - used in various places in the firmware
+ */
+struct pci_entity {
+#if defined(__BIG_ENDIAN)
+	u8 vf_valid;
+	u8 vf_id;
+	u8 vnic_id;
+	u8 pf_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 pf_id;
+	u8 vnic_id;
+	u8 vf_id;
+	u8 vf_valid;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data {
+	struct regpair_native host_sb_addr;
+	struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
+	struct pci_entity p_func;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv0;
+	u8 state;
+	u8 dhc_qzone_id;
+	u8 same_igu_sb_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 same_igu_sb_1b;
+	u8 dhc_qzone_id;
+	u8 state;
+	u8 rsrv0;
+#endif
+	struct regpair_native rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+	HC_REGULAR_SEGMENT,
+	HC_DEFAULT_SEGMENT,
+	MAX_HC_SEGMENT
+};
+
+
+/*
+ * The slow-path status block meta-data
+ */
+struct hc_sp_status_block_data {
+	struct regpair_native host_sb_addr;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1;
+	u8 state;
+	u8 igu_seg_id;
+	u8 igu_sb_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 igu_sb_id;
+	u8 igu_seg_id;
+	u8 state;
+	u8 rsrv1;
+#endif
+	struct pci_entity p_func;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x {
+	struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
+	struct hc_sb_data common;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2 {
+	struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
+	struct hc_sb_data common;
+};
+
+
+/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+	HC_IGU_BC_MODE,
+	HC_IGU_NBC_MODE,
+	MAX_IGU_MODE
+};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+	IP_V4,
+	IP_V6,
+	MAX_IP_VER
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+	MALICIOUS_VF_NO_ERROR,
+	VF_PF_CHANNEL_NOT_READY,
+	ETH_ILLEGAL_BD_LENGTHS,
+	ETH_PACKET_TOO_SHORT,
+	ETH_PAYLOAD_TOO_BIG,
+	ETH_ILLEGAL_ETH_TYPE,
+	ETH_ILLEGAL_LSO_HDR_LEN,
+	ETH_TOO_MANY_BDS,
+	ETH_ZERO_HDR_NBDS,
+	ETH_START_BD_NOT_SET,
+	ETH_ILLEGAL_PARSE_NBDS,
+	ETH_IPV6_AND_CHECKSUM,
+	ETH_VLAN_FLG_INCORRECT,
+	ETH_ILLEGAL_LSO_MSS,
+	ETH_TUNNEL_NOT_SUPPORTED,
+	MAX_MALICIOUS_VF_ERROR_ID
+};
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+	SINGLE_FUNCTION,
+	MULTI_FUNCTION_SD,
+	MULTI_FUNCTION_SI,
+	MULTI_FUNCTION_AFEX,
+	MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+	struct regpair rcv_error_bytes;
+};
+
+/*
+ * per-PF statistics container
+ */
+struct per_pf_stats {
+	struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+	__le32 mac_discard;
+	__le32 mac_filter_discard;
+	__le32 brb_truncate_discard;
+	__le32 mf_tag_discard;
+	__le32 packet_drop;
+	__le32 reserved;
+};
+
+/*
+ * per-port statistics container
+ */
+struct per_port_stats {
+	struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+	struct regpair rcv_ucast_bytes;
+	__le32 rcv_ucast_pkts;
+	__le32 checksum_discard;
+	struct regpair rcv_bcast_bytes;
+	__le32 rcv_bcast_pkts;
+	__le32 pkts_too_big_discard;
+	struct regpair rcv_mcast_bytes;
+	__le32 rcv_mcast_pkts;
+	__le32 ttl0_discard;
+	__le16 no_buff_discard;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+	struct regpair ucast_no_buff_bytes;
+	struct regpair mcast_no_buff_bytes;
+	struct regpair bcast_no_buff_bytes;
+	__le32 ucast_no_buff_pkts;
+	__le32 mcast_no_buff_pkts;
+	__le32 bcast_no_buff_pkts;
+	__le32 coalesced_pkts;
+	struct regpair coalesced_bytes;
+	__le32 coalesced_events;
+	__le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+	struct regpair ucast_bytes_sent;
+	struct regpair mcast_bytes_sent;
+	struct regpair bcast_bytes_sent;
+	__le32 ucast_pkts_sent;
+	__le32 mcast_pkts_sent;
+	__le32 bcast_pkts_sent;
+	__le32 error_drop_pkts;
+};
+
+/*
+ * per-queue statistics container
+ */
+struct per_queue_stats {
+	struct tstorm_per_queue_stats tstorm_queue_statistics;
+	struct ustorm_per_queue_stats ustorm_queue_statistics;
+	struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in first line of pram
+ */
+struct pram_fw_version {
+	u8 major;
+	u8 minor;
+	u8 revision;
+	u8 engineering;
+	u8 flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
+#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * specific data for protocol-common slow path element
+ */
+union protocol_common_specific_data {
+	u8 protocol_data[8];
+	struct regpair phy_address;
+	struct regpair mac_config_addr;
+	struct afex_vif_list_ramrod_data afex_vif_list_data;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe {
+	struct spe_hdr hdr;
+	union protocol_common_specific_data data;
+};
+
+/* The data for the Set Timesync Ramrod */
+struct set_timesync_ramrod_data {
+	u8 drift_adjust_cmd;
+	u8 offset_cmd;
+	u8 add_sub_drift_adjust_value;
+	u8 drift_adjust_value;
+	u32 drift_adjust_period;
+	struct regpair offset_delta;
+};
+
+/*
+ * The send queue element
+ */
+struct slow_path_element {
+	struct spe_hdr hdr;
+	struct regpair protocol_data;
+};
+
+
+/*
+ * Protocol-common statistics counter
+ */
+struct stats_counter {
+	__le16 xstats_counter;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le16 tstats_counter;
+	__le16 reserved2;
+	__le32 reserved3;
+	__le16 ustats_counter;
+	__le16 reserved4;
+	__le32 reserved5;
+	__le16 cstats_counter;
+	__le16 reserved6;
+	__le32 reserved7;
+};
+
+
+/*
+ * statistics query entry
+ */
+struct stats_query_entry {
+	u8 kind;
+	u8 index;
+	__le16 funcID;
+	__le32 reserved;
+	struct regpair address;
+};
+
+/*
+ * statistic command
+ */
+struct stats_query_cmd_group {
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistic command header
+ */
+struct stats_query_header {
+	u8 cmd_num;
+	u8 reserved0;
+	__le16 drv_stats_counter;
+	__le32 reserved1;
+	struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type {
+	STATS_TYPE_QUEUE,
+	STATS_TYPE_PORT,
+	STATS_TYPE_PF,
+	STATS_TYPE_TOE,
+	STATS_TYPE_FCOE,
+	MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state {
+	SB_DISABLED,
+	SB_ENABLED,
+	SB_CLEANED,
+	MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+	USTORM_ID,
+	CSTORM_ID,
+	XSTORM_ID,
+	TSTORM_ID,
+	ATTENTION_ID,
+	MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+	LLFC_TRAFFIC_TYPE_NW,
+	LLFC_TRAFFIC_TYPE_FCOE,
+	LLFC_TRAFFIC_TYPE_ISCSI,
+	MAX_TRAFFIC_TYPE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data {
+	struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+/* Add or Subtract Value for Set Timesync Ramrod */
+enum ts_add_sub_value {
+	TS_SUB_VALUE,
+	TS_ADD_VALUE,
+	MAX_TS_ADD_SUB_VALUE
+};
+
+/* Drift-Adjust Commands for Set Timesync Ramrod */
+enum ts_drift_adjust_cmd {
+	TS_DRIFT_ADJUST_KEEP,
+	TS_DRIFT_ADJUST_SET,
+	TS_DRIFT_ADJUST_RESET,
+	MAX_TS_DRIFT_ADJUST_CMD
+};
+
+/* Offset Commands for Set Timesync Ramrod */
+enum ts_offset_cmd {
+	TS_OFFSET_KEEP,
+	TS_OFFSET_INC,
+	TS_OFFSET_DEC,
+	MAX_TS_OFFSET_CMD
+};
+
+/* Tunnel Mode */
+enum tunnel_mode {
+	TUNN_MODE_NONE,
+	TUNN_MODE_VXLAN,
+	TUNN_MODE_GRE,
+	MAX_TUNNEL_MODE
+};
+
+ /* zone A per-queue data */
+struct ustorm_queue_zone_data {
+	struct ustorm_eth_rx_producers eth_rx_producers;
+	struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data {
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u8 valid;
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 valid;
+	u16 reserved0;
+#endif
+	u32 reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+	VF_PF_CHANNEL_STATE_READY,
+	VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+	MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind {
+	VIF_LIST_RULE_SET,
+	VIF_LIST_RULE_GET,
+	VIF_LIST_RULE_CLEAR_ALL,
+	VIF_LIST_RULE_CLEAR_FUNC,
+	MAX_VIF_LIST_RULE_KIND
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data {
+	struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
new file mode 100644
index 0000000..46ee2c0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -0,0 +1,788 @@
+/* bnx2x_init.h: QLogic Everest network driver.
+ *               Structures and macros needed during the initialization.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Modified by: Vladislav Zolotarov
+ */
+
+#ifndef BNX2X_INIT_H
+#define BNX2X_INIT_H
+
+/* Init operation types and structures */
+enum {
+	OP_RD = 0x1,	/* read a single register */
+	OP_WR,		/* write a single register */
+	OP_SW,		/* copy a string to the device */
+	OP_ZR,		/* clear memory */
+	OP_ZP,		/* unzip then copy with DMAE */
+	OP_WR_64,	/* write 64 bit pattern */
+	OP_WB,		/* copy a string using DMAE */
+	OP_WB_ZR,	/* Clear a string using DMAE or indirect-wr */
+	/* Skip the following ops if none of the init modes match */
+	OP_IF_MODE_OR,
+	/* Skip the following ops if any of the init modes don't match */
+	OP_IF_MODE_AND,
+	OP_MAX
+};
+
+enum {
+	STAGE_START,
+	STAGE_END,
+};
+
+/* Returns the index of start or end of a specific block stage in ops array*/
+#define BLOCK_OPS_IDX(block, stage, end) \
+	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
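+
+/* Illustrative example (not part of the original header): with the init
+ * phase and block enums defined further below (BLOCK_DMAE = 8,
+ * NUM_OF_INIT_PHASES = 11, PHASE_COMMON = 0, STAGE_END = 1), the end of the
+ * DMAE block's common phase is found at
+ *	BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON, STAGE_END)
+ *		= 2 * ((8 * 11) + 0) + 1 = 177
+ * i.e. every (block, phase) pair owns two consecutive slots in the index
+ * array: its start and its end.
+ */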
+
+
+/* structs for the various opcodes */
+struct raw_op {
+	u32 op:8;
+	u32 offset:24;
+	u32 raw_data;
+};
+
+struct op_read {
+	u32 op:8;
+	u32 offset:24;
+	u32 val;
+};
+
+struct op_write {
+	u32 op:8;
+	u32 offset:24;
+	u32 val;
+};
+
+struct op_arr_write {
+	u32 op:8;
+	u32 offset:24;
+#ifdef __BIG_ENDIAN
+	u16 data_len;
+	u16 data_off;
+#else /* __LITTLE_ENDIAN */
+	u16 data_off;
+	u16 data_len;
+#endif
+};
+
+struct op_zero {
+	u32 op:8;
+	u32 offset:24;
+	u32 len;
+};
+
+struct op_if_mode {
+	u32 op:8;
+	u32 cmd_offset:24;
+	u32 mode_bit_map;
+};
+
+
+union init_op {
+	struct op_read		read;
+	struct op_write		write;
+	struct op_arr_write	arr_wr;
+	struct op_zero		zero;
+	struct raw_op		raw;
+	struct op_if_mode	if_mode;
+};
+
+
+/* Init Phases */
+enum {
+	PHASE_COMMON,
+	PHASE_PORT0,
+	PHASE_PORT1,
+	PHASE_PF0,
+	PHASE_PF1,
+	PHASE_PF2,
+	PHASE_PF3,
+	PHASE_PF4,
+	PHASE_PF5,
+	PHASE_PF6,
+	PHASE_PF7,
+	NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+	MODE_ASIC                      = 0x00000001,
+	MODE_FPGA                      = 0x00000002,
+	MODE_EMUL                      = 0x00000004,
+	MODE_E2                        = 0x00000008,
+	MODE_E3                        = 0x00000010,
+	MODE_PORT2                     = 0x00000020,
+	MODE_PORT4                     = 0x00000040,
+	MODE_SF                        = 0x00000080,
+	MODE_MF                        = 0x00000100,
+	MODE_MF_SD                     = 0x00000200,
+	MODE_MF_SI                     = 0x00000400,
+	MODE_MF_AFEX                   = 0x00000800,
+	MODE_E3_A0                     = 0x00001000,
+	MODE_E3_B0                     = 0x00002000,
+	MODE_COS3                      = 0x00004000,
+	MODE_COS6                      = 0x00008000,
+	MODE_LITTLE_ENDIAN             = 0x00010000,
+	MODE_BIG_ENDIAN                = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+	BLOCK_ATC,
+	BLOCK_BRB1,
+	BLOCK_CCM,
+	BLOCK_CDU,
+	BLOCK_CFC,
+	BLOCK_CSDM,
+	BLOCK_CSEM,
+	BLOCK_DBG,
+	BLOCK_DMAE,
+	BLOCK_DORQ,
+	BLOCK_HC,
+	BLOCK_IGU,
+	BLOCK_MISC,
+	BLOCK_NIG,
+	BLOCK_PBF,
+	BLOCK_PGLUE_B,
+	BLOCK_PRS,
+	BLOCK_PXP2,
+	BLOCK_PXP,
+	BLOCK_QM,
+	BLOCK_SRC,
+	BLOCK_TCM,
+	BLOCK_TM,
+	BLOCK_TSDM,
+	BLOCK_TSEM,
+	BLOCK_UCM,
+	BLOCK_UPB,
+	BLOCK_USDM,
+	BLOCK_USEM,
+	BLOCK_XCM,
+	BLOCK_XPB,
+	BLOCK_XSDM,
+	BLOCK_XSEM,
+	BLOCK_MISC_AEU,
+	NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q		0
+#define BNX2X_TOE_Q		3
+#define BNX2X_TOE_ACK_Q		6
+#define BNX2X_ISCSI_Q		9
+#define BNX2X_ISCSI_ACK_Q	11
+#define BNX2X_FCOE_Q		10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+	((((port) << 1) | (vnic)) * 16 + (q_num))
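+
+/* Illustrative example (not part of the original header): in 2-port mode,
+ * the FCoE queue (BNX2X_FCOE_Q = 10) of vnic 1 on port 0 maps to PF queue
+ *	BNX2X_PF_Q_NUM(BNX2X_FCOE_Q, 0, 1) = ((0 << 1) | 1) * 16 + 10 = 26
+ */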
+
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+	/* find current COS mapping */
+	u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+	/* check if queue->COS mapping has changed */
+	if (curr_cos != new_cos) {
+		u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+		u32 reg_addr, reg_bit_map, vnic;
+
+		/* update parameters for 4port mode */
+		if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+			num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+			if (BP_PORT(bp)) {
+				curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+				new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+			}
+		}
+
+		/* change queue mapping for each VNIC */
+		for (vnic = 0; vnic < num_vnics; vnic++) {
+			u32 pf_q_num =
+				BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+			u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+			/* overwrite queue->VOQ mapping */
+			REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+			/* clear queue bit from current COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+			/* set queue bit in new COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+			/* set/clear queue bit in command-queue bit map
+			 * (E2/E3A0 only, valid COS values are 0/1)
+			 */
+			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+				reg_bit_map = REG_RD(bp, reg_addr);
+				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+				reg_bit_map = new_cos ?
+					      (reg_bit_map | q_bit_map) :
+					      (reg_bit_map & (~q_bit_map));
+				REG_WR(bp, reg_addr, reg_bit_map);
+			}
+		}
+	}
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
+				       struct priority_cos *traffic_cos)
+{
+	bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+		traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	if (mode != STATIC_COS) {
+		/* required only in backward compatible COS mode */
+		bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+	}
+}
+
+
+/* congestion management port init api description
+ * the api works as follows:
+ * the driver should pass the cmng_init_input struct, the port_init function
+ * will prepare the required internal ram structure which will be passed back
+ * to the driver (cmng_init) that will write it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ *    offset in order to write the port sub struct and the
+ *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ *    words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ *    possible, the driver should only write the valid vnics into the internal
+ *    ram according to the appropriate port mode.
+ */
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/* how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+	/* rate shaping per-port variables
+	 * 100 microseconds in SDM ticks = 25,
+	 * since each tick is 4 microseconds
+	 */
+
+	pdata->rs_vars.rs_periodic_timeout =
+	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+	/* this is the threshold below which no timer arming will occur.
+	 * 1.25 coefficient is for the threshold to be a little bigger
+	 * than the real time to compensate for timer inaccuracy
+	 */
+	pdata->rs_vars.rs_threshold =
+	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+	/* rate shaping per-vnic variables */
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+		/* global vnic counter */
+		vdata->vnic_max_rate[vnic].vn_counter.rate =
+		input_data->vnic_max_rate[vnic];
+		/* maximal Mbps for this vnic
+		 * the quota in each timer period - number of bytes
+		 * transmitted in this period
+		 */
+		vdata->vnic_max_rate[vnic].vn_counter.quota =
+			RS_PERIODIC_TIMEOUT_USEC *
+			(u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+	}
+
+}
+
+static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	/* this is the resolution of the fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+	/* fairness per-port variables
+	 * for 10G it is 1000usec. for 1G it is 10000usec.
+	 */
+	tFair = T_FAIR_COEF / input_data->port_rate;
+
+	/* this is the threshold below which we won't arm the timer anymore */
+	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+	 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
+	 */
+	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+	/* since each tick is 4 microseconds */
+	pdata->fair_vars.fairness_timeout =
+				fair_periodic_timeout_usec / SDM_TICKS;
+
+	/* calculate sum of weights */
+	vnicWeightSum = 0;
+
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
+		vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+	/* global vnic counter */
+	if (vnicWeightSum > 0) {
+		/* fairness per-vnic variables */
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* this is the credit for each period of the fairness
+			 * algorithm - number of bytes in T_FAIR (this vnic
+			 * share of the port rate)
+			 */
+			vdata->vnic_min_rate[vnic].vn_credit_delta =
+				(u32)input_data->vnic_min_rate[vnic] * 100 *
+				(T_FAIR_COEF / (8 * 100 * vnicWeightSum));
+			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+			    pdata->fair_vars.fair_threshold +
+			    MIN_ABOVE_THRESH) {
+				vdata->vnic_min_rate[vnic].vn_credit_delta =
+					pdata->fair_vars.fair_threshold +
+					MIN_ABOVE_THRESH;
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
+				     u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, cos;
+	u32 cosWeightSum = 0;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+		cosWeightSum += input_data->cos_min_rate[cos];
+
+	if (cosWeightSum > 0) {
+
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* Since cos and vnic shouldn't work together, the
+			 * rate to divide between the coses is the port rate.
+			 */
+			u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+			for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+				/* this is the credit for each period of
+				 * the fairness algorithm - number of bytes
+				 * in T_FAIR (this cos share of the vnic rate)
+				 */
+				ccd[cos] =
+				    (u32)input_data->cos_min_rate[cos] * 100 *
+				    (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+				 if (ccd[cos] < pdata->fair_vars.fair_threshold
+						+ MIN_ABOVE_THRESH) {
+					ccd[cos] =
+					    pdata->fair_vars.fair_threshold +
+					    MIN_ABOVE_THRESH;
+				}
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	/* in microSeconds */
+	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	u32 r_param;
+	memset(ram_data, 0, sizeof(struct cmng_init));
+
+	ram_data->port.flags = input_data->flags;
+
+	/* number of bytes transmitted at a rate of 10Gbps
+	 * in one usec = 1.25KB.
+	 */
+	r_param = BITS_TO_BYTES(input_data->port_rate);
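+	/* e.g. at 10G this gives r_param = 1250, the 1.25KB/usec noted above */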
+	bnx2x_init_max(input_data, r_param, ram_data);
+	bnx2x_init_min(input_data, r_param, ram_data);
+	bnx2x_init_fw_wrr(input_data, r_param, ram_data);
+	bnx2x_init_safc(input_data, ram_data);
+}
+
+
+
+/* Returns the index of start or end of a specific block stage in ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
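+/* (end) selects between the start and end offsets; callers pass STAGE_START
+ * or STAGE_END (see bnx2x_init_block()).
+ */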
+
+
+#define INITOP_SET		0	/* set the HW directly */
+#define INITOP_CLEAR		1	/* clear the HW directly */
+#define INITOP_INIT		2	/* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+	dma_addr_t page_mapping;
+	void *page;
+	u32 size;
+};
+
+struct ilt_client_info {
+	u32 page_size;
+	u16 start;
+	u16 end;
+	u16 client_num;
+	u16 flags;
+#define ILT_CLIENT_SKIP_INIT	0x1
+#define ILT_CLIENT_SKIP_MEM	0x2
+};
+
+struct bnx2x_ilt {
+	u32 start_line;
+	struct ilt_line		*lines;
+	struct ilt_client_info	clients[4];
+#define ILT_CLIENT_CDU	0
+#define ILT_CLIENT_QM	1
+#define ILT_CLIENT_SRC	2
+#define ILT_CLIENT_TM	3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+	u8 opaque[56];
+	u64 next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK, \
+	block##_REG_##block##_PRTY_STS_CLR, \
+	en_mask, {m1, m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK_0, \
+	block##_REG_##block##_PRTY_STS_CLR_0, \
+	en_mask, {m1, m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK_1, \
+	block##_REG_##block##_PRTY_STS_CLR_1, \
+	en_mask, {m1, m1h, m2, m3}, #block"_1" \
+}
+
+static const struct {
+	u32 mask_addr;
+	u32 sts_clr_addr;
+	u32 en_mask;		/* Mask to enable parity attentions */
+	struct {
+		u32 e1;		/* 57710 */
+		u32 e1h;	/* 57711 */
+		u32 e2;		/* 57712 */
+		u32 e3;		/* 578xx */
+	} reg_mask;		/* Register mask (all valid bits) */
+	char name[8];		/* Block's longest name is 7 characters long
+				 * (name + suffix)
+				 */
+} bnx2x_blocks_parity_data[] = {
+	/* bit 19 masked */
+	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* bit 5,18,20-31 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* bit 5 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+	 * want to handle "system kill" flow at the moment.
+	 */
+	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
+			0x7ffffff),
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+		{0xf, 0xf, 0xf, 0xf}, "UPB"},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\
+	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const struct {
+	u32 addr;
+	u32 bits;
+} mcp_attn_ctl_regs[] = {
+	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+		MISC_AEU_ENABLE_MCP_PRTY_BITS },
+	{ MISC_REG_AEU_ENABLE4_NIG_0,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_PXP_0,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+		MISC_AEU_ENABLE_MCP_PRTY_BITS },
+	{ MISC_REG_AEU_ENABLE4_NIG_1,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+	{ MISC_REG_AEU_ENABLE4_PXP_1,
+		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+	int i;
+	u32 reg_val;
+
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
+
+		if (enable)
+			reg_val |= mcp_attn_ctl_regs[i].bits;
+		else
+			reg_val &= ~mcp_attn_ctl_regs[i].bits;
+
+		REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
+	}
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+	if (CHIP_IS_E1(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+	else if (CHIP_IS_E2(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+	else /* CHIP_IS_E3 */
+		return bnx2x_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (dis_mask) {
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+			       dis_mask);
+			DP(NETIF_MSG_HW, "Setting parity mask "
+						 "for %s to\t\t0x%x\n",
+				    bnx2x_blocks_parity_data[i].name, dis_mask);
+		}
+	}
+
+	/* Disable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, false);
+}
+
+/* Clear the parity error status registers. */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+	u32 reg_val, mcp_aeu_bits =
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+	/* Clear SEM_FAST parities */
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask) {
+			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+					 sts_clr_addr);
+			if (reg_val & reg_mask)
+				DP(NETIF_MSG_HW,
+					    "Parity errors in %s: 0x%x\n",
+					    bnx2x_blocks_parity_data[i].name,
+					    reg_val & reg_mask);
+		}
+	}
+
+	/* Check if there were parity attentions in MCP */
+	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+	if (reg_val & mcp_aeu_bits)
+		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+		   reg_val & mcp_aeu_bits);
+
+	/* Clear parity attentions in MCP:
+	 * [7]  clears Latched rom_parity
+	 * [8]  clears Latched ump_rx_parity
+	 * [9]  clears Latched ump_tx_parity
+	 * [10] clears Latched scpad_parity (both ports)
+	 */
+	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask)
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+				bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+	}
+
+	/* Enable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, true);
+}
+
+
+#endif /* BNX2X_INIT_H */
+
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 0000000..1835d2e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,936 @@
+/* bnx2x_init_ops.h: Qlogic Everest network driver.
+ *               Static functions needed during the initialization.
+ *               This file is "included" in bnx2x_main.c.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ */
+
+#ifndef BNX2X_INIT_OPS_H
+#define BNX2X_INIT_OPS_H
+
+
+#ifndef BP_ILT
+#define BP_ILT(bp)	NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp)	0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp)	0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x)	x
+#endif
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+				      dma_addr_t phys_addr, u32 addr,
+				      u32 len);
+
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		REG_WR(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+				u8 wb)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+
+	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+	else if (wb && CHIP_IS_E1(bp))
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
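+/* Fill a device address range (len is in dwords) with a repeated byte value,
+ * staging the pattern through the gunzip scratch buffer in chunks of at most
+ * FW_BUF_SIZE bytes.
+ */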
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+			    u32 len, u8 wb)
+{
+	u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+	u32 buf_len32 = buf_len/4;
+	u32 i;
+
+	memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
+
+	for (i = 0; i < len; i += buf_len32) {
+		u32 cur_len = min(buf_len32, len - i);
+
+		bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
+	}
+}
+
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+
+	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+	else if (CHIP_IS_E1(bp))
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
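+/* Fill a region with a single 64-bit value taken from the first two dwords
+ * of data (low dword first); len64 is the region length in 64-bit words.
+ * The value is replicated through the gunzip buffer and written in chunks.
+ */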
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len64)
+{
+	u32 buf_len32 = FW_BUF_SIZE/4;
+	u32 len = len64*2;
+	u64 data64 = 0;
+	u32 i;
+
+	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
+	data64 = HILO_U64((*(data + 1)), (*data));
+
+	len64 = min((u32)(FW_BUF_SIZE/8), len64);
+	for (i = 0; i < len64; i++) {
+		u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
+
+		*pdata = data64;
+	}
+
+	for (i = 0; i < len; i += buf_len32) {
+		u32 cur_len = min(buf_len32, len - i);
+
+		bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
+	}
+}
+
+/*********************************************************
+   There are different blobs for each PRAM section.
+   In addition, each blob write operation is divided into a few operations
+   in order to decrease the amount of phys. contiguous buffer needed.
+   Thus, when we select a blob the address may be at some offset
+   from the beginning of the PRAM section.
+   The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+			if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+			if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+				const u8 *data)
+{
+	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+		data = INIT_TSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+			data = INIT_CSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+			data = INIT_USEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+			data = INIT_XSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+			data = INIT_TSEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+			data = INIT_CSEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+			data = INIT_USEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+			data = INIT_XSEM_PRAM_DATA(bp);
+
+	return data;
+}
+
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len)
+{
+	if (bp->dmae_ready)
+		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+
+	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+	else if (CHIP_IS_E1(bp))
+		bnx2x_init_ind_wr(bp, addr, data, len);
+
+	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+	else
+		bnx2x_init_str_wr(bp, addr, data, len);
+}
+
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+			u32 val_hi)
+{
+	u32 wb_write[2];
+
+	wb_write[0] = val_lo;
+	wb_write[1] = val_hi;
+	REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
+}
+
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+			     u32 blob_off)
+{
+	const u8 *data = NULL;
+	int rc;
+	u32 i;
+
+	data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
+
+	rc = bnx2x_gunzip(bp, data, len);
+	if (rc)
+		return;
+
+	/* gunzip_outlen is in dwords */
+	len = GUNZIP_OUTLEN(bp);
+	for (i = 0; i < len; i++)
+		((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
+				cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
+
+	bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
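+/* Execute the init ops recorded for (block, stage): register reads/writes,
+ * array writes, zero fills, zipped blobs, 64-bit fills and mode-conditional
+ * skips.
+ */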
+static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
+{
+	u16 op_start =
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_START)];
+	u16 op_end =
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_END)];
+	const union init_op *op;
+	u32 op_idx, op_type, addr, len;
+	const u32 *data, *data_base;
+
+	/* If empty block */
+	if (op_start == op_end)
+		return;
+
+	data_base = INIT_DATA(bp);
+
+	for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+		op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
+		/* Get generic data */
+		op_type = op->raw.op;
+		addr = op->raw.offset;
+		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
+		 * OP_WR64 (we assume that op_arr_write and op_write have the
+		 * same structure).
+		 */
+		len = op->arr_wr.data_len;
+		data = data_base + op->arr_wr.data_off;
+
+		switch (op_type) {
+		case OP_RD:
+			REG_RD(bp, addr);
+			break;
+		case OP_WR:
+			REG_WR(bp, addr, op->write.val);
+			break;
+		case OP_SW:
+			bnx2x_init_str_wr(bp, addr, data, len);
+			break;
+		case OP_WB:
+			bnx2x_init_wr_wb(bp, addr, data, len);
+			break;
+		case OP_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+			break;
+		case OP_WB_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
+			break;
+		case OP_ZP:
+			bnx2x_init_wr_zp(bp, addr, len,
+					 op->arr_wr.data_off);
+			break;
+		case OP_WR_64:
+			bnx2x_init_wr_64(bp, addr, data, len);
+			break;
+		case OP_IF_MODE_AND:
+			/* if any of the flags doesn't match, skip the
+			 * conditional block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) !=
+				op->if_mode.mode_bit_map)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		case OP_IF_MODE_OR:
+			/* if all the flags don't match, skip the conditional
+			 * block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) == 0)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		default:
+			/* Should never get here! */
+
+			break;
+		}
+	}
+}
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * Since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q			13
+#define NUM_RD_Q			29
+#define MAX_RD_ORD			3
+#define MAX_WR_ORD			2
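+/* The read/write "order" values index the tables below; the MPS table in
+ * bnx2x_init_pxp_arb() maps payload sizes 128/256/>=512 bytes to write
+ * orders 0/1/2.
+ */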
+
+/* configuration for one arbiter queue */
+struct arb_line {
+	int l;
+	int add;
+	int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+	{ {4, 8,  4},  {4,  8,  4},  {4,  8,  4},  {4,  8,  4}  },
+	{ {4, 3,  3},  {4,  3,  3},  {4,  3,  3},  {4,  3,  3}  },
+	{ {8, 3,  6},  {16, 3,  11}, {16, 3,  11}, {16, 3,  11} },
+	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+/* 10 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+/* 20 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */	{ {4, 6,  3},  {4,  6,  3},  {4,  6,  3} },
+	{ {4, 2,  3},  {4,  2,  3},  {4,  2,  3} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+/* 10 */{ {8, 9,  6},  {16, 9,  11}, {32, 9,  21} },
+	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+	{ {8, 9,  6},  {16, 9,  11}, {16, 9,  11} },
+	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+		PXP2_REG_RQ_BW_RD_UBOUND0},
+	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+		PXP2_REG_PSWRQ_BW_UB1},
+	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+		PXP2_REG_PSWRQ_BW_UB2},
+	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+		PXP2_REG_PSWRQ_BW_UB3},
+	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+		PXP2_REG_RQ_BW_RD_UBOUND4},
+	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+		PXP2_REG_RQ_BW_RD_UBOUND5},
+	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+		PXP2_REG_PSWRQ_BW_UB6},
+	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+		PXP2_REG_PSWRQ_BW_UB7},
+	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+		PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+		PXP2_REG_PSWRQ_BW_UB9},
+	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+		PXP2_REG_PSWRQ_BW_UB10},
+	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+		PXP2_REG_PSWRQ_BW_UB11},
+	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+		PXP2_REG_RQ_BW_RD_UBOUND12},
+	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+		PXP2_REG_RQ_BW_RD_UBOUND13},
+	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+		PXP2_REG_RQ_BW_RD_UBOUND14},
+	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+		PXP2_REG_RQ_BW_RD_UBOUND15},
+	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+		PXP2_REG_RQ_BW_RD_UBOUND16},
+	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+		PXP2_REG_RQ_BW_RD_UBOUND17},
+	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+		PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+		PXP2_REG_RQ_BW_RD_UBOUND19},
+	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+		PXP2_REG_RQ_BW_RD_UBOUND20},
+	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+		PXP2_REG_RQ_BW_RD_UBOUND22},
+	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+		PXP2_REG_RQ_BW_RD_UBOUND23},
+	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+		PXP2_REG_RQ_BW_RD_UBOUND24},
+	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+		PXP2_REG_RQ_BW_RD_UBOUND25},
+	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+		PXP2_REG_RQ_BW_RD_UBOUND26},
+	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+		PXP2_REG_RQ_BW_RD_UBOUND27},
+	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+		PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+		PXP2_REG_PSWRQ_BW_UB1},
+	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+		PXP2_REG_PSWRQ_BW_UB2},
+	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+		PXP2_REG_PSWRQ_BW_UB3},
+	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+		PXP2_REG_PSWRQ_BW_UB6},
+	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+		PXP2_REG_PSWRQ_BW_UB7},
+	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+		PXP2_REG_PSWRQ_BW_UB8},
+	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+		PXP2_REG_PSWRQ_BW_UB9},
+	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+		PXP2_REG_PSWRQ_BW_UB10},
+	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+		PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+		PXP2_REG_PSWRQ_BW_UB28},
+	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+		PXP2_REG_RQ_BW_WR_UBOUND29},
+	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+		PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+			       int w_order)
+{
+	u32 val, i;
+
+	if (r_order > MAX_RD_ORD) {
+		DP(NETIF_MSG_HW, "read order of %d  order adjusted to %d\n",
+		   r_order, MAX_RD_ORD);
+		r_order = MAX_RD_ORD;
+	}
+	if (w_order > MAX_WR_ORD) {
+		DP(NETIF_MSG_HW, "write order of %d  order adjusted to %d\n",
+		   w_order, MAX_WR_ORD);
+		w_order = MAX_WR_ORD;
+	}
+	if (CHIP_REV_IS_FPGA(bp)) {
+		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+		w_order = 0;
+	}
+	DP(NETIF_MSG_HW, "read order %d  write order %d\n", r_order, w_order);
+
+	for (i = 0; i < NUM_RD_Q-1; i++) {
+		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+		REG_WR(bp, read_arb_addr[i].add,
+		       read_arb_data[i][r_order].add);
+		REG_WR(bp, read_arb_addr[i].ubound,
+		       read_arb_data[i][r_order].ubound);
+	}
+
+	for (i = 0; i < NUM_WR_Q-1; i++) {
+		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+			REG_WR(bp, write_arb_addr[i].l,
+			       write_arb_data[i][w_order].l);
+
+			REG_WR(bp, write_arb_addr[i].add,
+			       write_arb_data[i][w_order].add);
+
+			REG_WR(bp, write_arb_addr[i].ubound,
+			       write_arb_data[i][w_order].ubound);
+		} else {
+
+			val = REG_RD(bp, write_arb_addr[i].l);
+			REG_WR(bp, write_arb_addr[i].l,
+			       val | (write_arb_data[i][w_order].l << 10));
+
+			val = REG_RD(bp, write_arb_addr[i].add);
+			REG_WR(bp, write_arb_addr[i].add,
+			       val | (write_arb_data[i][w_order].add << 10));
+
+			val = REG_RD(bp, write_arb_addr[i].ubound);
+			REG_WR(bp, write_arb_addr[i].ubound,
+			       val | (write_arb_data[i][w_order].ubound << 7));
+		}
+	}
+
+	val =  write_arb_data[NUM_WR_Q-1][w_order].add;
+	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+
+	val =  read_arb_data[NUM_RD_Q-1][r_order].add;
+	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+
+	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+
+	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
+		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+	else if (CHIP_IS_E2(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+	else
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+	if (!CHIP_IS_E1(bp)) {
+		/*    MPS      w_order     optimal TH      presently TH
+		 *    128         0             0               2
+		 *    256         1             1               3
+		 *    >=512       2             2               3
+		 */
+		/* DMAE is special */
+		if (!CHIP_IS_E1H(bp)) {
+			/* E2 can use optimal TH */
+			val = w_order;
+			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
+		} else {
+			val = ((w_order == 0) ? 2 : 3);
+			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
+		}
+
+		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
+	}
+
+	/* Validate the number of tags supported by the device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
+	val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
+	val &= 0xFF;
+	if (val <= 0x20)
+		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC		0
+#define ILT_MEMOP_FREE		1
+
+/* the phys address is shifted right 12 bits and has a valid bit (1)
+ * added at the 53rd bit;
+ * then, since this is a wide register(TM),
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
+#define ILT_RANGE(f, l)		(((l) << 10) | f)
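+/* ILT_RANGE packs the first and last ILT line numbers into a single register
+ * value (last in the upper bits, first in the lower 10 bits).
+ */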
+
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+				 struct ilt_line *line, u32 size, u8 memop)
+{
+	if (memop == ILT_MEMOP_FREE) {
+		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
+		return 0;
+	}
+	BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
+	if (!line->page)
+		return -1;
+	line->size = size;
+	return 0;
+}
+
+
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+				   u8 memop)
+{
+	int i, rc;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	if (!ilt || !ilt->lines)
+		return -1;
+
+	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+		return 0;
+
+	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+		rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
+					   ilt_cli->page_size, memop);
+	}
+	return rc;
+}
+
+static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
+{
+	int rc = 0;
+
+	if (CONFIGURE_NIC_MODE(bp))
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+	return rc;
+}
+
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+{
+	int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
+	if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+
+	return rc;
+}
+
+static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
+			      dma_addr_t page_mapping)
+{
+	u32 reg;
+
+	if (CHIP_IS_E1(bp))
+		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
+	else
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+				   struct bnx2x_ilt *ilt, int idx, u8 initop)
+{
+	dma_addr_t	null_mapping;
+	int abs_idx = ilt->start_line + idx;
+
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
+		break;
+	case INITOP_CLEAR:
+		null_mapping = 0;
+		bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
+		break;
+	}
+}
+
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+				      struct ilt_client_info *ilt_cli,
+				      u32 ilt_start, u8 initop)
+{
+	u32 start_reg = 0;
+	u32 end_reg = 0;
+
+	/* The boundary is either SET or INIT,
+	   CLEAR => SET and for now SET ~~ INIT */
+
+	/* find the appropriate regs */
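+	/* E1 has one packed FIRST/LAST range register per function; later
+	 * chips have separate per-client first/last ILT registers.
+	 */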
+	if (CHIP_IS_E1(bp)) {
+		switch (ilt_cli->client_num) {
+		case ILT_CLIENT_CDU:
+			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+			break;
+		case ILT_CLIENT_QM:
+			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+			break;
+		case ILT_CLIENT_SRC:
+			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+			break;
+		case ILT_CLIENT_TM:
+			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+			break;
+		}
+		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
+		       ILT_RANGE((ilt_start + ilt_cli->start),
+				 (ilt_start + ilt_cli->end)));
+	} else {
+		switch (ilt_cli->client_num) {
+		case ILT_CLIENT_CDU:
+			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+			break;
+		case ILT_CLIENT_QM:
+			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+			break;
+		case ILT_CLIENT_SRC:
+			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+			break;
+		case ILT_CLIENT_TM:
+			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+			break;
+		}
+		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
+		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
+	}
+}
+
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+					 struct bnx2x_ilt *ilt,
+					 struct ilt_client_info *ilt_cli,
+					 u8 initop)
+{
+	int i;
+
+	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+		return;
+
+	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+		bnx2x_ilt_line_init_op(bp, ilt, i, initop);
+
+	/* init/clear the ILT boundaries */
+	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
+}
+
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+				     struct ilt_client_info *ilt_cli, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+	bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
+					int cli_num, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
+{
+	if (CONFIGURE_NIC_MODE(bp))
+		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+{
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
+	if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+}
+
+static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
+				      u32 psz_reg, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+		return;
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+		break;
+	case INITOP_CLEAR:
+		break;
+	}
+}
+
+/*
+ * called during init common stage; ILT clients should be initialized
+ * prior to calling this function
+ */
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+{
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
+				  PXP2_REG_RQ_CDU_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
+				  PXP2_REG_RQ_QM_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
+				  PXP2_REG_RQ_SRC_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
+				  PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
+#define QM_INIT_MIN_CID_COUNT	31
+#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
+{
+	int port = BP_PORT(bp);
+
+	if (QM_INIT(qm_cid_count)) {
+		switch (initop) {
+		case INITOP_INIT:
+			/* set in the init-value array */
+		case INITOP_SET:
+			REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
+			       qm_cid_count/16 - 1);
+			break;
+		case INITOP_CLEAR:
+			break;
+		}
+	}
+}
+
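+/* Program the per-queue base addresses and zero the (wide-bus) pointer table
+ * entries for all 4 * QM_QUEUES_PER_FUNC queues.
+ */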
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
+				   u32 base_reg, u32 reg)
+{
+	int i;
+	u32 wb_data[2] = {0, 0};
+	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+		REG_WR(bp, base_reg + i*4,
+		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+		bnx2x_init_wr_wb(bp, reg + i*8,	 wb_data, 2);
+	}
+}
+
+/* called during init common stage */
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
+{
+	if (!QM_INIT(qm_cid_count))
+		return;
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+				       QM_REG_BASEADDR, QM_REG_PTRTBL);
+		if (CHIP_IS_E1H(bp))
+			bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+					       QM_REG_BASEADDR_EXT_A,
+					       QM_REG_PTRTBL_EXT_A);
+		break;
+	case INITOP_CLEAR:
+		break;
+	}
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+/* called during init func stage */
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+			      dma_addr_t t2_mapping, int src_cid_count)
+{
+	int i;
+	int port = BP_PORT(bp);
+
+	/* Initialize T2 */
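+	/* (chain each entry's 'next' to the physical address of the following
+	 * entry; the first/last free pointers are programmed below)
+	 */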
+	for (i = 0; i < src_cid_count-1; i++)
+		t2[i].next = (u64)(t2_mapping +
+			     (i+1)*sizeof(struct src_ent));
+
+	/* tell the searcher where the T2 table is */
+	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+	bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
+		    U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+	bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
+		    U64_LO((u64)t2_mapping +
+			   (src_cid_count-1) * sizeof(struct src_ent)),
+		    U64_HI((u64)t2_mapping +
+			   (src_cid_count-1) * sizeof(struct src_ent)));
+}
+#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
new file mode 100644
index 0000000..d946bba
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -0,0 +1,13946 @@
+/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8);
+/********************************************************/
+#define ETH_HLEN			14
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE		60
+#define ETH_MAX_PACKET_SIZE		1500
+#define ETH_MAX_JUMBO_PACKET_SIZE	9600
+#define MDIO_ACCESS_TIMEOUT		1000
+#define WC_LANE_MAX			4
+#define I2C_SWITCH_WIDTH		2
+#define I2C_BSC0			0
+#define I2C_BSC1			1
+#define I2C_WA_RETRY_CNT		3
+#define I2C_WA_PWR_ITER			(I2C_WA_RETRY_CNT - 1)
+#define MCPR_IMC_COMMAND_READ_OP	1
+#define MCPR_IMC_COMMAND_WRITE_OP	2
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL_E3		354
+#define LED_BLINK_RATE_VAL_E1X_E2	480
+/***********************************************************/
+/*			Shortcut definitions		   */
+/***********************************************************/
+
+#define NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define NIG_STATUS_EMAC0_MI_INT \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define NIG_STATUS_XGXS0_LINK10G \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define NIG_STATUS_XGXS0_LINK_STATUS \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_STATUS_SERDES0_LINK_STATUS \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define NIG_MASK_MI_INT \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_XGXS0_LINK10G \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define NIG_MASK_XGXS0_LINK_STATUS \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_SERDES0_LINK_STATUS \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+		(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+		 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define XGXS_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW |   \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ |      \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN |  \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
+				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET \
+				SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY	SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define	GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define	GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define	GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define	GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define	GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
+#define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4		LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD		LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD		LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD		LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD		LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD		LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD		LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_20GTFD		LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD		LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+#define LINK_UPDATE_MASK \
+			(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+			 LINK_STATUS_LINK_UP | \
+			 LINK_STATUS_PHYSICAL_LINK_FLAG | \
+			 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+			 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+			 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+			 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+
+#define SFP_EEPROM_CON_TYPE_ADDR		0x2
+	#define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN	0x0
+	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
+	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
+	#define SFP_EEPROM_CON_TYPE_VAL_RJ45	0x22
+
+
+#define SFP_EEPROM_10G_COMP_CODE_ADDR		0x3
+	#define SFP_EEPROM_10G_COMP_CODE_SR_MASK	(1<<4)
+	#define SFP_EEPROM_10G_COMP_CODE_LR_MASK	(1<<5)
+	#define SFP_EEPROM_10G_COMP_CODE_LRM_MASK	(1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR		0x6
+	#define SFP_EEPROM_1G_COMP_CODE_SX	(1<<0)
+	#define SFP_EEPROM_1G_COMP_CODE_LX	(1<<1)
+	#define SFP_EEPROM_1G_COMP_CODE_CX	(1<<2)
+	#define SFP_EEPROM_1G_COMP_CODE_BASE_T	(1<<3)
+
+#define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
+
+#define SFP_EEPROM_OPTIONS_ADDR			0x40
+	#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define SFP_EEPROM_OPTIONS_SIZE			2
+
+#define EDC_MODE_LINEAR				0x0022
+#define EDC_MODE_LIMITING				0x0044
+#define EDC_MODE_PASSIVE_DAC			0x0055
+#define EDC_MODE_ACTIVE_DAC			0x0066
+
+/* ETS defines*/
+#define DCBX_INVALID_COS					(0xFF)
+
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT		(0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS		(1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS			(2720)
+#define ETS_E3B0_PBF_MIN_W_VAL				(10000)
+
+#define MAX_PACKET_SIZE					(9700)
+#define MAX_KR_LINK_RETRY				4
+#define DEFAULT_TX_DRV_BRDCT		2
+#define DEFAULT_TX_DRV_IFIR		0
+#define DEFAULT_TX_DRV_POST2		3
+#define DEFAULT_TX_DRV_IPRE_DRIVER	6
+
+/**********************************************************/
+/*                     INTERFACE                          */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+	bnx2x_cl45_write(_bp, _phy, \
+		(_phy)->def_md_devad, \
+		(_bank + (_addr & 0xf)), \
+		_val)
+
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+	bnx2x_cl45_read(_bp, _phy, \
+		(_phy)->def_md_devad, \
+		(_bank + (_addr & 0xf)), \
+		_val)
+
+static int bnx2x_check_half_open_conn(struct link_params *params,
+				      struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+				      struct link_params *params);
+
+static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+{
+	u32 val = REG_RD(bp, reg);
+
+	val |= bits;
+	REG_WR(bp, reg, val);
+	return val;
+}
+
+static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+{
+	u32 val = REG_RD(bp, reg);
+
+	val &= ~bits;
+	REG_WR(bp, reg, val);
+	return val;
+}
+
+/*
+ * bnx2x_check_lfa - This function checks if link reinitialization is required,
+ *                   or link flap can be avoided.
+ *
+ * @params:	link parameters
+ * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
+ *         condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+	u32 link_status, cfg_idx, lfa_mask, cfg_size;
+	u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+	u32 saved_val, req_val, eee_status;
+	struct bnx2x *bp = params->bp;
+
+	additional_config =
+		REG_RD(bp, params->lfa_base +
+			   offsetof(struct shmem_lfa, additional_config));
+
+	/* NOTE: must be first condition checked -
+	 * to verify DCC bit is cleared in any case!
+	 */
+	if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+		DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+		REG_WR(bp, params->lfa_base +
+			   offsetof(struct shmem_lfa, additional_config),
+		       additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+		return LFA_DCC_LFA_DISABLED;
+	}
+
+	/* Verify that link is up */
+	link_status = REG_RD(bp, params->shmem_base +
+			     offsetof(struct shmem_region,
+				      port_mb[params->port].link_status));
+	if (!(link_status & LINK_STATUS_LINK_UP))
+		return LFA_LINK_DOWN;
+
+	/* if loaded after BOOT from SAN, don't flap the link in any case and
+	 * rely on link set by preboot driver
+	 */
+	if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
+		return 0;
+
+	/* Verify that loopback mode is not set */
+	if (params->loopback_mode)
+		return LFA_LOOPBACK_ENABLED;
+
+	/* Verify that MFW supports LFA */
+	if (!params->lfa_base)
+		return LFA_MFW_IS_TOO_OLD;
+
+	if (params->num_phys == 3) {
+		cfg_size = 2;
+		lfa_mask = 0xffffffff;
+	} else {
+		cfg_size = 1;
+		lfa_mask = 0xffff;
+	}
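+	/* (requested values below are packed as cfg0 in the low 16 bits and
+	 * cfg1 in the high 16 bits, so with a single configuration only the
+	 * low half is compared)
+	 */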
+
+	/* Compare Duplex */
+	saved_val = REG_RD(bp, params->lfa_base +
+			   offsetof(struct shmem_lfa, req_duplex));
+	req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+		DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+			       (saved_val & lfa_mask), (req_val & lfa_mask));
+		return LFA_DUPLEX_MISMATCH;
+	}
+	/* Compare Flow Control */
+	saved_val = REG_RD(bp, params->lfa_base +
+			   offsetof(struct shmem_lfa, req_flow_ctrl));
+	req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+		DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+			       (saved_val & lfa_mask), (req_val & lfa_mask));
+		return LFA_FLOW_CTRL_MISMATCH;
+	}
+	/* Compare Link Speed */
+	saved_val = REG_RD(bp, params->lfa_base +
+			   offsetof(struct shmem_lfa, req_line_speed));
+	req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+	if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+		DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+			       (saved_val & lfa_mask), (req_val & lfa_mask));
+		return LFA_LINK_SPEED_MISMATCH;
+	}
+
+	for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+		cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+					    offsetof(struct shmem_lfa,
+						     speed_cap_mask[cfg_idx]));
+
+		if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+			DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+				       cur_speed_cap_mask,
+				       params->speed_cap_mask[cfg_idx]);
+			return LFA_SPEED_CAP_MISMATCH;
+		}
+	}
+
+	cur_req_fc_auto_adv =
+		REG_RD(bp, params->lfa_base +
+		       offsetof(struct shmem_lfa, additional_config)) &
+		REQ_FC_AUTO_ADV_MASK;
+
+	if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+		DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+			       cur_req_fc_auto_adv, params->req_fc_auto_adv);
+		return LFA_FLOW_CTRL_MISMATCH;
+	}
+
+	eee_status = REG_RD(bp, params->shmem2_base +
+			    offsetof(struct shmem2_region,
+				     eee_status[params->port]));
+
+	if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+	     (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+	    ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+	     (params->eee_mode & EEE_MODE_ADV_LPI))) {
+		DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+			       eee_status);
+		return LFA_EEE_MISMATCH;
+	}
+
+	/* LFA conditions are met */
+	return 0;
+}
+/******************************************************************/
+/*			EPIO/GPIO section			  */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+	u32 epio_mask, gp_oenable;
+	*en = 0;
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+		return;
+	}
+
+	epio_mask = 1 << epio_pin;
+	/* Configure this EPIO as an input so its level can be read */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+	*en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+	u32 epio_mask, gp_output, gp_oenable;
+
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+		return;
+	}
+	DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+	epio_mask = 1 << epio_pin;
+	/* Set the output value for this EPIO */
+	gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+	if (en)
+		gp_output |= epio_mask;
+	else
+		gp_output &= ~epio_mask;
+
+	REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+	/* Enable this EPIO as an output */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+	}
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return -EINVAL;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		*val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+	}
+	return 0;
+}
+/******************************************************************/
+/*				ETS section			  */
+/******************************************************************/
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
+{
+	/* ETS disabled configuration*/
+	struct bnx2x *bp = params->bp;
+
+	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
+
+	/* mapping between entry priority to client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+	 * 3 bits client num.
+	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 * cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
+	 */
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
+	 * COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2   bit1	  bit0
+	 * MCP and debug are strict
+	 */
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+	/* For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/* mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+	/* ETS mode disable */
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
+	 * weight for COS0/COS1.
+	 */
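+	/* 0x2710 = 10000 decimal; both COS get the same default weight */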
+	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+	REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+* Description:
+*	Return min_w_val according to the line speed.
+*
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+	u32 min_w_val = 0;
+	/* Calculate min_w_val.*/
+	if (vars->link_up) {
+		if (vars->line_speed == SPEED_20000)
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+		else
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+	} else
+		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+	/* If the link isn't up (e.g. static configuration), assume 20Gbps. */
+	return min_w_val;
+}
+/******************************************************************************
+* Description:
+*	Get the credit upper bound from min_w_val.
+*
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+	const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+						MAX_PACKET_SIZE);
+	return credit_upper_bound;
+}
+/******************************************************************************
+* Description:
+*	Set credit upper bound for NIG.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+		NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+	if (!port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+			credit_upper_bound);
+	}
+}
+/******************************************************************************
+* Description:
+*	Return the NIG ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and will be configured according to spec.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+					const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+	/* Mapping between entry priority and client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 - COS5)
+	 * (HIGHEST). 4 bits per client number.
+	 * TODO_ETS - Should be done by reset value or init tool.
+	 */
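+	/* Each 4-bit nibble maps one priority slot to a client: port 1 uses
+	 * six clients (0x543210), port 0 uses nine (0x76543210 plus client 8
+	 * in the MSB register).
+	 */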
+	if (port) {
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+	} else {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+	}
+	/* For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/* Mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers
+	 */
+	if (port) {
+		/*Port 1 has 6 COS*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+	} else {
+		/*Port 0 has 9 COS*/
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+		       0x43210876);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+	}
+
+	/* Bitmap where each bit specifies whether the entry behaves as
+	 * strict. Bits 0,1,2 - debug and management entries, bit 3 and up -
+	 * COS entries (6 entries on port 1, 9 on port 0).
+	 * MCP and debug are strict; here all entries are made strict.
+	 */
+	if (port)
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+	else
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+	/* Please notice the register addresses are not contiguous, so a
+	 * for loop here is not appropriate. In 2 port mode only COS0-5 of
+	 * port0 can be used; DEBUG0, DEBUG1 and MGMT are never used for
+	 * WFQ. In 4 port mode only COS0-2 of port1 can be used; DEBUG0,
+	 * DEBUG1 and MGMT are never used for WFQ.
+	 */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+	if (!port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+	}
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+*	Set credit upper bound for PBF.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
+	const u8 port = params->port;
+	u32 base_upper_bound = 0;
+	u8 max_cos = 0;
+	u8 i = 0;
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (!port) {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+*	Return the PBF ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and will be configured according to spec.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 i = 0;
+	u32 base_weight = 0;
+	u8 max_cos = 0;
+
+	/* Mapping between entry priority and client number (0 - COS0
+	 * client, 1 - COS1, ... 5 - COS5)(HIGHEST). 3 bits per client
+	 * number.
+	 * TODO_ETS - Should be done by reset value or init tool.
+	 */
+	if (port)
+		/* 0x688 = 011|010|001|000 */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+	else
+		/* 0x2C688 = 101|100|011|010|001|000 */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+	/* TODO_ETS - Should be done by reset value or init tool */
+	if (port)
+		/* 0x688 = 011|010|001|000 */
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+	else
+		/* 0x2C688 = 101|100|011|010|001|000 */
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100);
+
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (!port) {
+		base_weight = PBF_REG_COS0_WEIGHT_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_weight = PBF_REG_COS0_WEIGHT_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_weight + (0x4 * i), 0);
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+*	E3B0 disable returns the ETS registers to their init values.
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+				   const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+		return -EINVAL;
+	}
+
+	bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+	bnx2x_ets_e3b0_pbf_disabled(params);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Disable returns the ETS registers to their init values.
+*
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+		      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+
+	if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+		bnx2x_ets_e2e3a0_disabled(params);
+	else if (CHIP_IS_E3B0(bp))
+		bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+	else {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+		return -EINVAL;
+	}
+
+	return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+*	Set the COS mapping to SP and BW. Until this point none of the COS
+*	are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+				  const struct bnx2x_ets_params *ets_params,
+				  const u8 cos_sp_bitmap,
+				  const u8 cos_bw_bitmap)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+	const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+	const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+	const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+	       nig_cli_subject2wfq_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+	       pbf_cli_subject2wfq_bitmap);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+*	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+				     const u8 cos_entry,
+				     const u32 min_w_val_nig,
+				     const u32 min_w_val_pbf,
+				     const u16 total_bw,
+				     const u8 bw,
+				     const u8 port)
+{
+	u32 nig_reg_adress_crd_weight = 0;
+	u32 pbf_reg_adress_crd_weight = 0;
+	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+	const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+		break;
+	case 1:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+		break;
+	case 2:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+		pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+		pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+		pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+		break;
+	}
+
+	REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+	REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+	return 0;
+}
+/******************************************************************************
+* Description:
+*	Calculate the total BW. A value of 0 isn't legal.
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+	const struct link_params *params,
+	struct bnx2x_ets_params *ets_params,
+	u16 *total_bw)
+{
+	struct bnx2x *bp = params->bp;
+	u8 cos_idx = 0;
+	u8 is_bw_cos_exist = 0;
+
+	*total_bw = 0;
+	/* Calculate total BW requested */
+	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+		if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+			is_bw_cos_exist = 1;
+			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+						   "was set to 0\n");
+				/* This is to prevent a state when ramrods
+				 * can't be sent
+				 */
+				ets_params->cos[cos_idx].params.bw_params.bw
+					 = 1;
+			}
+			*total_bw +=
+				ets_params->cos[cos_idx].params.bw_params.bw;
+		}
+	}
+
+	/* Check total BW is valid */
+	if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+		if (*total_bw == 0) {
+			DP(NETIF_MSG_LINK,
+			   "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
+			return -EINVAL;
+		}
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_E3B0_config total BW should be 100\n");
+		/* We can handle a case where the BW isn't 100; this can
+		 * happen if the TCs are joined.
+		 */
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Invalidate all the sp_pri_to_cos.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+	u8 pri = 0;
+	for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos.
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+					    u8 *sp_pri_to_cos, const u8 pri,
+					    const u8 cos_entry)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+	if (pri >= max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+		   "parameter Illegal strict priority\n");
+		return -EINVAL;
+	}
+
+	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter There can't be two COS's with "
+				   "the same strict pri\n");
+		return -EINVAL;
+	}
+
+	sp_pri_to_cos[pri] = cos_entry;
+	return 0;
+
+}
+
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in
+*	the sp_pri_cli register.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+					 const u8 pri_set,
+					 const u8 pri_offset,
+					 const u8 entry_size)
+{
+	u64 pri_cli_nig = 0;
+	pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+						    (pri_set + pri_offset));
+
+	return pri_cli_nig;
+}
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for NIG.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
+	const u8 nig_cos_offset = 3;
+	const u8 nig_pri_offset = 3;
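+	/* NIG entries are 4 bits wide; COS clients start at client 3 and
+	 * priority slot 3 (slots 0-2 hold MCP, dbg0, dbg1). The PBF variant
+	 * below uses 3-bit entries with no offsets.
+	 */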
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+		nig_pri_offset, 4);
+
+}
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for PBF.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+	const u8 pbf_cos_offset = 0;
+	const u8 pbf_pri_offset = 0;
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+		pbf_pri_offset, 3);
+
+}
+
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos.(which COS has higher priority)
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+					     u8 *sp_pri_to_cos)
+{
+	struct bnx2x *bp = params->bp;
+	u8 i = 0;
+	const u8 port = params->port;
+	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
+	u64 pri_cli_nig = 0x210;
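+	/* 0x210 pre-assigns 4-bit priority slots 0-2 to clients 0 (MCP),
+	 * 1 (dbg0) and 2 (dbg1).
+	 */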
+	u32 pri_cli_pbf = 0x0;
+	u8 pri_set = 0;
+	u8 pri_bitmask = 0;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+	u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
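+	/* One bit per usable COS; a bit is cleared once that COS is consumed */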
+
+	/* Set all the strict priority first */
+	for (i = 0; i < max_num_of_cos; i++) {
+		if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+			if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
+				DP(NETIF_MSG_LINK,
+					   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					   "invalid cos entry\n");
+				return -EINVAL;
+			}
+
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    sp_pri_to_cos[i], pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    sp_pri_to_cos[i], pri_set);
+			pri_bitmask = 1 << sp_pri_to_cos[i];
+			/* Fail if this COS was already consumed */
+			if (!(pri_bitmask & cos_bit_to_set)) {
+				DP(NETIF_MSG_LINK,
+					"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					"invalid There can't be two COS's with"
+					" the same strict pri\n");
+				return -EINVAL;
+			}
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	/* Set all the non-strict priority entries; here i == COS */
+	for (i = 0; i < max_num_of_cos; i++) {
+		pri_bitmask = 1 << i;
+		/* Check if COS was already used for SP */
+		if (pri_bitmask & cos_bit_to_set) {
+			/* COS wasn't used for SP */
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    i, pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    i, pri_set);
+			/* COS is used remove it from bitmap.*/
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	if (pri_set != max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+				   "entries were set\n");
+		return -EINVAL;
+	}
+
+	if (port) {
+		/* Only 6 usable clients*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       (u32)pri_cli_nig);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf);
+	} else {
+		/* Only 9 usable clients*/
+		const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+		const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       pri_cli_nig_lsb);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+		       pri_cli_nig_msb);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf);
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 struct bnx2x_ets_params *ets_params)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+	const u8 port = params->port;
+	u16 total_bw = 0;
+	const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 cos_bw_bitmap = 0;
+	u8 cos_sp_bitmap = 0;
+	u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+	u8 cos_entry = 0;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+		return -EINVAL;
+	}
+
+	if (ets_params->num_of_cos > max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+				   "isn't supported\n");
+		return -EINVAL;
+	}
+
+	/* Prepare sp strict priority parameters*/
+	bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+	/* Prepare BW parameters*/
+	bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+						   &total_bw);
+	if (bnx2x_status) {
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_E3B0_config get_total_bw failed\n");
+		return -EINVAL;
+	}
+
+	/* Upper bound is set according to current link speed (min_w_val
+	 * should be the same for upper bound and COS credit val).
+	 */
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+
+	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+			cos_bw_bitmap |= (1 << cos_entry);
+			/* The function also sets the BW in HW (not the
+			 * mapping yet).
+			 */
+			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+				bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+				total_bw,
+				ets_params->cos[cos_entry].params.bw_params.bw,
+				 port);
+		} else if (bnx2x_cos_state_strict ==
+			ets_params->cos[cos_entry].state){
+			cos_sp_bitmap |= (1 << cos_entry);
+
+			bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+				params,
+				sp_pri_to_cos,
+				ets_params->cos[cos_entry].params.sp_params.pri,
+				cos_entry);
+
+		} else {
+			DP(NETIF_MSG_LINK,
+			   "bnx2x_ets_e3b0_config cos state not valid\n");
+			return -EINVAL;
+		}
+		if (bnx2x_status) {
+			DP(NETIF_MSG_LINK,
+			   "bnx2x_ets_e3b0_config set cos bw failed\n");
+			return bnx2x_status;
+		}
+	}
+
+	/* Set SP register (which COS has higher priority) */
+	bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+							 sp_pri_to_cos);
+
+	if (bnx2x_status) {
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
+		return bnx2x_status;
+	}
+
+	/* Set client mapping of BW and strict */
+	bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+					      cos_sp_bitmap,
+					      cos_bw_bitmap);
+
+	if (bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+		return bnx2x_status;
+	}
+	return 0;
+}
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+	/* ETS enabled BW limit configuration */
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+	/* Defines which entries (clients) are subjected to WFQ arbitration
+	 * COS0 0x8
+	 * COS1 0x10
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
+	 * client numbers (WEIGHT_0 does not actually have to represent
+	 * client 0)
+	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
+	 */
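+	/* 0x111A = 001|000|100|011|010 (PRI4..PRI0), matching the map above */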
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+	/* ETS mode enabled*/
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
+	 * entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2     bit1	   bit0
+	 * MCP and debug are strict
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+			const u32 cos1_bw)
+{
+	/* ETS enabled BW limit configuration */
+	struct bnx2x *bp = params->bp;
+	const u32 total_bw = cos0_bw + cos1_bw;
+	u32 cos0_credit_weight = 0;
+	u32 cos1_credit_weight = 0;
+
+	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+	if ((!total_bw) ||
+	    (!cos0_bw) ||
+	    (!cos1_bw)) {
+		DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
+		return;
+	}
+
+	cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+		total_bw;
+	cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+		total_bw;
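+	/* Example: cos0_bw = 30 and cos1_bw = 70 yield weights of 30% and
+	 * 70% of ETS_BW_LIMIT_CREDIT_WEIGHT respectively.
+	 */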
+
+	bnx2x_ets_bw_limit_common(params);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+	REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+	REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+	/* ETS enabled strict configuration */
+	struct bnx2x *bp = params->bp;
+	u32 val	= 0;
+
+	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries,
+	 * 3 - COS0 entry, 4 - COS1 entry.
+	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 *  bit4   bit3	  bit2      bit1     bit0
+	 * MCP and debug are strict
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+	/* For strict priority entries defines the number of consecutive slots
+	 * for the highest priority.
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/* ETS mode disable */
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+	/* Defines which COS number is treated as the strict high priority */
+	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+	/* Mapping between entry priority and client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+	 * 3 bits per client number.
+	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
+	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
+	 */
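+	/* 0x2318 encodes the first row (strict_cos == 0), 0x22E0 the second
+	 * (strict_cos != 0).
+	 */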
+	val = (!strict_cos) ? 0x2318 : 0x22E0;
+	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+	return 0;
+}
+
+/******************************************************************/
+/*			PFC section				  */
+/******************************************************************/
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+				  struct link_vars *vars,
+				  u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u32 xmac_base;
+	u32 pause_val, pfc0_val, pfc1_val;
+
+	/* XMAC base addr */
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	/* Initialize pause and pfc registers */
+	pause_val = 0x18000;
+	pfc0_val = 0xFFFF8000;
+	pfc1_val = 0x2;
+
+	/* No PFC support */
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) {
+
+		/* RX flow control - Process pause frame in receive direction
+		 */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+		/* TX flow control - Send pause packet when buffer is full */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+	} else {/* PFC support */
+		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+		/* Write pause and PFC registers */
+		REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+		pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+
+	}
+
+	/* Write pause and PFC registers */
+	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+
+	/* Set MAC address for source TX Pause/PFC frames */
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+	       ((params->mac_addr[2] << 24) |
+		(params->mac_addr[3] << 16) |
+		(params->mac_addr[4] << 8) |
+		(params->mac_addr[5])));
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+	       ((params->mac_addr[0] << 8) |
+		(params->mac_addr[1])));
+
+	udelay(30);
+}
+
+/******************************************************************/
+/*			MAC/PBF section				  */
+/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
+			       u32 emac_base)
+{
+	u32 new_mode, cur_mode;
+	u32 clc_cnt;
+	/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	 * (a value of 49==0x31) and make sure that the AUTO poll is off
+	 */
+	cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+
+	if (USES_WARPCORE(bp))
+		clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
+	else
+		clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
+
+	if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+	    (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+		return;
+
+	new_mode = cur_mode &
+		~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+	new_mode |= clc_cnt;
+	new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+
+	DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
+	   cur_mode, new_mode);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
+	udelay(40);
+}
+
+static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
+					struct link_params *params)
+{
+	u8 phy_index;
+	/* Set mdio clock per phy */
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++)
+		bnx2x_set_mdio_clk(bp, params->chip_id,
+				   params->phy[phy_index].mdio_ctrl);
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+	u32 port4mode_ovwr_val;
+	/* Check 4-port override enabled */
+	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+	if (port4mode_ovwr_val & (1<<0)) {
+		/* Return 4-port mode override value */
+		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+	}
+	/* Return 4-port mode from input pin */
+	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+static void bnx2x_emac_init(struct link_params *params,
+			    struct link_vars *vars)
+{
+	/* reset and unreset the emac core */
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	u32 val;
+	u16 timeout;
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	udelay(5);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+	/* init emac - use read-modify-write */
+	/* self clear reset */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+	timeout = 200;
+	do {
+		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+		if (!timeout) {
+			DP(NETIF_MSG_LINK, "EMAC timeout!\n");
+			return;
+		}
+		timeout--;
+	} while (val & EMAC_MODE_RESET);
+
+	bnx2x_set_mdio_emac_per_phy(bp, params);
+	/* Set mac address */
+	val = ((params->mac_addr[0] << 8) |
+		params->mac_addr[1]);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
+
+	val = ((params->mac_addr[2] << 24) |
+	       (params->mac_addr[3] << 16) |
+	       (params->mac_addr[4] << 8) |
+		params->mac_addr[5]);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void bnx2x_set_xumac_nig(struct link_params *params,
+				u16 tx_pause_en,
+				u8 enable)
+{
+	struct bnx2x *bp = params->bp;
+
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+	       NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
+{
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	u32 val;
+	struct bnx2x *bp = params->bp;
+	if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
+		   (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
+		return;
+	val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+	if (en)
+		val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+			UMAC_COMMAND_CONFIG_REG_RX_ENA);
+	else
+		val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+			 UMAC_COMMAND_CONFIG_REG_RX_ENA);
+	/* Disable RX and TX */
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
+			    struct link_vars *vars, u8 lb)
+{
+	u32 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	struct bnx2x *bp = params->bp;
+	/* Reset UMAC */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+	usleep_range(1000, 2000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+	DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+		UMAC_COMMAND_CONFIG_REG_PAD_EN |
+		UMAC_COMMAND_CONFIG_REG_SW_RESET |
+		UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
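+	/* Bits [3:2] select the MAC speed, per the cases below:
+	 * 0 = 10M, 1 = 100M, 2 = 1G, 3 = 2.5G.
+	 */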
+	switch (vars->line_speed) {
+	case SPEED_10:
+		val |= (0<<2);
+		break;
+	case SPEED_100:
+		val |= (1<<2);
+		break;
+	case SPEED_1000:
+		val |= (2<<2);
+		break;
+	case SPEED_2500:
+		val |= (3<<2);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+			       vars->line_speed);
+		break;
+	}
+	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+	if (vars->duplex == DUPLEX_HALF)
+		val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
+
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Configure UMAC for EEE */
+	if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+		DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+		REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+		       UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+		REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+	} else {
+		REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+	}
+
+	/* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+	       ((params->mac_addr[2] << 24) |
+		(params->mac_addr[3] << 16) |
+		(params->mac_addr[4] << 8) |
+		(params->mac_addr[5])));
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+	       ((params->mac_addr[0] << 8) |
+		(params->mac_addr[1])));
+
+	/* Enable RX and TX */
+	val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+		UMAC_COMMAND_CONFIG_REG_RX_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Remove SW Reset */
+	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
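+	/* 0x2710 = 10000 bytes */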
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+	vars->mac_type = MAC_TYPE_UMAC;
+
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
+{
+	struct bnx2x *bp = params->bp;
+	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+	/* In 4-port mode, the mode needs to be set only once, so if XMAC is
+	 * already out of reset, it means the mode has already been set,
+	 * and it must *not* reset the XMAC again, since it controls both
+	 * ports of the path.
+	 */
+
+	if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
+	     (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
+	     (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
+	    is_port4mode &&
+	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	     MISC_REGISTERS_RESET_REG_2_XMAC)) {
+		DP(NETIF_MSG_LINK,
+		   "XMAC already out of reset in 4-port mode\n");
+		return;
+	}
+
+	/* Hard reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	usleep_range(1000, 2000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	if (is_port4mode) {
+		DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+		/* Set the number of ports on the system side to up to 2 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+		/* Set the number of ports on the Warp Core to 10G */
+		REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+	} else {
+		/* Set the number of ports on the system side to 1 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+		if (max_speed == SPEED_10000) {
+			DP(NETIF_MSG_LINK,
+			   "Init XMAC to 10G x 1 port per path\n");
+			/* Set the number of ports on the Warp Core to 10G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+		} else {
+			DP(NETIF_MSG_LINK,
+			   "Init XMAC to 20G x 2 ports per path\n");
+			/* Set the number of ports on the Warp Core to 20G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+		}
+	}
+	/* Soft reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+	usleep_range(1000, 2000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
+{
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+	u32 val;
+
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	    MISC_REGISTERS_RESET_REG_2_XMAC) {
+		/* Send an indication to change the state in the NIG back to XON
+		 * Clearing this bit enables the next set of this bit to get
+		 * rising edge
+		 */
+		pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+		       (pfc_ctrl & ~(1<<1)));
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+		       (pfc_ctrl | (1<<1)));
+		DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+		val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+		if (en)
+			val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+		else
+			val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+		REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+	}
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
+	u32 val, xmac_base;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	bnx2x_xmac_init(params, vars->line_speed);
+
+	/* This register determines on which events the MAC will assert
+	 * error on the i/f to the NIG along w/ EOP.
+	 */
+
+	/* This register tells the NIG whether to send traffic to UMAC
+	 * or XMAC
+	 */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+	/* When XMAC is in XLGMII mode, disable sending idles for fault
+	 * detection.
+	 */
+	if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
+		REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
+		       (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+			XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+		REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+		REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+	}
+	/* Set Max packet size */
+	REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+	/* CRC append for Tx packets */
+	REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+	/* update PFC */
+	bnx2x_update_pfc_xmac(params, vars, 0);
+
+	if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+		DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+		REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+		REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+	} else {
+		REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+	}
+
+	/* Enable TX and RX */
+	val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+	/* Set MAC in XLGMII mode for dual-mode */
+	if ((vars->line_speed == SPEED_20000) &&
+	    (params->phy[INT_PHY].supported &
+	     SUPPORTED_20000baseKR2_Full))
+		val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+	vars->mac_type = MAC_TYPE_XMAC;
+
+	return 0;
+}
+
+static int bnx2x_emac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "enabling EMAC\n");
+
+	/* Disable BMAC */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* enable emac and not bmac */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+
+	/* ASIC */
+	if (vars->phy_flags & PHY_XGXS_FLAG) {
+		u32 ser_lane = ((params->lane_config &
+				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+		DP(NETIF_MSG_LINK, "XGXS\n");
+		/* select the master lanes (out of 0-3) */
+		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
+		/* select XGXS */
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+	} else { /* SerDes */
+		DP(NETIF_MSG_LINK, "SerDes\n");
+		/* select SerDes */
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+	}
+
+	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+		      EMAC_RX_MODE_RESET);
+	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+		      EMAC_TX_MODE_RESET);
+
+		/* pause enable/disable */
+	/* pause enable/disable */
+	bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+		       EMAC_RX_MODE_FLOW_EN);
+
+	bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+		       (EMAC_TX_MODE_EXT_PAUSE_EN |
+			EMAC_TX_MODE_FLOW_EN));
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) {
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+			bnx2x_bits_en(bp, emac_base +
+				      EMAC_REG_EMAC_RX_MODE,
+				      EMAC_RX_MODE_FLOW_EN);
+
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			bnx2x_bits_en(bp, emac_base +
+				      EMAC_REG_EMAC_TX_MODE,
+				      (EMAC_TX_MODE_EXT_PAUSE_EN |
+				       EMAC_TX_MODE_FLOW_EN));
+	} else
+		bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+			      EMAC_TX_MODE_FLOW_EN);
+	/* KEEP_VLAN_TAG, promiscuous */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+	/* Setting this bit causes MAC control frames (except for pause
+	 * frames) to be passed on for processing. This setting has no
+	 * effect on the operation of the pause frames. This bit affects
+	 * all packets regardless of RX Parser packet sorting logic.
+	 * Turn the PFC off to make sure we are in Xon state before
+	 * enabling it.
+	 */
+	EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+		DP(NETIF_MSG_LINK, "PFC is enabled\n");
+		/* Enable PFC again */
+		EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+			EMAC_REG_RX_PFC_MODE_RX_EN |
+			EMAC_REG_RX_PFC_MODE_TX_EN |
+			EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+		EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+			((0x0101 <<
+			  EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+			 (0x00ff <<
+			  EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+		val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+	}
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
+
+	/* Set Loopback */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+	if (lb)
+		val |= 0x810;
+	else
+		val &= ~0x810;
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
+
+	/* Enable emac */
+	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
+
+	/* Enable emac for jumbo packets */
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
+		(EMAC_RX_MTU_SIZE_JUMBO_ENA |
+		 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
+
+	/* Strip CRC */
+	REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+	/* Disable the NIG in/out to the bmac */
+	REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+
+	/* Enable the NIG in/out to the emac */
+	REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+	val = 0;
+	if ((params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) ||
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val = 1;
+
+	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+
+	vars->mac_type = MAC_TYPE_EMAC;
+	return 0;
+}
+
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+				   struct link_vars *vars)
+{
+	u32 wb_data[2];
+	struct bnx2x *bp = params->bp;
+	u32 bmac_addr =  params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+		NIG_REG_INGRESS_BMAC0_MEM;
+
+	u32 val = 0x14;
+	if ((!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		/* Enable BigMAC to react on received Pause packets */
+		val |= (1<<5);
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+	/* TX control */
+	val = 0xc0;
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= 0x800000;
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+				   struct link_vars *vars,
+				   u8 is_lb)
+{
+	/* Set rx control: Strip CRC and enable BigMAC to relay
+	 * control packets to the system as well
+	 */
+	u32 wb_data[2];
+	struct bnx2x *bp = params->bp;
+	u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+		NIG_REG_INGRESS_BMAC0_MEM;
+	u32 val = 0x14;
+
+	if ((!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		/* Enable BigMAC to react on received Pause packets */
+		val |= (1<<5);
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+	udelay(30);
+
+	/* Tx control */
+	val = 0xc0;
+	if (!(params->feature_config_flags &
+				FEATURE_CONFIG_PFC_ENABLED) &&
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= 0x800000;
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+		DP(NETIF_MSG_LINK, "PFC is enabled\n");
+		/* Enable PFC RX & TX & STATS and set 8 COS  */
+		wb_data[0] = 0x0;
+		wb_data[0] |= (1<<0);  /* RX */
+		wb_data[0] |= (1<<1);  /* TX */
+		wb_data[0] |= (1<<2);  /* Force initial Xon */
+		wb_data[0] |= (1<<3);  /* 8 cos */
+		wb_data[0] |= (1<<5);  /* STATS */
+		wb_data[1] = 0;
+		REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+			    wb_data, 2);
+		/* Clear the force Xon */
+		wb_data[0] &= ~(1<<2);
+	} else {
+		DP(NETIF_MSG_LINK, "PFC is disabled\n");
+		/* Disable PFC RX & TX & STATS and set 8 COS */
+		wb_data[0] = 0x8;
+		wb_data[1] = 0;
+	}
+
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+	/* Set Time (base unit is 512 bit times) between automatic
+	 * re-sending of PP packets and enable automatic re-send of
+	 * Per-Priority Packets as long as pp_gen is asserted and
+	 * pp_disable is low.
+	 */
+	val = 0x8000;
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		val |= (1<<16); /* enable automatic re-send */
+
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+		    wb_data, 2);
+
+	/* mac control */
+	val = 0x3; /* Enable RX and TX */
+	if (is_lb) {
+		val |= 0x4; /* Local loopback */
+		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+	}
+	/* When PFC enabled, Pass pause frames towards the NIG. */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		val |= ((1<<6)|(1<<5));
+
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+/******************************************************************************
+* Description:
+*  This function is needed because the NIG RX_COSx_PRIORITY_MASK registers
+*  are not contiguous, so a simple base + offset is not suitable.
+******************************************************************************/
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+					   u8 cos_entry,
+					   u32 priority_mask, u8 port)
+{
+	u32 nig_reg_rx_priority_mask_add = 0;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+		break;
+	case 1:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+		break;
+	case 2:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+		break;
+	}
+
+	REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+	return 0;
+}
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+	struct bnx2x *bp = params->bp;
+
+	REG_WR(bp, params->shmem_base +
+	       offsetof(struct shmem_region,
+			port_mb[params->port].link_status), link_status);
+}
+
+static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (SHMEM2_HAS(bp, link_attr_sync))
+		REG_WR(bp, params->shmem2_base +
+		       offsetof(struct shmem2_region,
+				link_attr_sync[params->port]), link_attr);
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+		struct link_vars *vars,
+		struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+	u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+	u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
+	u32 pkt_priority_to_cos = 0;
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+
+	int set_pfc = params->feature_config_flags &
+		FEATURE_CONFIG_PFC_ENABLED;
+	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+	/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+	 * MAC control frames (that are not pause packets)
+	 * will be forwarded to the XCM.
+	 */
+	xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+			  NIG_REG_LLH0_XCM_MASK);
+	/* NIG params will override non PFC params, since it's possible to
+	 * do transition from PFC to SAFC
+	 */
+	if (set_pfc) {
+		pause_enable = 0;
+		llfc_out_en = 0;
+		llfc_enable = 0;
+		if (CHIP_IS_E3(bp))
+			ppp_enable = 0;
+		else
+			ppp_enable = 1;
+		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm_out_en = 0;
+		hwpfc_enable = 1;
+	} else  {
+		if (nig_params) {
+			llfc_out_en = nig_params->llfc_out_en;
+			llfc_enable = nig_params->llfc_enable;
+			pause_enable = nig_params->pause_enable;
+		} else  /* Default non PFC mode - PAUSE */
+			pause_enable = 1;
+
+		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+			NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm_out_en = 1;
+	}
+
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+	REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+	REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+	       NIG_REG_LLFC_ENABLE_0, llfc_enable);
+	REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+	       NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+	REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+	       NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+	REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+	REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+	       NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+	/* Output enable for the RX_XCM interface of this port */
+	REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+	       NIG_REG_XCM0_OUT_EN, xcm_out_en);
+
+	/* HW PFC TX enable */
+	REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+	       NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
+
+	if (nig_params) {
+		u8 i = 0;
+		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+			bnx2x_pfc_nig_rx_priority_mask(bp, i,
+		nig_params->rx_cos_priority_mask[i], port);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+		       nig_params->llfc_high_priority_classes);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+		       nig_params->llfc_low_priority_classes);
+	}
+	REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+	       NIG_REG_P0_PKT_PRIORITY_TO_COS,
+	       pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+		      struct link_vars *vars,
+		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+	/* PFC and pause are mutually exclusive: when PFC is enabled,
+	 * pause is disabled, and when PFC is disabled, pause is set
+	 * according to the pause result.
+	 */
+	u32 val;
+	struct bnx2x *bp = params->bp;
+	u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* Update NIG params */
+	bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+	if (!vars->link_up)
+		return 0;
+
+	DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+
+	if (CHIP_IS_E3(bp)) {
+		if (vars->mac_type == MAC_TYPE_XMAC)
+			bnx2x_update_pfc_xmac(params, vars, 0);
+	} else {
+		val = REG_RD(bp, MISC_REG_RESET_REG_2);
+		if ((val &
+		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+		    == 0) {
+			DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+			bnx2x_emac_enable(params, vars, 0);
+			return 0;
+		}
+		if (CHIP_IS_E2(bp))
+			bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+		else
+			bnx2x_update_pfc_bmac1(params, vars);
+
+		val = 0;
+		if ((params->feature_config_flags &
+		     FEATURE_CONFIG_PFC_ENABLED) ||
+		    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+			val = 1;
+		REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+	}
+	return 0;
+}
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			       NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
+
+	/* XGXS control */
+	wb_data[0] = 0x3c;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
+
+	/* TX MAC SA */
+	wb_data[0] = ((params->mac_addr[2] << 24) |
+		       (params->mac_addr[3] << 16) |
+		       (params->mac_addr[4] << 8) |
+			params->mac_addr[5]);
+	wb_data[1] = ((params->mac_addr[0] << 8) |
+			params->mac_addr[1]);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+	/* MAC control */
+	val = 0x3;
+	if (is_lb) {
+		val |= 0x4;
+		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+	}
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+	/* Set rx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+	bnx2x_update_pfc_bmac1(params, vars);
+
+	/* Set tx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+	/* Set cnt max size */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+	/* Configure SAFC */
+	wb_data[0] = 0x1000200;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+		    wb_data, 2);
+
+	return 0;
+}
+
+static int bnx2x_bmac2_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			       NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+
+	DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
+
+	wb_data[0] = 0;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+	udelay(30);
+
+	/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+	wb_data[0] = 0x3c;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
+
+	udelay(30);
+
+	/* TX MAC SA */
+	wb_data[0] = ((params->mac_addr[2] << 24) |
+		       (params->mac_addr[3] << 16) |
+		       (params->mac_addr[4] << 8) |
+			params->mac_addr[5]);
+	wb_data[1] = ((params->mac_addr[0] << 8) |
+			params->mac_addr[1]);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+		    wb_data, 2);
+
+	udelay(30);
+
+	/* Configure SAFC */
+	wb_data[0] = 0x1000200;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+		    wb_data, 2);
+	udelay(30);
+
+	/* Set RX MTU */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+	udelay(30);
+
+	/* Set TX MTU */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+	udelay(30);
+	/* Set cnt max size */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+	udelay(30);
+	bnx2x_update_pfc_bmac2(params, vars, is_lb);
+
+	return 0;
+}
+
+static int bnx2x_bmac_enable(struct link_params *params,
+			     struct link_vars *vars,
+			     u8 is_lb, u8 reset_bmac)
+{
+	int rc = 0;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 val;
+	/* Reset and unreset the BigMac */
+	if (reset_bmac) {
+		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+		usleep_range(1000, 2000);
+	}
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* Enable access for bmac registers */
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
+	/* Enable BMAC according to BMAC type */
+	if (CHIP_IS_E2(bp))
+		rc = bnx2x_bmac2_enable(params, vars, is_lb);
+	else
+		rc = bnx2x_bmac1_enable(params, vars, is_lb);
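+	/* Route the port through the BMAC: select XGXS mode and lane 0, and
+	 * steer NIG egress/ingress away from the EMAC and towards the BMAC.
+	 */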
+	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+	REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+	val = 0;
+	if ((params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) ||
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val = 1;
+	REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+
+	vars->mac_type = MAC_TYPE_BMAC;
+	return rc;
+}
+
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
+{
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+	u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+	if (CHIP_IS_E2(bp))
+		bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+	else
+		bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
+	/* Only if the bmac is out of reset */
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+			(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+	    nig_bmac_enable) {
+		/* Clear Rx Enable bit in BMAC_CONTROL register */
+		REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+		if (en)
+			wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+		else
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+		REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
+		usleep_range(1000, 2000);
+	}
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+			    u32 line_speed)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 init_crd, crd;
+	u32 count = 1000;
+
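+	/* Stop new tasks on the port, wait for the PBF credit counter to
+	 * drain back to its init value (up to ~1000 polls of 5-10ms each),
+	 * reprogram pause/threshold/init-credit for the new speed, then
+	 * pulse PBF_REG_INIT_P0 so the new credit takes effect before the
+	 * port is re-enabled.
+	 */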
+	/* Disable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+	/* Wait for init credit */
+	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
+
+	while ((init_crd != crd) && count) {
+		usleep_range(5000, 10000);
+		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+		count--;
+	}
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	if (init_crd != crd) {
+		DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+			  init_crd, crd);
+		return -EINVAL;
+	}
+
+	if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+	    line_speed == SPEED_10 ||
+	    line_speed == SPEED_100 ||
+	    line_speed == SPEED_1000 ||
+	    line_speed == SPEED_2500) {
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+		/* Update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+		/* Update init credit */
+		init_crd = 778;		/* (800-18-4) */
+
+	} else {
+		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+			      ETH_OVREHEAD)/16;
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+		/* Update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+		/* Update init credit */
+		switch (line_speed) {
+		case SPEED_10000:
+			init_crd = thresh + 553 - 22;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+				  line_speed);
+			return -EINVAL;
+		}
+	}
+	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+		 line_speed, init_crd);
+
+	/* Probe the credit changes */
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+	usleep_range(5000, 10000);
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+	/* Enable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+	return 0;
+}
+
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
+ *
+ * @bp:			driver handle
+ * @mdc_mdio_access:	access type
+ * @port:		port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on mdc_mdio_access, the port, and whether the
+ * ports are swapped. Each phy has a default access mode, which may
+ * also be overridden by nvram configuration. That setting, whether
+ * it is the default phy configuration or the nvram override, is
+ * passed here as mdc_mdio_access and selects the emac_base for the
+ * CL45 read/write operations.
+ */
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+			       u32 mdc_mdio_access, u8 port)
+{
+	u32 emac_base = 0;
+	switch (mdc_mdio_access) {
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+		if (REG_RD(bp, NIG_REG_PORT_SWAP))
+			emac_base = GRCBASE_EMAC1;
+		else
+			emac_base = GRCBASE_EMAC0;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+		if (REG_RD(bp, NIG_REG_PORT_SWAP))
+			emac_base = GRCBASE_EMAC0;
+		else
+			emac_base = GRCBASE_EMAC1;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+		emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+		break;
+	default:
+		break;
+	}
+	return emac_base;
+}
+
+/******************************************************************/
+/*			CL22 access functions			  */
+/******************************************************************/
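+/* These helpers temporarily clear the CLAUSE_45 bit in the EMAC MDIO mode
+ * register, perform a single Clause 22 read or write (polling the
+ * START_BUSY bit for up to 50 * 10us), and then restore the previous mode.
+ */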
+static int bnx2x_cl22_write(struct bnx2x *bp,
+				       struct bnx2x_phy *phy,
+				       u16 reg, u16 val)
+{
+	u32 tmp, mode;
+	u8 i;
+	int rc = 0;
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+	/* Address */
+	tmp = ((phy->addr << 21) | (reg << 16) | val |
+	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		rc = -EFAULT;
+	}
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+	return rc;
+}
+
+static int bnx2x_cl22_read(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u16 reg, u16 *ret_val)
+{
+	u32 val, mode;
+	u16 i;
+	int rc = 0;
+
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+	/* Address */
+	val = ((phy->addr << 21) | (reg << 16) |
+	       EMAC_MDIO_COMM_COMMAND_READ_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+			udelay(5);
+			break;
+		}
+	}
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+		*ret_val = 0;
+		rc = -EFAULT;
+	}
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+	return rc;
+}
+
+/******************************************************************/
+/*			CL45 access functions			  */
+/******************************************************************/
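+/* Clause 45 accesses are performed in two MDIO cycles: an address cycle
+ * that latches the register offset within the selected devad, followed by
+ * a read or write data cycle.  Each cycle is polled for up to 50 * 10us
+ * before the access is declared timed out.
+ */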
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+			   u8 devad, u16 reg, u16 *ret_val)
+{
+	u32 val;
+	u16 i;
+	int rc = 0;
+	u32 chip_id;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+		chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+			  ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+		bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+	}
+
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			      EMAC_MDIO_STATUS_10MB);
+	/* Address */
+	val = ((phy->addr << 21) | (devad << 16) | reg |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "read phy register failed\n");
+		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+		*ret_val = 0;
+		rc = -EFAULT;
+	} else {
+		/* Data */
+		val = ((phy->addr << 21) | (devad << 16) |
+		       EMAC_MDIO_COMM_COMMAND_READ_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			val = REG_RD(bp, phy->mdio_ctrl +
+				     EMAC_REG_EMAC_MDIO_COMM);
+			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+				*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+				break;
+			}
+		}
+		if (val & EMAC_MDIO_COMM_START_BUSY) {
+			DP(NETIF_MSG_LINK, "read phy register failed\n");
+			netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+			*ret_val = 0;
+			rc = -EFAULT;
+		}
+	}
+	/* Work around for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
+
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			       EMAC_MDIO_STATUS_10MB);
+	return rc;
+}
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+			    u8 devad, u16 reg, u16 val)
+{
+	u32 tmp;
+	u8 i;
+	int rc = 0;
+	u32 chip_id;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+		chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+			  ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+		bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+	}
+
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			      EMAC_MDIO_STATUS_10MB);
+
+	/* Address */
+	tmp = ((phy->addr << 21) | (devad << 16) | reg |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+		rc = -EFAULT;
+	} else {
+		/* Data */
+		tmp = ((phy->addr << 21) | (devad << 16) | val |
+		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			tmp = REG_RD(bp, phy->mdio_ctrl +
+				     EMAC_REG_EMAC_MDIO_COMM);
+			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+				udelay(5);
+				break;
+			}
+		}
+		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+			DP(NETIF_MSG_LINK, "write phy register failed\n");
+			netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+			rc = -EFAULT;
+		}
+	}
+	/* Work around for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			       EMAC_MDIO_STATUS_10MB);
+	return rc;
+}
+
+/******************************************************************/
+/*			EEE section				   */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+
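+	/* The value at shmem2_base is the size of the shmem2 region; the
+	 * eee_status array is only present if the region is large enough to
+	 * contain it.
+	 */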
+	if (REG_RD(bp, params->shmem2_base) <=
+		   offsetof(struct shmem2_region, eee_status[params->port]))
+		return 0;
+
+	return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+	switch (nvram_mode) {
+	case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+		*idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+		break;
+	case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+		*idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+		break;
+	case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+		*idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+		break;
+	default:
+		*idle_timer = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+	switch (idle_timer) {
+	case EEE_MODE_NVRAM_BALANCED_TIME:
+		*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+		break;
+	case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+		*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+		break;
+	case EEE_MODE_NVRAM_LATENCY_TIME:
+		*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+		break;
+	default:
+		*nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+		break;
+	}
+
+	return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+	u32 eee_mode, eee_idle;
+	struct bnx2x *bp = params->bp;
+
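+	/* Resolve the EEE idle timer (in usec): either taken directly from
+	 * eee_mode, translated from the nvram power-mode encoding carried in
+	 * eee_mode, or read from the port's nvram configuration.
+	 */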
+	if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+		if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+			/* time value in eee_mode --> used directly */
+			eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+		} else {
+			/* hsi value in eee_mode --> time */
+			if (bnx2x_eee_nvram_to_time(params->eee_mode &
+						    EEE_MODE_NVRAM_MASK,
+						    &eee_idle))
+				return 0;
+		}
+	} else {
+		/* hsi values in nvram --> time*/
+		eee_mode = ((REG_RD(bp, params->shmem_base +
+				    offsetof(struct shmem_region, dev_info.
+				    port_feature_config[params->port].
+				    eee_power_mode)) &
+			     PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+			    PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+		if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+			return 0;
+	}
+
+	return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+				   struct link_vars *vars)
+{
+	u32 eee_idle = 0, eee_mode;
+	struct bnx2x *bp = params->bp;
+
+	eee_idle = bnx2x_eee_calc_timer(params);
+
+	if (eee_idle) {
+		REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+		       eee_idle);
+	} else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+		   (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+		   (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+		DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+		return -EINVAL;
+	}
+
+	vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+	if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+		/* eee_idle is in 1 usec units --> eee_status is in 16 usec units */
+		eee_idle >>= 4;
+		vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+				    SHMEM_EEE_TIME_OUTPUT_BIT;
+	} else {
+		if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+			return -EINVAL;
+		vars->eee_status |= eee_mode;
+	}
+
+	return 0;
+}
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+				     struct link_vars *vars, u8 mode)
+{
+	vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+	/* Propagate params' bits --> vars (for migration exposure) */
+	if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+		vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+	else
+		vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+	if (params->eee_mode & EEE_MODE_ADV_LPI)
+		vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+	else
+		vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+	return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+				struct link_params *params,
+				struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+
+	/* Make certain LPI is disabled */
+	REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+	vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+	return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars, u8 modes)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+
+	/* Mask events preventing LPI generation */
+	REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+	if (modes & SHMEM_EEE_10G_ADV) {
+		DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+		val |= 0x8;
+	}
+	if (modes & SHMEM_EEE_1G_ADV) {
+		DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+		val |= 0x4;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+	vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+	vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+	return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (bnx2x_eee_has_cap(params))
+		REG_WR(bp, params->shmem2_base +
+		       offsetof(struct shmem2_region,
+				eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 adv = 0, lp = 0;
+	u32 lp_adv = 0;
+	u8 neg = 0;
+
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
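+	/* Per the IEEE EEE advertisement bit layout, 0x2 covers 100BASE-TX,
+	 * 0x14 the 1G encodings (1000BASE-T/KX) and 0x68 the 10G encodings
+	 * (10GBASE-T/KX4/KR).  EEE is reported active only if both sides
+	 * advertise the speed that was actually negotiated.
+	 */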
+	if (lp & 0x2) {
+		lp_adv |= SHMEM_EEE_100M_ADV;
+		if (adv & 0x2) {
+			if (vars->line_speed == SPEED_100)
+				neg = 1;
+			DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+		}
+	}
+	if (lp & 0x14) {
+		lp_adv |= SHMEM_EEE_1G_ADV;
+		if (adv & 0x14) {
+			if (vars->line_speed == SPEED_1000)
+				neg = 1;
+			DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+		}
+	}
+	if (lp & 0x68) {
+		lp_adv |= SHMEM_EEE_10G_ADV;
+		if (adv & 0x68) {
+			if (vars->line_speed == SPEED_10000)
+				neg = 1;
+			DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+		}
+	}
+
+	vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+	vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+	if (neg) {
+		DP(NETIF_MSG_LINK, "EEE is active\n");
+		vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+	}
+
+}
+
+/******************************************************************/
+/*			BSC access functions from E3	          */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+	int idx;
+	u32 board_cfg, sfp_ctrl;
+	u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	/* Read I2C output PINs */
+	board_cfg = REG_RD(bp, params->shmem_base +
+			   offsetof(struct shmem_region,
+				    dev_info.shared_hw_config.board));
+	i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+	i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+			SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+	/* Read I2C output value */
+	sfp_ctrl = REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+	i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+	i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+	DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+	for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+		bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+			  struct bnx2x *bp,
+			  u8 sl_devid,
+			  u16 sl_addr,
+			  u8 lc_addr,
+			  u8 xfer_cnt,
+			  u32 *data_array)
+{
+	u32 val, i;
+	int rc = 0;
+
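+	/* The MCP IMC engine is driven in three steps: program the slave
+	 * device ID, issue a zero-byte write to set the slave's address
+	 * pointer, then issue the actual read.  Each step is polled for
+	 * completion via the IMC_STATUS field, and the result is copied out
+	 * of MCPR_IMC_DATAREG0..3 (byte-swapped on big-endian hosts).
+	 */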
+	if (xfer_cnt > 16) {
+		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+					xfer_cnt);
+		return -EINVAL;
+	}
+	bnx2x_bsc_module_sel(params);
+
+	xfer_cnt = 16 - lc_addr;
+
+	/* Enable the engine */
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	val |= MCPR_IMC_COMMAND_ENABLE;
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* Program slave device ID */
+	val = (sl_devid << 16) | sl_addr;
+	REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+	/* Start xfer with 0 bytes to update the slave's address pointer */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+	      (MCPR_IMC_COMMAND_WRITE_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* Poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
+								i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	/* Start xfer with read op */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+		(MCPR_IMC_COMMAND_READ_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+		  (xfer_cnt);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* Poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	for (i = (lc_addr >> 2); i < 4; i++) {
+		data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+		data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+				((data_array[i] & 0x0000ff00) << 8) |
+				((data_array[i] & 0x00ff0000) >> 8) |
+				((data_array[i] & 0xff000000) >> 24);
+#endif
+	}
+	return rc;
+}
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+				     u8 devad, u16 reg, u16 or_val)
+{
+	u16 val;
+	bnx2x_cl45_read(bp, phy, devad, reg, &val);
+	bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 devad, u16 reg, u16 and_val)
+{
+	u16 val;
+	bnx2x_cl45_read(bp, phy, devad, reg, &val);
+	bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val)
+{
+	u8 phy_index;
+	/* Probe for the phy according to the given phy_addr, and execute
+	 * the read request on it
+	 */
+	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+		if (params->phy[phy_index].addr == phy_addr) {
+			return bnx2x_cl45_read(params->bp,
+					       &params->phy[phy_index], devad,
+					       reg, ret_val);
+		}
+	}
+	return -EINVAL;
+}
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val)
+{
+	u8 phy_index;
+	/* Probe for the phy according to the given phy_addr, and execute
+	 * the write request on it
+	 */
+	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+		if (params->phy[phy_index].addr == phy_addr) {
+			return bnx2x_cl45_write(params->bp,
+						&params->phy[phy_index], devad,
+						reg, val);
+		}
+	}
+	return -EINVAL;
+}
+
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	u8 lane = 0;
+	struct bnx2x *bp = params->bp;
+	u32 path_swap, path_swap_ovr;
+	u8 path, port;
+
+	path = BP_PATH(bp);
+	port = params->port;
+
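+	/* The physical warpcore lane is derived from the PCIe path and the
+	 * port, each possibly flipped by the swap/override straps read
+	 * below: four-port mode uses lane = (port << 1) + path, two-port
+	 * mode uses lane = path << 1.
+	 */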
+	if (bnx2x_is_4_port_mode(bp)) {
+		u32 port_swap, port_swap_ovr;
+
+		/* Figure out path swap value */
+		path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1)
+			path_swap = (path_swap_ovr & 0x2);
+		else
+			path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+		if (path_swap)
+			path = path ^ 1;
+
+		/* Figure out port swap value */
+		port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+		if (port_swap_ovr & 0x1)
+			port_swap = (port_swap_ovr & 0x2);
+		else
+			port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+		if (port_swap)
+			port = port ^ 1;
+
+		lane = (port<<1) + path;
+	} else { /* Two port mode - no port swap */
+
+		/* Figure out path swap value */
+		path_swap_ovr =
+			REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1) {
+			path_swap = (path_swap_ovr & 0x2);
+		} else {
+			path_swap =
+				REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+		}
+		if (path_swap)
+			path = path ^ 1;
+
+		lane = path << 1;
+	}
+	return lane;
+}
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+			      struct bnx2x_phy *phy)
+{
+	u32 ser_lane;
+	u16 offset, aer_val;
+	struct bnx2x *bp = params->bp;
+	ser_lane = ((params->lane_config &
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+	offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+		(phy->addr + ser_lane) : 0;
+
+	if (USES_WARPCORE(bp)) {
+		aer_val = bnx2x_get_warpcore_lane(phy, params);
+		/* In Dual-lane mode, two lanes are joined together,
+		 * so in order to configure them, the AER broadcast method is
+		 * used here.
+		 * 0x200 is the broadcast address for lanes 0,1
+		 * 0x201 is the broadcast address for lanes 2,3
+		 */
+		if (phy->flags & FLAGS_WC_DUAL_MODE)
+			aer_val = (aer_val >> 1) | 0x200;
+	} else if (CHIP_IS_E2(bp))
+		aer_val = 0x3800 + offset - 1;
+	else
+		aer_val = 0x3800 + offset;
+
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, aer_val);
+
+}
+
+/******************************************************************/
+/*			Internal phy section			  */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+	u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	/* Set Clause 22 */
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+	udelay(500);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+	udelay(500);
+	 /* Set Clause 45 */
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+	val = SERDES_RESET_BITS << (port*16);
+
+	/* Reset and unreset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+	udelay(500);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+	bnx2x_set_serdes_access(bp, port);
+
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+	       DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		/* Set correct devad */
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+		       phy->def_md_devad);
+		break;
+	}
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u32 val;
+	DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+	port = params->port;
+
+	val = XGXS_RESET_BITS << (port*16);
+
+	/* Reset and unreset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+	udelay(500);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+	bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+				 PHY_INIT);
+}
+
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+				     struct link_params *params, u16 *ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/* Resolve pause mode and advertisement.  Please refer to Table
+	 * 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+	switch (phy->req_flow_ctrl) {
+	case BNX2X_FLOW_CTRL_AUTO:
+		switch (params->req_fc_auto_adv) {
+		case BNX2X_FLOW_CTRL_BOTH:
+		case BNX2X_FLOW_CTRL_RX:
+			*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+			break;
+		case BNX2X_FLOW_CTRL_TX:
+			*ieee_fc |=
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+			break;
+		default:
+			break;
+		}
+		break;
+	case BNX2X_FLOW_CTRL_TX:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+
+	case BNX2X_FLOW_CTRL_RX:
+	case BNX2X_FLOW_CTRL_BOTH:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		break;
+
+	case BNX2X_FLOW_CTRL_NONE:
+	default:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+		break;
+	}
+	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
+
+static void set_phy_vars(struct link_params *params,
+			 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 actual_phy_idx, phy_index, link_cfg_idx;
+	u8 phy_config_swapped = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		params->phy[actual_phy_idx].req_flow_ctrl =
+			params->req_flow_ctrl[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_line_speed =
+			params->req_line_speed[link_cfg_idx];
+
+		params->phy[actual_phy_idx].speed_cap_mask =
+			params->speed_cap_mask[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_duplex =
+			params->req_duplex[link_cfg_idx];
+
+		if (params->req_line_speed[link_cfg_idx] ==
+		    SPEED_AUTO_NEG)
+			vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+			   " speed_cap_mask %x\n",
+			   params->phy[actual_phy_idx].req_flow_ctrl,
+			   params->phy[actual_phy_idx].req_line_speed,
+			   params->phy[actual_phy_idx].speed_cap_mask);
+	}
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    struct link_vars *vars)
+{
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertisement */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+	}
+	DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct bnx2x_phy *phy,
+				struct link_params *params,
+				struct link_vars *vars,
+				u32 pause_result)
+{
+	struct bnx2x *bp = params->bp;
+						/*  LD	    LP	 */
+	switch (pause_result) {			/* ASYM P ASYM P */
+	case 0xb:				/*   1  0   1  1 */
+		DP(NETIF_MSG_LINK, "Flow Control: TX only\n");
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+
+	case 0xe:				/*   1  1   1  0 */
+		DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+
+	case 0x5:				/*   0  1   0  1 */
+	case 0x7:				/*   0  1   1  1 */
+	case 0xd:				/*   1  1   0  1 */
+	case 0xf:				/*   1  1   1  1 */
+		/* If the user selected to advertise RX ONLY,
+		 * although we advertised both, need to enable
+		 * RX only.
+		 */
+		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
+			DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n");
+			vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		} else {
+			DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+			vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		}
+		break;
+
+	default:
+		DP(NETIF_MSG_LINK, "Flow Control: None\n");
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		break;
+	}
+	if (pause_result & (1<<0))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+	if (pause_result & (1<<1))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+
+}
+
+static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
+					struct link_params *params,
+					struct link_vars *vars)
+{
+	u16 ld_pause;		/* local */
+	u16 lp_pause;		/* link partner */
+	u16 pause_result;
+	struct bnx2x *bp = params->bp;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+		bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
+		bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+	} else if (CHIP_IS_E3(bp) &&
+		SINGLE_MEDIA_DIRECT(params)) {
+		u8 lane = bnx2x_get_warpcore_lane(phy, params);
+		u16 gp_status, gp_mask;
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+				&gp_status);
+		gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+			   MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+			lane;
+		if ((gp_status & gp_mask) == gp_mask) {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+			ld_pause = ((ld_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+			lp_pause = ((lp_pause &
+				     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+				    << 3);
+		}
+	} else {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+	}
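+	/* Fold both advertisements into a 4-bit result: bits [3:2] carry the
+	 * local device's ASYM/PAUSE bits, bits [1:0] the link partner's,
+	 * matching the table used by bnx2x_pause_resolve().
+	 */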
+	pause_result = (ld_pause &
+			MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+	pause_result |= (lp_pause &
+			 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+	DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
+	bnx2x_pause_resolve(phy, params, vars, pause_result);
+
+}
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	u8 ret = 0;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+		if (phy->req_line_speed == SPEED_AUTO_NEG)
+			bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+		/* But set the flow-control result as the requested one */
+		vars->flow_ctrl = phy->req_flow_ctrl;
+	} else if (phy->req_line_speed != SPEED_AUTO_NEG)
+		vars->flow_ctrl = params->req_fc_auto_adv;
+	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+		ret = 1;
+		bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+	}
+	return ret;
+}
+/******************************************************************/
+/*			Warpcore section			  */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
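+/* WC_TX_DRIVER() packs the post2/idriver/ipre-driver/ifir fields of a
+ * warpcore TX driver register, and WC_TX_FIR() packs the post/main/pre
+ * taps of the TX FIR register.
+ */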
+#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
+	((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+	 (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+	 (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
+	 (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+	((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+	 (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+	 (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
+static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
+					 struct link_params *params,
+					 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 i;
+	static struct bnx2x_reg_set reg_set[] = {
+		/* Step 1 - Program the TX/RX alignment markers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+		/* Step 2 - Configure the NP registers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+	};
+	DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+
+	/* Start KR2 work-around timer which handles the BCM8073 link-partner */
+	params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
+}
+
+static void bnx2x_disable_kr2(struct link_params *params,
+			      struct link_vars *vars,
+			      struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	int i;
+	static struct bnx2x_reg_set reg_set[] = {
+		/* Step 1 - Program the TX/RX alignment markers */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+	};
+	DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+	params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
+
+	vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+					       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+
+	DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
+static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	/* Restart autoneg on the leading lane only */
+	struct bnx2x *bp = params->bp;
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, lane);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+}
+
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+					struct link_params *params,
+					struct link_vars *vars)
+{
+	u16 lane, i, cl72_ctrl, an_adv = 0, val;
+	u32 wc_lane_config;
+	struct bnx2x *bp = params->bp;
+	static struct bnx2x_reg_set reg_set[] = {
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+		{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+		/* Disable Autoneg: re-enable it after adv is done. */
+		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+		{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
+	};
+	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+	/* Set to default registers that may be overridden by 10G force */
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+	cl72_ctrl &= 0x08ff;
+	cl72_ctrl |= 0x3800;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
+	/* Check adding advertisement for 1G KX */
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (vars->line_speed == SPEED_1000)) {
+		u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+		an_adv |= (1<<5);
+
+		/* Enable CL37 1G Parallel Detect */
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
+		DP(NETIF_MSG_LINK, "Advertize 1G\n");
+	}
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+	    (vars->line_speed ==  SPEED_10000)) {
+		/* Check adding advertisement for 10G KR */
+		an_adv |= (1<<7);
+		/* Enable 10G Parallel Detect */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+
+		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+				 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+		bnx2x_set_aer_mmd(params, phy);
+		DP(NETIF_MSG_LINK, "Advertize 10G\n");
+	}
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+			 WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
+	/* Configure the next lane if dual mode */
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
+				 WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+			 0x03f0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+			 0x03f0);
+
+	/* Advertised speeds */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
+
+	/* Advertised and set FEC (Forward Error Correction) */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+			 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+			  MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
+	/* Enable CL37 BAM */
+	if (REG_RD(bp, params->shmem_base +
+		   offsetof(struct shmem_region, dev_info.
+			    port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+					 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
+
+	/* Advertise pause */
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
+
+	/* Over 1G - AN local device user page 1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+	    (phy->req_line_speed == SPEED_20000)) {
+
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, lane);
+
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
+					 (1<<11));
+
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+		bnx2x_set_aer_mmd(params, phy);
+
+		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+	} else {
+		/* Enable Auto-Detect to support 1G over CL37 as well */
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+		wc_lane_config = REG_RD(bp, params->shmem_base +
+					offsetof(struct shmem_region, dev_info.
+					shared_hw_config.wc_lane_config));
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
+		/* Force cl48 sync_status LOW to avoid getting stuck in CL73
+		 * parallel-detect loop when CL73 and CL37 are enabled.
+		 */
+		val |= 1 << 11;
+
+		/* Restore polarity settings in case they were overwritten by
+		 * a previous link owner
+		 */
+		if (wc_lane_config &
+		    (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+			val |= 3 << 2;
+		else
+			val &= ~(3 << 2);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+				 val);
+
+		bnx2x_disable_kr2(params, vars, phy);
+	}
+
+	/* Enable Autoneg: only on the main lane */
+	bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16, i, lane;
+	static struct bnx2x_reg_set reg_set[] = {
+		/* Disable Autoneg */
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			0x3f00},
+		{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+		/* Leave cl72 training enabled, needed for KR */
+		{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Global registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 &= ~(0x0011 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 |= (0x0303 << (lane << 1));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+	/* Set speed via PMA/PMD register */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+	/* Enable encoded forced speed */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* Turn on TX scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* Turn on RX scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+	/* Set and clear loopback to cause a reset to 64/66 decoder */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       u8 is_xfi)
+{
+	struct bnx2x *bp = params->bp;
+	u16 misc1_val, tap_val, tx_driver_val, lane, val;
+	u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+	u32 ifir_val, ipost2_val, ipre_driver_val;
+
+	/* Hold rxSeqStart */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
+
+	/* Hold tx_fifo_reset */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
+
+	/* Disable CL73 AN */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+	/* Disable 100FX Enable and Auto-Detect */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
+
+	/* Disable 100FX Idle detect */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_FX100_CTRL3, 0x0080);
+
+	/* Set Block address to Remote PHY & Clear forced_speed[5] */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
+
+	/* Turn off auto-detect & fiber mode */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+				  0xFFEE);
+
+	/* Set filter_force_link, disable_false_link and parallel_detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			 ((val | 0x0006) & 0xFFFE));
+
+	/* Set XFI / SFI */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+	misc1_val &= ~(0x1f);
+
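+	/* The low five bits of SERDESDIGITAL_MISC1 select the forced speed:
+	 * 0x5 is used here for XFI and 0x9 for SFI.  The FIR tap and TX
+	 * driver values differ accordingly (the SFI values may come from
+	 * nvram).
+	 */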
+	if (is_xfi) {
+		misc1_val |= 0x5;
+		tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+		tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
+	} else {
+		cfg_tap_val = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+					      port_hw_config[params->port].
+					      sfi_tap_values));
+
+		tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+		misc1_val |= 0x9;
+
+		/* TAP values are controlled by nvram if the value there isn't 0 */
+		if (tx_equal)
+			tap_val = (u16)tx_equal;
+		else
+			tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+		ifir_val = DEFAULT_TX_DRV_IFIR;
+		ipost2_val = DEFAULT_TX_DRV_POST2;
+		ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
+		tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
+
+		/* If any of the IFIR/IPRE_DRIVER/POST2 is set, apply all
+		 * configuration.
+		 */
+		if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
+				   PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
+				   PORT_HW_CFG_TX_DRV_POST2_MASK)) {
+			ifir_val = (cfg_tap_val &
+				    PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
+				PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
+			ipre_driver_val = (cfg_tap_val &
+					   PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
+			>> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
+			ipost2_val = (cfg_tap_val &
+				      PORT_HW_CFG_TX_DRV_POST2_MASK) >>
+				PORT_HW_CFG_TX_DRV_POST2_SHIFT;
+		}
+
+		if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
+			tx_drv_brdct = (cfg_tap_val &
+					PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+				PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+		}
+
+		tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
+					     ipre_driver_val, ifir_val);
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+			 tx_driver_val);
+
+	/* Enable fiber mode, enable and invert sig_det */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
+
+	/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+	bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
+	/* 10G XFI Full Duplex */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+	/* Release tx_fifo_reset */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+				  0xFFFE);
+	/* Release rxSeqStart */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
+}
+
+static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
+					     struct link_params *params)
+{
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Set global registers, so set AER lane to 0 */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+
+	/* Disable sequencer */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
+
+	bnx2x_set_aer_mmd(params, phy);
+
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
+				  MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_AN_REG_CTRL, 0);
+	/* Turn off CL73 */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+	val &= ~(1<<5);
+	val |= (1<<6);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+	/* Set 20G KR2 force speed */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+	val &= ~(3<<14);
+	val |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
+
+	/* Enable sequencer (over lane 0) */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
+
+	bnx2x_set_aer_mmd(params, phy);
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+					 struct bnx2x_phy *phy,
+					 u16 lane)
+{
+	/* Rx0 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+	/* Rx2 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+	/* Serdes Digital Misc1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+	/* Serdes Digital4 Misc3 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+	/* Set Transmit PMD settings */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			 (WC_TX_FIR(0x12, 0x2d, 0x00) |
+			  MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+			 WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+					   struct link_params *params,
+					   u8 fiber_mode,
+					   u8 always_autoneg)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16, digctrl_kx1, digctrl_kx2;
+
+	/* Clear XFI clock comp in non-10G single lane mode. */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
+
+	bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
+	if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
+		/* SGMII Autoneg */
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+					 0x1000);
+		DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+	} else {
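+		/* Read the MII control register and clear the autoneg enable
+		 * (bit 12), duplex (bit 8) and speed select (bits 13 and 6)
+		 * fields before programming the forced speed below.
+		 */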
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		val16 &= 0xcebf;
+		switch (phy->req_line_speed) {
+		case SPEED_10:
+			break;
+		case SPEED_100:
+			val16 |= 0x2000;
+			break;
+		case SPEED_1000:
+			val16 |= 0x0040;
+			break;
+		default:
+			DP(NETIF_MSG_LINK,
+			   "Speed not supported: 0x%x\n", phy->req_line_speed);
+			return;
+		}
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			val16 |= 0x0100;
+
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+		DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+			       phy->req_line_speed);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		DP(NETIF_MSG_LINK, "  (readback) %x\n", val16);
+	}
+
+	/* SGMII Slave mode and disable signal detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+	if (fiber_mode)
+		digctrl_kx1 = 1;
+	else
+		digctrl_kx1 &= 0xff4a;
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			digctrl_kx1);
+
+	/* Turn off parallel detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 & ~(1<<2)));
+
+	/* Re-enable parallel detect */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 | (1<<2)));
+
+	/* Enable autodet */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			(digctrl_kx1 | 0x10));
+}
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 reset)
+{
+	u16 val;
+	/* Assert or release the per-lane reset according to 'reset' */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL5_MISC6, &val);
+	if (reset)
+		val |= 0xC000;
+	else
+		val &= 0x3FFF;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, val);
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      u16 lane)
+{
+	struct bnx2x *bp = params->bp;
+	u16 i;
+	static struct bnx2x_reg_set wc_regs[] = {
+		{MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			0x0195},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			0x0007},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+			0x0002},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+		{MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+	};
+	/* Set XFI clock comp as default. */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+	for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
+		bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+				 wc_regs[i].val);
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+						u32 chip_id,
+						u32 shmem_base, u8 port,
+						u8 *gpio_num, u8 *gpio_port)
+{
+	u32 cfg_pin;
+	*gpio_num = 0;
+	*gpio_port = 0;
+	if (CHIP_IS_E3(bp)) {
+		cfg_pin = (REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+				PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+				PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/* Should not happen. This function is called upon an interrupt
+		 * triggered by a GPIO (since EPIO can only generate interrupts
+		 * to the MCP).
+		 * So if this function was called and none of the GPIOs is set,
+		 * something has gone seriously wrong.
+		 */
+		if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+		    (cfg_pin > PIN_CFG_GPIO3_P1)) {
+			DP(NETIF_MSG_LINK,
+			   "No cfg pin %x for module detect indication\n",
+			   cfg_pin);
+			return -EINVAL;
+		}
+
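+		/* cfg_pin encodes the pin relative to PIN_CFG_GPIO0_P0:
+		 * bits [1:0] select the GPIO number, bit 2 selects the port.
+		 */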
+		*gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+		*gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+	} else {
+		*gpio_num = MISC_REGISTERS_GPIO_3;
+		*gpio_port = port;
+	}
+
+	return 0;
+}
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_num, gpio_port;
+	u32 gpio_val;
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+				      params->shmem_base, params->port,
+				      &gpio_num, &gpio_port) != 0)
+		return 0;
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* The MOD_ABS pin reads low when a module is plugged in */
+	if (gpio_val == 0)
+		return 1;
+	else
+		return 0;
+}
+static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
+				     struct link_params *params)
+{
+	u16 gp2_status_reg0, lane;
+	struct bnx2x *bp = params->bp;
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+
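+	/* Per-lane signal detect is reported in bits [11:8] of GP_2_0 */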
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
+				 &gp2_status_reg0);
+
+	return (gp2_status_reg0 >> (8+lane)) & 0x1;
+}
+
+static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
+					  struct link_params *params,
+					  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 serdes_net_if;
+	u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
+
+	vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
+
+	if (!vars->turn_to_run_wc_rt)
+		return;
+
+	if (vars->rx_tx_asic_rst) {
+		u16 lane = bnx2x_get_warpcore_lane(phy, params);
+		serdes_net_if = (REG_RD(bp, params->shmem_base +
+				offsetof(struct shmem_region, dev_info.
+				port_hw_config[params->port].default_cfg)) &
+				PORT_HW_CFG_NET_SERDES_IF_MASK);
+
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			/* Do we get link yet? */
+			bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
+					&gp_status1);
+			lnkup = (gp_status1 >> (8+lane)) & 0x1; /* 1G */
+			/* 10G KR */
+			lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
+
+			if (lnkup_kr || lnkup) {
+				vars->rx_tx_asic_rst = 0;
+			} else {
+				/* Reset the lane to see if link comes up. */
+				bnx2x_warpcore_reset_lane(bp, phy, 1);
+				bnx2x_warpcore_reset_lane(bp, phy, 0);
+
+				/* Restart Autoneg */
+				bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+					MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+				vars->rx_tx_asic_rst--;
+				DP(NETIF_MSG_LINK, "0x%x retry left\n",
+				vars->rx_tx_asic_rst);
+			}
+			break;
+
+		default:
+			break;
+		}
+
+	} /* vars->rx_tx_asic_rst */
+
+}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	struct bnx2x *bp = params->bp;
+	bnx2x_warpcore_clear_regs(phy, params, lane);
+	if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+	     SPEED_10000) &&
+	    (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+		DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+		bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+	} else {
+		DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+		bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+	}
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+					 struct bnx2x_phy *phy,
+					 u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+
+	cfg_pin = REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region,
+				  dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+		PORT_HW_CFG_E3_TX_LASER_MASK;
+	/* Set the !tx_en since this pin is DISABLE_TX_LASER */
+	DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+
+	/* For 20G, the expected pin is 3 pins after the current one */
+	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+		bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 serdes_net_if;
+	u8 fiber_mode;
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	serdes_net_if = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+			 PORT_HW_CFG_NET_SERDES_IF_MASK);
+	DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+			   "serdes_net_if = 0x%x\n",
+		       vars->line_speed, serdes_net_if);
+	bnx2x_set_aer_mmd(params, phy);
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+	vars->phy_flags |= PHY_XGXS_FLAG;
+	if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+	    (phy->req_line_speed &&
+	     ((phy->req_line_speed == SPEED_100) ||
+	      (phy->req_line_speed == SPEED_10)))) {
+		vars->phy_flags |= PHY_SGMII_FLAG;
+		DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+		bnx2x_warpcore_clear_regs(phy, params, lane);
+		bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
+	} else {
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			/* Enable KR Auto Neg */
+			if (params->loopback_mode != LOOPBACK_EXT)
+				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+			else {
+				DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+				bnx2x_warpcore_set_10G_KR(phy, params, vars);
+			}
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			bnx2x_warpcore_clear_regs(phy, params, lane);
+			if (vars->line_speed == SPEED_10000) {
+				DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+				bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+			} else {
+				if (SINGLE_MEDIA_DIRECT(params)) {
+					DP(NETIF_MSG_LINK, "1G Fiber\n");
+					fiber_mode = 1;
+				} else {
+					DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+					fiber_mode = 0;
+				}
+				bnx2x_warpcore_set_sgmii_speed(phy,
+								params,
+								fiber_mode,
+								0);
+			}
+
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+			/* Issue module detection if a module is plugged in;
+			 * otherwise enable the transmitter to avoid current
+			 * leakage when no module is connected.
+			 */
+			if ((params->loopback_mode == LOOPBACK_NONE) ||
+			    (params->loopback_mode == LOOPBACK_EXT)) {
+				if (bnx2x_is_sfp_module_plugged(phy, params))
+					bnx2x_sfp_module_detection(phy, params);
+				else
+					bnx2x_sfp_e3_set_transmitter(params,
+								     phy, 1);
+			}
+
+			bnx2x_warpcore_config_sfi(phy, params);
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			if (vars->line_speed != SPEED_20000) {
+				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+				return;
+			}
+			DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+			bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+			/* Issue Module detection */
+			bnx2x_sfp_module_detection(phy, params);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			if (!params->loopback_mode) {
+				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+			} else {
+				DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
+				bnx2x_warpcore_set_20G_force_KR2(phy, params);
+			}
+			break;
+		default:
+			DP(NETIF_MSG_LINK,
+			   "Unsupported Serdes Net Interface 0x%x\n",
+			   serdes_net_if);
+			return;
+		}
+	}
+
+	/* Take lane out of reset after configuration is finished */
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+	DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16, lane;
+	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+	bnx2x_set_mdio_emac_per_phy(bp, params);
+	bnx2x_set_aer_mmd(params, phy);
+	/* Global register */
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+	/* Clear loopback settings (if any) */
+	/* 10G & 20G */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
+
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
+
+	/* Update those 1-copy registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Enable 1G MDIO (1-copy) */
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				  ~0x10);
+
+	bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+				  MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 |= (0x11 << lane);
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		val16 |= (0x22 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 &= ~(0x0303 << (lane << 1));
+	val16 |= (0x0101 << (lane << 1));
+	if (phy->flags & FLAGS_WC_DUAL_MODE) {
+		val16 &= ~(0x0c0c << (lane << 1));
+		val16 |= (0x0404 << (lane << 1));
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+	u32 lane;
+	DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+		       params->loopback_mode, phy->req_line_speed);
+
+	if (phy->req_line_speed < SPEED_10000 ||
+	    phy->supported & SUPPORTED_20000baseKR2_Full) {
+		/* 10/100/1000/20G-KR2 */
+
+		/* Update those 1-copy registers */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		/* Enable 1G MDIO (1-copy) */
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+					 0x10);
+		/* Set 1G loopback based on lane (1-copy) */
+		lane = bnx2x_get_warpcore_lane(phy, params);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+		val16 |= (1<<lane);
+		if (phy->flags & FLAGS_WC_DUAL_MODE)
+			val16 |= (2<<lane);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+				 val16);
+
+		/* Switch back to 4-copy registers */
+		bnx2x_set_aer_mmd(params, phy);
+	} else {
+		/* 10G / 20G-DXGXS */
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+					 0x4000);
+		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+					 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
+	}
+}
+
+static void bnx2x_sync_link(struct link_params *params,
+			     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_10g_plus;
+	if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+		vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+	if (vars->link_up) {
+		DP(NETIF_MSG_LINK, "phy link up\n");
+
+		vars->phy_link_up = 1;
+		vars->duplex = DUPLEX_FULL;
+		switch (vars->link_status &
+			LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+		case LINK_10THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_10TFD:
+			vars->line_speed = SPEED_10;
+			break;
+
+		case LINK_100TXHD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_100T4:
+		case LINK_100TXFD:
+			vars->line_speed = SPEED_100;
+			break;
+
+		case LINK_1000THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_1000TFD:
+			vars->line_speed = SPEED_1000;
+			break;
+
+		case LINK_2500THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_2500TFD:
+			vars->line_speed = SPEED_2500;
+			break;
+
+		case LINK_10GTFD:
+			vars->line_speed = SPEED_10000;
+			break;
+		case LINK_20GTFD:
+			vars->line_speed = SPEED_20000;
+			break;
+		default:
+			break;
+		}
+		vars->flow_ctrl = 0;
+		if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+			vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+		if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+			vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+		if (!vars->flow_ctrl)
+			vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+		if (vars->line_speed &&
+		    ((vars->line_speed == SPEED_10) ||
+		     (vars->line_speed == SPEED_100))) {
+			vars->phy_flags |= PHY_SGMII_FLAG;
+		} else {
+			vars->phy_flags &= ~PHY_SGMII_FLAG;
+		}
+		if (vars->line_speed &&
+		    USES_WARPCORE(bp) &&
+		    (vars->line_speed == SPEED_1000))
+			vars->phy_flags |= PHY_SGMII_FLAG;
+		/* Anything 10G and over uses the bmac */
+		link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+		if (link_10g_plus) {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_XMAC;
+			else
+				vars->mac_type = MAC_TYPE_BMAC;
+		} else {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_UMAC;
+			else
+				vars->mac_type = MAC_TYPE_EMAC;
+		}
+	} else { /* Link down */
+		DP(NETIF_MSG_LINK, "phy link down\n");
+
+		vars->phy_link_up = 0;
+
+		vars->line_speed = 0;
+		vars->duplex = DUPLEX_FULL;
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+		/* Indicate no mac active */
+		vars->mac_type = MAC_TYPE_NONE;
+		if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+			vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+		if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+			vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
+	}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 sync_offset, media_types;
+	/* Update PHY configuration */
+	set_phy_vars(params, vars);
+
+	vars->link_status = REG_RD(bp, params->shmem_base +
+				   offsetof(struct shmem_region,
+					    port_mb[port].link_status));
+
+	/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+	if (params->loopback_mode != LOOPBACK_NONE &&
+	    params->loopback_mode != LOOPBACK_EXT)
+		vars->link_status |= LINK_STATUS_LINK_UP;
+
+	if (bnx2x_eee_has_cap(params))
+		vars->eee_status = REG_RD(bp, params->shmem2_base +
+					  offsetof(struct shmem2_region,
+						   eee_status[params->port]));
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_sync_link(params, vars);
+	/* Sync media type */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+
+	params->phy[INT_PHY].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+	params->phy[EXT_PHY1].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+	params->phy[EXT_PHY2].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+	DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+	/* Sync AEU offset */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].aeu_int_mask);
+
+	vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+	/* Sync PFC status */
+	if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+		params->feature_config_flags |=
+					FEATURE_CONFIG_PFC_ENABLED;
+	else
+		params->feature_config_flags &=
+					~FEATURE_CONFIG_PFC_ENABLED;
+
+	if (SHMEM2_HAS(bp, link_attr_sync))
+		params->link_attr_sync = SHMEM2_RD(bp,
+						 link_attr_sync[params->port]);
+
+	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
+		 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+	DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
+		 vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+static void bnx2x_set_master_ln(struct link_params *params,
+				struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	u16 new_master_ln, ser_lane;
+	ser_lane = ((params->lane_config &
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+	/* Set the master_ln for AN */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  &new_master_ln);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  (new_master_ln | ser_lane));
+}
+
+static int bnx2x_reset_unicore(struct link_params *params,
+			       struct bnx2x_phy *phy,
+			       u8 set_serdes)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mii_control;
+	u16 i;
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+	/* Reset the unicore */
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL,
+			  (mii_control |
+			   MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+	if (set_serdes)
+		bnx2x_set_serdes_access(bp, params->port);
+
+	/* Wait for the reset to self clear */
+	for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+		udelay(5);
+
+		/* The reset erased the previous bank value */
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+
+		if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+			udelay(5);
+			return 0;
+		}
+	}
+
+	netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+		   params->port);
+	DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
+	return -EINVAL;
+
+}
+
+static void bnx2x_set_swap_lanes(struct link_params *params,
+				 struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	/* Each two bits represents a lane number:
+	 * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+	 */
+	u16 rx_lane_swap, tx_lane_swap;
+
+	rx_lane_swap = ((params->lane_config &
+			 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+	tx_lane_swap = ((params->lane_config &
+			 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+	if (rx_lane_swap != 0x1b) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+				  (rx_lane_swap |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+	} else {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+	}
+
+	if (tx_lane_swap != 0x1b) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+				  (tx_lane_swap |
+				   MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+	} else {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+	}
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 control2;
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  &control2);
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	else
+		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+		phy->speed_cap_mask, control2);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  control2);
+
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+	     (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		DP(NETIF_MSG_LINK, "XGXS\n");
+
+		CL22_WR_OVER_CL45(bp, phy,
+				 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  &control2);
+
+
+		control2 |=
+		    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  control2);
+
+		/* Disable parallel detection of HiG */
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+	}
+}
+
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      struct link_vars *vars,
+			      u8 enable_cl73)
+{
+	struct bnx2x *bp = params->bp;
+	u16 reg_val;
+
+	/* CL37 Autoneg */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+	/* CL37 Autoneg Enabled */
+	if (vars->line_speed == SPEED_AUTO_NEG)
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+	else /* CL37 Autoneg Disabled */
+		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/* Enable/Disable Autodetection */
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+	reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+		    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+	reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+	if (vars->line_speed == SPEED_AUTO_NEG)
+		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+	else
+		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+	/* Enable TetonII and BAM autoneg */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  &reg_val);
+	if (vars->line_speed == SPEED_AUTO_NEG) {
+		/* Enable BAM aneg Mode and TetonII aneg Mode */
+		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	} else {
+		/* TetonII and BAM Autoneg Disabled */
+		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	}
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  reg_val);
+
+	if (enable_cl73) {
+		/* Enable Cl73 FSM status bits */
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_USERB0,
+				  MDIO_CL73_USERB0_CL73_UCTRL,
+				  0xe);
+
+		/* Enable BAM Station Manager */
+		CL22_WR_OVER_CL45(bp, phy,
+			MDIO_REG_BANK_CL73_USERB0,
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+		/* Advertise CL73 link speeds */
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  &reg_val);
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  reg_val);
+
+		/* CL73 Autoneg Enabled */
+		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+	} else /* CL73 Autoneg Disabled */
+		reg_val = 0;
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* Program SerDes, forced speed */
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 reg_val;
+
+	/* Program duplex, disable autoneg and sgmii */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+		     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+		     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+	if (phy->req_duplex == DUPLEX_FULL)
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/* Program speed
+	 *  - needed only if the speed is greater than 1G (2.5G or 10G)
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+	/* Clearing the speed value before setting the right speed */
+	DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+	reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+		     MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+	if (!((vars->line_speed == SPEED_1000) ||
+	      (vars->line_speed == SPEED_100) ||
+	      (vars->line_speed == SPEED_10))) {
+
+		reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+			    MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+		if (vars->line_speed == SPEED_10000)
+			reg_val |=
+				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+	}
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, reg_val);
+
+}
+
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+
+	/* Set extended capabilities */
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+		val |= MDIO_OVER_1G_UP1_2_5G;
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+		val |= MDIO_OVER_1G_UP1_10G;
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP1, val);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params,
+					      u16 ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	/* For AN, we are always publishing full duplex */
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, &val);
+	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
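+	/* The CL37 pause bits in ieee_fc, shifted left by 3, land in the
+	 * CL73 pause field.
+	 */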
+	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  u8 enable_cl73)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mii_control;
+
+	DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
+	/* Enable and restart BAM/CL37 aneg */
+
+	if (enable_cl73) {
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  &mii_control);
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  (mii_control |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+	} else {
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		DP(NETIF_MSG_LINK,
+			 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
+			 mii_control);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  (mii_control |
+				   MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				   MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+	}
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+					   struct link_params *params,
+					   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 control1;
+
+	/* In SGMII mode, the unicore is always slave */
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  &control1);
+	control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+	/* Set sgmii mode (and not fiber) */
+	control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  control1);
+
+	/* If forced speed */
+	if (!(vars->line_speed == SPEED_AUTO_NEG)) {
+		/* Set speed, disable autoneg */
+		u16 mii_control;
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
+				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+		switch (vars->line_speed) {
+		case SPEED_100:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+			break;
+		case SPEED_1000:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+			break;
+		case SPEED_10:
+			/* There is nothing to set for 10M */
+			break;
+		default:
+			/* Invalid speed for SGMII */
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+				  vars->line_speed);
+			break;
+		}
+
+		/* Setting the full duplex */
+		if (phy->req_duplex == DUPLEX_FULL)
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  mii_control);
+
+	} else { /* AN mode */
+		/* Enable and restart AN */
+		bnx2x_restart_autoneg(phy, params, 0);
+	}
+}
+
+/* Link management */
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+					     struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 pd_10g, status2_1000x;
+	if (phy->req_line_speed != SPEED_AUTO_NEG)
+		return 0;
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
+	if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+		DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+			  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+			  &pd_10g);
+
+	if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+		DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+	return 0;
+}
+
+static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
+				struct link_params *params,
+				struct link_vars *vars,
+				u32 gp_status)
+{
+	u16 ld_pause;   /* local driver */
+	u16 lp_pause;   /* link partner */
+	u16 pause_result;
+	struct bnx2x *bp = params->bp;
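+	/* pause_result holds the local pause advertisement in bits [3:2]
+	 * and the link partner's in bits [1:0].
+	 */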
+	if ((gp_status &
+	     (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+	      MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+	    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+	     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV1,
+				  &ld_pause);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_LP_ADV1,
+				  &lp_pause);
+		pause_result = (ld_pause &
+				MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
+		pause_result |= (lp_pause &
+				 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
+		DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
+	} else {
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+				  &ld_pause);
+		CL22_RD_OVER_CL45(bp, phy,
+			MDIO_REG_BANK_COMBO_IEEE0,
+			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+			&lp_pause);
+		pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+		pause_result |= (lp_pause &
+				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+		DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
+	}
+	bnx2x_pause_resolve(phy, params, vars, pause_result);
+
+}
+
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+				    struct link_params *params,
+				    struct link_vars *vars,
+				    u32 gp_status)
+{
+	struct bnx2x *bp = params->bp;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	/* Resolve from gp_status in case of AN complete and not sgmii */
+	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+		if (phy->req_line_speed == SPEED_AUTO_NEG)
+			bnx2x_update_adv_fc(phy, params, vars, gp_status);
+		/* But set the flow-control result as the requested one */
+		vars->flow_ctrl = phy->req_flow_ctrl;
+	} else if (phy->req_line_speed != SPEED_AUTO_NEG)
+		vars->flow_ctrl = params->req_fc_auto_adv;
+	else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+		 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+		if (bnx2x_direct_parallel_detect_used(phy, params)) {
+			vars->flow_ctrl = params->req_fc_auto_adv;
+			return;
+		}
+		bnx2x_update_adv_fc(phy, params, vars, gp_status);
+	}
+	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+}
+
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 rx_status, ustat_val, cl37_fsm_received;
+	DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
+	/* Step 1: Make sure signal is detected */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_RX0,
+			  MDIO_RX0_RX_STATUS,
+			  &rx_status);
+	if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+	    (MDIO_RX0_RX_STATUS_SIGDET)) {
+		DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
+			     "rx_status(0x80b0) = 0x%x\n", rx_status);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+		return;
+	}
+	/* Step 2: Check CL73 state machine */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_USERB0,
+			  MDIO_CL73_USERB0_CL73_USTAT1,
+			  &ustat_val);
+	if ((ustat_val &
+	     (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+	      MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+	    (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+	      MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+		DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
+			     "ustat_val(0x8371) = 0x%x\n", ustat_val);
+		return;
+	}
+	/* Step 3: Check CL37 Message Pages received to indicate LP
+	 * supports only CL37
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_REMOTE_PHY,
+			  MDIO_REMOTE_PHY_MISC_RX_STATUS,
+			  &cl37_fsm_received);
+	if ((cl37_fsm_received &
+	     (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+	     MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+	    (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+	      MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+		DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
+			     "misc_rx_status(0x8330) = 0x%x\n",
+			 cl37_fsm_received);
+		return;
+	}
+	/* The combined cl37/cl73 fsm state information indicates that we
+	 * are connected to a device which does not support cl73, but does
+	 * support cl37 BAM. In this case we disable cl73 and restart cl37
+	 * auto-neg.
+	 */
+
+	/* Disable CL73 */
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+			  0);
+	/* Restart CL37 autoneg */
+	bnx2x_restart_autoneg(phy, params, 0);
+	DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
+}
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars,
+				  u32 gp_status)
+{
+	if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+		vars->link_status |=
+			LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+	if (bnx2x_direct_parallel_detect_used(phy, params))
+		vars->link_status |=
+			LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				      struct link_vars *vars,
+				      u16 is_link_up,
+				      u16 speed_mask,
+				      u16 is_duplex)
+{
+	struct bnx2x *bp = params->bp;
+	if (phy->req_line_speed == SPEED_AUTO_NEG)
+		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+	if (is_link_up) {
+		DP(NETIF_MSG_LINK, "phy link up\n");
+
+		vars->phy_link_up = 1;
+		vars->link_status |= LINK_STATUS_LINK_UP;
+
+		switch (speed_mask) {
+		case GP_STATUS_10M:
+			vars->line_speed = SPEED_10;
+			if (is_duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_10TFD;
+			else
+				vars->link_status |= LINK_10THD;
+			break;
+
+		case GP_STATUS_100M:
+			vars->line_speed = SPEED_100;
+			if (is_duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_100TXFD;
+			else
+				vars->link_status |= LINK_100TXHD;
+			break;
+
+		case GP_STATUS_1G:
+		case GP_STATUS_1G_KX:
+			vars->line_speed = SPEED_1000;
+			if (is_duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_1000TFD;
+			else
+				vars->link_status |= LINK_1000THD;
+			break;
+
+		case GP_STATUS_2_5G:
+			vars->line_speed = SPEED_2500;
+			if (is_duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_2500TFD;
+			else
+				vars->link_status |= LINK_2500THD;
+			break;
+
+		case GP_STATUS_5G:
+		case GP_STATUS_6G:
+			DP(NETIF_MSG_LINK,
+				 "link speed unsupported  gp_status 0x%x\n",
+				  speed_mask);
+			return -EINVAL;
+
+		case GP_STATUS_10G_KX4:
+		case GP_STATUS_10G_HIG:
+		case GP_STATUS_10G_CX4:
+		case GP_STATUS_10G_KR:
+		case GP_STATUS_10G_SFI:
+		case GP_STATUS_10G_XFI:
+			vars->line_speed = SPEED_10000;
+			vars->link_status |= LINK_10GTFD;
+			break;
+		case GP_STATUS_20G_DXGXS:
+		case GP_STATUS_20G_KR2:
+			vars->line_speed = SPEED_20000;
+			vars->link_status |= LINK_20GTFD;
+			break;
+		default:
+			DP(NETIF_MSG_LINK,
+				  "link speed unsupported gp_status 0x%x\n",
+				  speed_mask);
+			return -EINVAL;
+		}
+	} else { /* link_down */
+		DP(NETIF_MSG_LINK, "phy link down\n");
+
+		vars->phy_link_up = 0;
+
+		vars->duplex = DUPLEX_FULL;
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		vars->mac_type = MAC_TYPE_NONE;
+	}
+	DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+		    vars->phy_link_up, vars->line_speed);
+	return 0;
+}
+
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+
+	u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+	int rc = 0;
+
+	/* Read gp_status */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+		duplex = DUPLEX_FULL;
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+		link_up = 1;
+	speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+	DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+		       gp_status, link_up, speed_mask);
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+					 duplex);
+	if (rc == -EINVAL)
+		return rc;
+
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+		if (SINGLE_MEDIA_DIRECT(params)) {
+			vars->duplex = duplex;
+			bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+			if (phy->req_line_speed == SPEED_AUTO_NEG)
+				bnx2x_xgxs_an_resolve(phy, params, vars,
+						      gp_status);
+		}
+	} else { /* Link_down */
+		if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		    SINGLE_MEDIA_DIRECT(params)) {
+			/* Check signal is detected */
+			bnx2x_check_fallback_to_cl37(phy, params);
+		}
+	}
+
+	/* Read LP advertised speeds */
+	if (SINGLE_MEDIA_DIRECT(params) &&
+	    (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
+		u16 val;
+
+		CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
+
+		if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+		if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+			   MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+		CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G,
+				  MDIO_OVER_1G_LP_UP1, &val);
+
+		if (val & MDIO_OVER_1G_UP1_2_5G)
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+		if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+	}
+
+	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
+		   vars->duplex, vars->flow_ctrl, vars->link_status);
+	return rc;
+}
+
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 lane;
+	u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+	int rc = 0;
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Read gp_status */
+	if ((params->loopback_mode) &&
+	    (phy->flags & FLAGS_WC_DUAL_MODE)) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+		link_up &= 0x1;
+	} else if ((phy->req_line_speed > SPEED_10000) &&
+		(phy->supported & SUPPORTED_20000baseMLD2_Full)) {
+		u16 temp_link_up;
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &temp_link_up);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &link_up);
+		DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+			       temp_link_up, link_up);
+		link_up &= (1<<2);
+		if (link_up)
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1,
+				&gp_status1);
+		DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+		/* Check for either KR, 1G, or AN up. */
+		link_up = ((gp_status1 >> 8) |
+			   (gp_status1 >> 12) |
+			   (gp_status1)) &
+			(1 << lane);
+		if (phy->supported & SUPPORTED_20000baseKR2_Full) {
+			u16 an_link;
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_STATUS, &an_link);
+			bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+					MDIO_AN_REG_STATUS, &an_link);
+			link_up |= (an_link & (1<<2));
+		}
+		if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+			u16 pd, gp_status4;
+			if (phy->req_line_speed == SPEED_AUTO_NEG) {
+				/* Check Autoneg complete */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_GP2_STATUS_GP_2_4,
+						&gp_status4);
+				if (gp_status4 & ((1<<12)<<lane))
+					vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+				/* Check parallel detect used */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_PAR_DET_10G_STATUS,
+						&pd);
+				if (pd & (1<<15))
+					vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+			}
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+			vars->duplex = duplex;
+		}
+	}
+
+	if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
+	    SINGLE_MEDIA_DIRECT(params)) {
+		u16 val;
+
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_LP_AUTO_NEG2, &val);
+
+		if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+		if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+			   MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
+
+		if (val & MDIO_OVER_1G_UP1_2_5G)
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+		if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+	}
+
+
+	if (lane < 2) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+	}
+	DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
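+	/* Even lanes report the speed field in the low byte; shift it up
+	 * so the common GP_STATUS speed mask applies to all lanes.
+	 */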
+	if ((lane & 1) == 0)
+		gp_speed <<= 8;
+	gp_speed &= 0x3f00;
+	link_up = !!link_up;
+
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+					 duplex);
+
+	/* In case of KR link down, start up the recovery procedure */
+	if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+	    (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
+	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
+		   vars->duplex, vars->flow_ctrl, vars->link_status);
+	return rc;
+}
+static void bnx2x_set_gmii_tx_driver(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy = &params->phy[INT_PHY];
+	u16 lp_up2;
+	u16 tx_driver;
+	u16 bank;
+
+	/* Read precomp */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+	/* Bits [10:7] at lp_up2, positioned at [15:12] */
+	lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+		   MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+		  MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+	if (lp_up2 == 0)
+		return;
+
+	for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+	      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+		CL22_RD_OVER_CL45(bp, phy,
+				  bank,
+				  MDIO_TX0_TX_DRIVER, &tx_driver);
+
+		/* Replace tx_driver bits [15:12] */
+		if (lp_up2 !=
+		    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+			tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+			tx_driver |= lp_up2;
+			CL22_WR_OVER_CL45(bp, phy,
+					  bank,
+					  MDIO_TX0_TX_DRIVER, tx_driver);
+		}
+	}
+}
+
+static int bnx2x_emac_program(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u16 mode = 0;
+
+	DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+	bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
+		       EMAC_REG_EMAC_MODE,
+		       (EMAC_MODE_25G_MODE |
+			EMAC_MODE_PORT_MII_10M |
+			EMAC_MODE_HALF_DUPLEX));
+	switch (vars->line_speed) {
+	case SPEED_10:
+		mode |= EMAC_MODE_PORT_MII_10M;
+		break;
+
+	case SPEED_100:
+		mode |= EMAC_MODE_PORT_MII;
+		break;
+
+	case SPEED_1000:
+		mode |= EMAC_MODE_PORT_GMII;
+		break;
+
+	case SPEED_2500:
+		mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+		break;
+
+	default:
+		/* 10G not valid for EMAC */
+		DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+			   vars->line_speed);
+		return -EINVAL;
+	}
+
+	if (vars->duplex == DUPLEX_HALF)
+		mode |= EMAC_MODE_HALF_DUPLEX;
+	bnx2x_bits_en(bp,
+		      GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+		      mode);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+	return 0;
+}
+
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	u16 bank, i = 0;
+	struct bnx2x *bp = params->bp;
+
+	for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+	      bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+			CL22_WR_OVER_CL45(bp, phy,
+					  bank,
+					  MDIO_RX0_RX_EQ_BOOST,
+					  phy->rx_preemphasis[i]);
+	}
+
+	for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+		      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+			CL22_WR_OVER_CL45(bp, phy,
+					  bank,
+					  MDIO_TX0_TX_DRIVER,
+					  phy->tx_preemphasis[i]);
+	}
+}
+
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+			  (params->loopback_mode == LOOPBACK_XGXS));
+	if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+		if (SINGLE_MEDIA_DIRECT(params) &&
+		    (params->feature_config_flags &
+		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+			bnx2x_set_preemphasis(phy, params);
+
+		/* Forced speed requested? */
+		if (vars->line_speed != SPEED_AUTO_NEG ||
+		    (SINGLE_MEDIA_DIRECT(params) &&
+		     params->loopback_mode == LOOPBACK_EXT)) {
+			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+			/* Disable autoneg */
+			bnx2x_set_autoneg(phy, params, vars, 0);
+
+			/* Program speed and duplex */
+			bnx2x_program_serdes(phy, params, vars);
+
+		} else { /* AN_mode */
+			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+			/* AN enabled */
+			bnx2x_set_brcm_cl37_advertisement(phy, params);
+
+			/* Program duplex & pause advertisement (for aneg) */
+			bnx2x_set_ieee_aneg_advertisement(phy, params,
+							  vars->ieee_fc);
+
+			/* Enable autoneg */
+			bnx2x_set_autoneg(phy, params, vars, enable_cl73);
+
+			/* Enable and restart AN */
+			bnx2x_restart_autoneg(phy, params, enable_cl73);
+		}
+
+	} else { /* SGMII mode */
+		DP(NETIF_MSG_LINK, "SGMII\n");
+
+		bnx2x_initialize_sgmii_process(phy, params, vars);
+	}
+}
+
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
+			  struct link_params *params,
+			  struct link_vars *vars)
+{
+	int rc;
+	vars->phy_flags |= PHY_XGXS_FLAG;
+	if ((phy->req_line_speed &&
+	     ((phy->req_line_speed == SPEED_100) ||
+	      (phy->req_line_speed == SPEED_10))) ||
+	    (!phy->req_line_speed &&
+	     (phy->speed_cap_mask >=
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+	     (phy->speed_cap_mask <
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+		vars->phy_flags |= PHY_SGMII_FLAG;
+	else
+		vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	bnx2x_set_aer_mmd(params, phy);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bnx2x_set_master_ln(params, phy);
+
+	rc = bnx2x_reset_unicore(params, phy, 0);
+	/* Reset the SerDes and wait for reset bit return low */
+	if (rc)
+		return rc;
+
+	bnx2x_set_aer_mmd(params, phy);
+	/* Setting the masterLn_def again after the reset */
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+		bnx2x_set_master_ln(params, phy);
+		bnx2x_set_swap_lanes(params, phy);
+	}
+
+	return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+				     struct bnx2x_phy *phy,
+				     struct link_params *params)
+{
+	u16 cnt, ctrl;
+	/* Wait for soft reset to get cleared up to 1 sec */
+	for (cnt = 0; cnt < 1000; cnt++) {
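+		/* Bit 15 of the MDIO control register is the soft-reset
+		 * bit; it self-clears once the reset completes.
+		 */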
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+			bnx2x_cl22_read(bp, phy,
+				MDIO_PMA_REG_CTRL, &ctrl);
+		else
+			bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, &ctrl);
+		if (!(ctrl & (1<<15)))
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	if (cnt == 1000)
+		netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+			   params->port);
+	DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+	return cnt;
+}
+
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+	u8 port = params->port;
+	u32 mask;
+	struct bnx2x *bp = params->bp;
+
+	/* Setting the status to report on link up for either XGXS or SerDes */
+	if (CHIP_IS_E3(bp)) {
+		mask = NIG_MASK_XGXS0_LINK_STATUS;
+		if (!(SINGLE_MEDIA_DIRECT(params)))
+			mask |= NIG_MASK_MI_INT;
+	} else if (params->switch_cfg == SWITCH_CFG_10G) {
+		mask = (NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_XGXS0_LINK_STATUS);
+		DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+		if (!(SINGLE_MEDIA_DIRECT(params)) &&
+			params->phy[INT_PHY].type !=
+				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+			mask |= NIG_MASK_MI_INT;
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+		}
+
+	} else { /* SerDes */
+		mask = NIG_MASK_SERDES0_LINK_STATUS;
+		DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+		if (!(SINGLE_MEDIA_DIRECT(params)) &&
+			params->phy[INT_PHY].type !=
+				PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+			mask |= NIG_MASK_MI_INT;
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+		}
+	}
+	bnx2x_bits_en(bp,
+		      NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		      mask);
+
+	DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+		 (params->switch_cfg == SWITCH_CFG_10G),
+		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+	DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+		 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
+
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+				     u8 exp_mi_int)
+{
+	u32 latch_status = 0;
+
+	/* Disable the MI INT (external phy int) by writing 1 to the
+	 * status register. Link down indication is a high-active signal,
+	 * so in this case we need to write the status to clear the XOR
+	 */
+	/* Read Latched signals */
+	latch_status = REG_RD(bp,
+				    NIG_REG_LATCH_STATUS_0 + port*8);
+	DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+	/* Handle only those with latched-signal=up.*/
+	if (exp_mi_int)
+		bnx2x_bits_en(bp,
+			      NIG_REG_STATUS_INTERRUPT_PORT0
+			      + port*4,
+			      NIG_STATUS_EMAC0_MI_INT);
+	else
+		bnx2x_bits_dis(bp,
+			       NIG_REG_STATUS_INTERRUPT_PORT0
+			       + port*4,
+			       NIG_STATUS_EMAC0_MI_INT);
+
+	if (latch_status & 1) {
+
+		/* For all latched-signal=up : Re-Arm Latch signals */
+		REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+		       (latch_status & 0xfffe) | (latch_status & 1));
+	}
+	/* For all latched-signal=up, write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+			       struct link_vars *vars, u8 is_10g_plus)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 mask;
+	/* First reset all status bits; we assume only one line will
+	 * change at a time
+	 */
+	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+		       (NIG_STATUS_XGXS0_LINK10G |
+			NIG_STATUS_XGXS0_LINK_STATUS |
+			NIG_STATUS_SERDES0_LINK_STATUS));
+	if (vars->phy_link_up) {
+		if (USES_WARPCORE(bp))
+			mask = NIG_STATUS_XGXS0_LINK_STATUS;
+		else {
+			if (is_10g_plus)
+				mask = NIG_STATUS_XGXS0_LINK10G;
+			else if (params->switch_cfg == SWITCH_CFG_10G) {
+				/* Disable the link interrupt by writing 1 to
+				 * the relevant lane in the status register
+				 */
+				u32 ser_lane =
+					((params->lane_config &
+				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+				mask = ((1 << ser_lane) <<
+				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+			} else
+				mask = NIG_STATUS_SERDES0_LINK_STATUS;
+		}
+		DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+			       mask);
+		bnx2x_bits_en(bp,
+			      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+			      mask);
+	}
+}
+
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+	u8 *str_ptr = str;
+	u32 mask = 0xf0000000;
+	u8 shift = 8*4;
+	u8 digit;
+	u8 remove_leading_zeros = 1;
+	if (*len < 10) {
+		/* Need more than 10 chars for this format */
+		*str_ptr = '\0';
+		(*len)--;
+		return -EINVAL;
+	}
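+	/* Emit the 32-bit version as hex nibbles, MSB first, skipping
+	 * leading zeros and placing a '.' between the two 16-bit halves.
+	 */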
+	while (shift > 0) {
+
+		shift -= 4;
+		digit = ((num & mask) >> shift);
+		if (digit == 0 && remove_leading_zeros) {
+			mask = mask >> 4;
+			continue;
+		} else if (digit < 0xa)
+			*str_ptr = digit + '0';
+		else
+			*str_ptr = digit - 0xa + 'a';
+		remove_leading_zeros = 0;
+		str_ptr++;
+		(*len)--;
+		mask = mask >> 4;
+		if (shift == 4*4) {
+			*str_ptr = '.';
+			str_ptr++;
+			(*len)--;
+			remove_leading_zeros = 1;
+		}
+	}
+	return 0;
+}
+
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+	str[0] = '\0';
+	(*len)--;
+	return 0;
+}
+
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+				 u16 len)
+{
+	struct bnx2x *bp;
+	u32 spirom_ver = 0;
+	int status = 0;
+	u8 *ver_p = version;
+	u16 remain_len = len;
+	if (version == NULL || params == NULL)
+		return -EINVAL;
+	bp = params->bp;
+
+	/* Extract first external phy*/
+	version[0] = '\0';
+	spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+	if (params->phy[EXT_PHY1].format_fw_ver) {
+		status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+							      ver_p,
+							      &remain_len);
+		ver_p += (len - remain_len);
+	}
+	if ((params->num_phys == MAX_PHYS) &&
+	    (params->phy[EXT_PHY2].ver_addr != 0)) {
+		spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
+		if (params->phy[EXT_PHY2].format_fw_ver) {
+			*ver_p = '/';
+			ver_p++;
+			remain_len--;
+			status |= params->phy[EXT_PHY2].format_fw_ver(
+				spirom_ver,
+				ver_p,
+				&remain_len);
+			ver_p = version + (len - remain_len);
+		}
+	}
+	*ver_p = '\0';
+	return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+
+	if (phy->req_line_speed != SPEED_1000) {
+		u32 md_devad = 0;
+
+		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+		if (!CHIP_IS_E3(bp)) {
+			/* Change the uni_phy_addr in the nig */
+			md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+					       port*0x18));
+
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       0x5);
+		}
+
+		bnx2x_cl45_write(bp, phy,
+				 5,
+				 (MDIO_REG_BANK_AER_BLOCK +
+				  (MDIO_AER_BLOCK_AER_REG & 0xf)),
+				 0x2800);
+
+		bnx2x_cl45_write(bp, phy,
+				 5,
+				 (MDIO_REG_BANK_CL73_IEEEB0 +
+				  (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+				 0x6041);
+		msleep(200);
+		/* Set aer mmd back */
+		bnx2x_set_aer_mmd(params, phy);
+
+		if (!CHIP_IS_E3(bp)) {
+			/* And md_devad */
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       md_devad);
+		}
+	} else {
+		u16 mii_ctrl;
+		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+		bnx2x_cl45_read(bp, phy, 5,
+				(MDIO_REG_BANK_COMBO_IEEE0 +
+				(MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+				&mii_ctrl);
+		bnx2x_cl45_write(bp, phy, 5,
+				 (MDIO_REG_BANK_COMBO_IEEE0 +
+				 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+				 mii_ctrl |
+				 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+	}
+}
+
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed)
+{
+	u8 port = params->port;
+	u16 hw_led_mode = params->hw_led_mode;
+	int rc = 0;
+	u8 phy_idx;
+	u32 tmp;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+		 speed, hw_led_mode);
+	/* In case an external PHY provides its own LED handler, call it */
+	for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].set_link_led) {
+			params->phy[phy_idx].set_link_led(
+				&params->phy[phy_idx], params, mode);
+		}
+	}
+
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+		       SHARED_HW_CFG_LED_MAC1);
+
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		if (params->phy[EXT_PHY1].type ==
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+			tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+				EMAC_LED_100MB_OVERRIDE |
+				EMAC_LED_10MB_OVERRIDE);
+		else
+			tmp |= EMAC_LED_OVERRIDE;
+
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
+		break;
+
+	case LED_MODE_OPER:
+		/* For all other PHYs, OPER mode is the same as ON, so in
+		 * case link is down, do nothing
+		 */
+		if (!vars->link_up)
+			break;
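+		/* Fall through when link is up */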
+	case LED_MODE_ON:
+		if (((params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+			 (params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/* This is a work-around for E2+8727 Configurations */
+			if (mode == LED_MODE_ON ||
+				speed == SPEED_10000){
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				/* Return here without enabling traffic
+				 * LED blink and setting rate in ON mode.
+				 * In oper mode, enabling LED blink
+				 * and setting rate is needed.
+				 */
+				if (mode == LED_MODE_ON)
+					return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
+			/* This is a work-around for HW issue found when link
+			 * is up in CL73
+			 */
+			if ((!CHIP_IS_E3(bp)) ||
+			    (CHIP_IS_E3(bp) &&
+			     mode == LED_MODE_ON))
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+			if (CHIP_IS_E1x(bp) ||
+			    CHIP_IS_E2(bp) ||
+			    (mode == LED_MODE_ON))
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+			else
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+				       hw_led_mode);
+		} else if ((params->phy[EXT_PHY1].type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
+			   (mode == LED_MODE_ON)) {
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+			tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+			EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+				EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+			/* Break here; otherwise, it'll disable the
+			 * intended override.
+			 */
+			break;
+		} else {
+			u32 nig_led_mode = ((params->hw_led_mode <<
+					     SHARED_HW_CFG_LED_MODE_SHIFT) ==
+					    SHARED_HW_CFG_LED_EXTPHY2) ?
+				(SHARED_HW_CFG_LED_PHY1 >>
+				 SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+			       nig_led_mode);
+		}
+
+		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+		/* Set blinking rate to ~15.9Hz */
+		if (CHIP_IS_E3(bp))
+			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+			       LED_BLINK_RATE_VAL_E3);
+		else
+			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+			       LED_BLINK_RATE_VAL_E1X_E2);
+		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+		       port*4, 1);
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED,
+			(tmp & (~EMAC_LED_OVERRIDE)));
+
+		if (CHIP_IS_E1(bp) &&
+		    ((speed == SPEED_2500) ||
+		     (speed == SPEED_1000) ||
+		     (speed == SPEED_100) ||
+		     (speed == SPEED_10))) {
+			/* For speeds less than 10G LED scheme is different */
+			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+			       + port*4, 1);
+			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+			       port*4, 0);
+			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+			       port*4, 1);
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+		DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+			 mode);
+		break;
+	}
+	return rc;
+
+}
+
+/* This function reflects the actual link state, read DIRECTLY from the
+ * HW.
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes)
+{
+	struct bnx2x *bp = params->bp;
+	u16 gp_status = 0, phy_index = 0;
+	u8 ext_phy_link_up = 0, serdes_phy_type;
+	struct link_vars temp_vars;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+	if (CHIP_IS_E3(bp)) {
+		u16 link_up;
+		if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+		    > SPEED_10000) {
+			/* Check 20G link */
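+			/* Read twice since the status may be latched; bit 2
+			 * indicates link up.
+			 */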
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			link_up &= (1<<2);
+		} else {
+			/* Check 10G link and below */
+			u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
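+			/* Per-lane link indications are spread over bits
+			 * [11:8] and [15:12]; merge them and test this
+			 * lane's bit.
+			 */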
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					MDIO_WC_REG_GP2_STATUS_GP_2_1,
+					&gp_status);
+			gp_status = ((gp_status >> 8) & 0xf) |
+				((gp_status >> 12) & 0xf);
+			link_up = gp_status & (1 << lane);
+		}
+		if (!link_up)
+			return -ESRCH;
+	} else {
+		CL22_RD_OVER_CL45(bp, int_phy,
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
+		/* Link is up only if both local phy and external phy are up */
+		if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+			return -ESRCH;
+	}
+	/* In XGXS loopback mode, do not check external PHY */
+	if (params->loopback_mode == LOOPBACK_XGXS)
+		return 0;
+
+	switch (params->num_phys) {
+	case 1:
+		/* No external PHY */
+		return 0;
+	case 2:
+		ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+			&params->phy[EXT_PHY1],
+			params, &temp_vars);
+		break;
+	case 3: /* Dual Media */
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			serdes_phy_type = ((params->phy[phy_index].media_type ==
+					    ETH_PHY_SFPP_10G_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_SFP_1G_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_XFP_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_DA_TWINAX));
+
+			if (is_serdes != serdes_phy_type)
+				continue;
+			if (params->phy[phy_index].read_status) {
+				ext_phy_link_up |=
+					params->phy[phy_index].read_status(
+						&params->phy[phy_index],
+						params, &temp_vars);
+			}
+		}
+		break;
+	}
+	if (ext_phy_link_up)
+		return 0;
+	return -ESRCH;
+}
+
+static int bnx2x_link_initialize(struct link_params *params,
+				 struct link_vars *vars)
+{
+	u8 phy_index, non_ext_phy;
+	struct bnx2x *bp = params->bp;
+	/* In case of external phy existence, the line speed would be the
+	 * line speed linked up by the external phy. In case it is direct
+	 * only, then the line_speed during initialization will be
+	 * equal to the req_line_speed
+	 */
+	vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+	/* Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires the internal phy to be initialized first.
+	 */
+	if (!USES_WARPCORE(bp))
+		bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
+	/* init ext phy and enable link state int */
+	non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+		       (params->loopback_mode == LOOPBACK_XGXS));
+
+	if (non_ext_phy ||
+	    (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+		struct bnx2x_phy *phy = &params->phy[INT_PHY];
+		if (vars->line_speed == SPEED_AUTO_NEG &&
+		    (CHIP_IS_E1x(bp) ||
+		     CHIP_IS_E2(bp)))
+			bnx2x_set_parallel_detection(phy, params);
+		if (params->phy[INT_PHY].config_init)
+			params->phy[INT_PHY].config_init(phy, params, vars);
+	}
+
+	/* Re-read this value in case it was changed inside config_init due to
+	 * limitations of optic module
+	 */
+	vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+	/* Init external phy*/
+	if (non_ext_phy) {
+		if (params->phy[INT_PHY].supported &
+		    SUPPORTED_FIBRE)
+			vars->link_status |= LINK_STATUS_SERDES_LINK;
+	} else {
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			/* No need to initialize the second phy in case of
+			 * first-phy-only selection. In case the second phy is
+			 * selected, we do need to initialize the first phy,
+			 * since they are connected.
+			 */
+			if (params->phy[phy_index].supported &
+			    SUPPORTED_FIBRE)
+				vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+			if (phy_index == EXT_PHY2 &&
+			    (bnx2x_phy_selection(params) ==
+			     PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+				DP(NETIF_MSG_LINK,
+				   "Not initializing second phy\n");
+				continue;
+			}
+			params->phy[phy_index].config_init(
+				&params->phy[phy_index],
+				params, vars);
+		}
+	}
+	/* Reset the interrupt indication after phy was initialized */
+	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+		       params->port*4,
+		       (NIG_STATUS_XGXS0_LINK10G |
+			NIG_STATUS_XGXS0_LINK_STATUS |
+			NIG_STATUS_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+	return 0;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+				 struct link_params *params)
+{
+	/* Reset the SerDes/XGXS */
+	REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+	       (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_port;
+	/* HW reset */
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+	DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static int bnx2x_update_link_down(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+
+	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+	vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+	/* Indicate no mac active */
+	vars->mac_type = MAC_TYPE_NONE;
+
+	/* Update shared memory */
+	vars->link_status &= ~LINK_UPDATE_MASK;
+	vars->line_speed = 0;
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* Activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* Disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	usleep_range(10000, 20000);
+	/* Reset BigMac/Xmac */
+	if (CHIP_IS_E1x(bp) ||
+	    CHIP_IS_E2(bp))
+		bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+	if (CHIP_IS_E3(bp)) {
+		/* Prevent LPI Generation by chip */
+		REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+		       0);
+		REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+		       0);
+		vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+				      SHMEM_EEE_ACTIVE_BIT);
+
+		bnx2x_update_mng_eee(params, vars->eee_status);
+		bnx2x_set_xmac_rxtx(params, 0);
+		bnx2x_set_umac_rxtx(params, 0);
+	}
+
+	return 0;
+}
+
+static int bnx2x_update_link_up(struct link_params *params,
+				struct link_vars *vars,
+				u8 link_10g)
+{
+	struct bnx2x *bp = params->bp;
+	u8 phy_idx, port = params->port;
+	int rc = 0;
+
+	vars->link_status |= (LINK_STATUS_LINK_UP |
+			      LINK_STATUS_PHYSICAL_LINK_FLAG);
+	vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+		vars->link_status |=
+			LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+		vars->link_status |=
+			LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+	if (USES_WARPCORE(bp)) {
+		if (link_10g) {
+			if (bnx2x_xmac_enable(params, vars, 0) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
+		} else
+			bnx2x_umac_enable(params, vars, 0);
+		bnx2x_set_led(params, vars,
+			      LED_MODE_OPER, vars->line_speed);
+
+		if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+		    (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+			DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+			REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+			       (params->port << 2), 1);
+			REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+			REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+			       (params->port << 2), 0xfc20);
+		}
+	}
+	if ((CHIP_IS_E1x(bp) ||
+	     CHIP_IS_E2(bp))) {
+		if (link_10g) {
+			if (bnx2x_bmac_enable(params, vars, 0, 1) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
+
+			bnx2x_set_led(params, vars,
+				      LED_MODE_OPER, SPEED_10000);
+		} else {
+			rc = bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+
+			/* AN complete? */
+			if ((vars->link_status &
+			     LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+			    && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+			    SINGLE_MEDIA_DIRECT(params))
+				bnx2x_set_gmii_tx_driver(params);
+		}
+	}
+
+	/* PBF - link up */
+	if (CHIP_IS_E1x(bp))
+		rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+				       vars->line_speed);
+
+	/* Disable drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+	/* Update shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+	bnx2x_update_mng_eee(params, vars->eee_status);
+	/* Check remote fault */
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_check_half_open_conn(params, vars, 0);
+			break;
+		}
+	}
+	msleep(20);
+	return rc;
+}
+
+static void bnx2x_chng_link_count(struct link_params *params, bool clear)
+{
+	struct bnx2x *bp = params->bp;
+	u32 addr, val;
+
+	/* Verify the link_change_count is supported by the MFW */
+	if (!(SHMEM2_HAS(bp, link_change_count)))
+		return;
+
+	addr = params->shmem2_base +
+		offsetof(struct shmem2_region, link_change_count[params->port]);
+	if (clear)
+		val = 0;
+	else
+		val = REG_RD(bp, addr) + 1;
+	REG_WR(bp, addr, val);
+}
+
+/* The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ *   to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ *   phy (XGXS) needs to be up as well as the external link of the
+ *   phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ *   external phy needs to be up, and at least one of the 2
+ *   external phy links must be up.
+ */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	struct link_vars phy_vars[MAX_PHYS];
+	u8 port = params->port;
+	u8 link_10g_plus, phy_index;
+	u32 prev_link_status = vars->link_status;
+	u8 ext_phy_link_up = 0, cur_link_up;
+	int rc = 0;
+	u8 is_mi_int = 0;
+	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+	u8 active_external_phy = INT_PHY;
+	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+	vars->link_status &= ~LINK_UPDATE_MASK;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		phy_vars[phy_index].flow_ctrl = 0;
+		phy_vars[phy_index].link_status = 0;
+		phy_vars[phy_index].line_speed = 0;
+		phy_vars[phy_index].duplex = DUPLEX_FULL;
+		phy_vars[phy_index].phy_link_up = 0;
+		phy_vars[phy_index].link_up = 0;
+		phy_vars[phy_index].fault_detected = 0;
+		/* eee_status gets different consideration, since vars holds inner state */
+		phy_vars[phy_index].eee_status = vars->eee_status;
+	}
+
+	if (USES_WARPCORE(bp))
+		bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
+	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+		 port, (vars->phy_flags & PHY_XGXS_FLAG),
+		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+				port*0x18) > 0);
+	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+		 is_mi_int,
+		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+	/* Disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	/* Step 1:
+	 * Check external link change only for external phys, and apply
+	 * priority selection between them in case the link on both phys
+	 * is up. Note that instead of the common vars, a temporary
+	 * vars argument is used since each phy may have different link/
+	 * speed/duplex result
+	 */
+	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+	      phy_index++) {
+		struct bnx2x_phy *phy = &params->phy[phy_index];
+		if (!phy->read_status)
+			continue;
+		/* Read link status and params of this ext phy */
+		cur_link_up = phy->read_status(phy, params,
+					       &phy_vars[phy_index]);
+		if (cur_link_up) {
+			DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+				   phy_index);
+		} else {
+			DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+				   phy_index);
+			continue;
+		}
+
+		if (!ext_phy_link_up) {
+			ext_phy_link_up = 1;
+			active_external_phy = phy_index;
+		} else {
+			switch (bnx2x_phy_selection(params)) {
+			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			/* In this option, the first PHY makes sure to pass the
+			 * traffic through itself only.
+			 * It's not clear how to reset the link on the second phy
+			 */
+				active_external_phy = EXT_PHY1;
+				break;
+			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			/* In this option, the first PHY makes sure to pass the
+			 * traffic through the second PHY.
+			 */
+				active_external_phy = EXT_PHY2;
+				break;
+			default:
+			/* Link indication on both PHYs with the following cases
+			 * is invalid:
+			 * - FIRST_PHY means that second phy wasn't initialized,
+			 * hence its link is expected to be down
+			 * - SECOND_PHY means that first phy should not be able
+			 * to link up by itself (using configuration)
+			 * - DEFAULT should be overridden during initialization
+			 */
+				DP(NETIF_MSG_LINK, "Invalid link indication"
+					   "mpc=0x%x. DISABLING LINK !!!\n",
+					   params->multi_phy_config);
+				ext_phy_link_up = 0;
+				break;
+			}
+		}
+	}
+	prev_line_speed = vars->line_speed;
+	/* Step 2:
+	 * Read the status of the internal phy. In case of
+	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
+	 * otherwise this is the link between the 577xx and the first
+	 * external phy
+	 */
+	if (params->phy[INT_PHY].read_status)
+		params->phy[INT_PHY].read_status(
+			&params->phy[INT_PHY],
+			params, vars);
+	/* The INT_PHY flow control resides in the vars. This includes the
+	 * case where the speed or flow control are not set to AUTO.
+	 * Otherwise, the active external phy flow control result is set
+	 * to the vars. The ext_phy_line_speed is needed to check if the
+	 * speed is different between the internal phy and external phy.
+	 * This case may be the result of an intermediate link speed change.
+	 */
+	if (active_external_phy > INT_PHY) {
+		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+		/* Link speed is taken from the XGXS. AN and FC result from
+		 * the external phy.
+		 */
+		vars->link_status |= phy_vars[active_external_phy].link_status;
+
+		/* If active_external_phy is the first PHY and link is up -
+		 * disable TX on the second external PHY
+		 */
+		if (active_external_phy == EXT_PHY1) {
+			if (params->phy[EXT_PHY2].phy_specific_func) {
+				DP(NETIF_MSG_LINK,
+				   "Disabling TX on EXT_PHY2\n");
+				params->phy[EXT_PHY2].phy_specific_func(
+					&params->phy[EXT_PHY2],
+					params, DISABLE_TX);
+			}
+		}
+
+		ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+		vars->duplex = phy_vars[active_external_phy].duplex;
+		if (params->phy[active_external_phy].supported &
+		    SUPPORTED_FIBRE)
+			vars->link_status |= LINK_STATUS_SERDES_LINK;
+		else
+			vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+		vars->eee_status = phy_vars[active_external_phy].eee_status;
+
+		DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+			   active_external_phy);
+	}
+
+	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+	      phy_index++) {
+		if (params->phy[phy_index].flags &
+		    FLAGS_REARM_LATCH_SIGNAL) {
+			bnx2x_rearm_latch_signal(bp, port,
+						 phy_index ==
+						 active_external_phy);
+			break;
+		}
+	}
+	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+		   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+		   vars->link_status, ext_phy_line_speed);
+	/* Upon link speed change set the NIG into drain mode. This
+	 * deals with a possible FIFO glitch due to clock change when speed
+	 * is decreased without a link down indication
+	 */
+
+	if (vars->phy_link_up) {
+		if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+		    (ext_phy_line_speed != vars->line_speed)) {
+			DP(NETIF_MSG_LINK, "Internal link speed %d is"
+				   " different than the external"
+				   " link speed %d\n", vars->line_speed,
+				   ext_phy_line_speed);
+			vars->phy_link_up = 0;
+		} else if (prev_line_speed != vars->line_speed) {
+			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+			       0);
+			usleep_range(1000, 2000);
+		}
+	}
+
+	/* Anything 10G and over uses the bmac */
+	link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+	bnx2x_link_int_ack(params, vars, link_10g_plus);
+
+	/* In case external phy link is up, and internal link is down
+	 * (probably not initialized yet, right after link initialization),
+	 * it needs to be initialized.
+	 * Note that after link down-up as a result of cable plug, the xgxs
+	 * link would probably become up again without the need to
+	 * initialize it.
+	 */
+	if (!(SINGLE_MEDIA_DIRECT(params))) {
+		DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+			   " init_preceding = %d\n", ext_phy_link_up,
+			   vars->phy_link_up,
+			   params->phy[EXT_PHY1].flags &
+			   FLAGS_INIT_XGXS_FIRST);
+		if (!(params->phy[EXT_PHY1].flags &
+		      FLAGS_INIT_XGXS_FIRST)
+		    && ext_phy_link_up && !vars->phy_link_up) {
+			vars->line_speed = ext_phy_line_speed;
+			if (vars->line_speed < SPEED_1000)
+				vars->phy_flags |= PHY_SGMII_FLAG;
+			else
+				vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+			if (params->phy[INT_PHY].config_init)
+				params->phy[INT_PHY].config_init(
+					&params->phy[INT_PHY], params,
+						vars);
+		}
+	}
+	/* Link is up only if both local phy and external phy (in case of
+	 * non-direct board) are up and no fault detected on active PHY.
+	 */
+	vars->link_up = (vars->phy_link_up &&
+			 (ext_phy_link_up ||
+			  SINGLE_MEDIA_DIRECT(params)) &&
+			 (phy_vars[active_external_phy].fault_detected == 0));
+
+	/* Update the PFC configuration in case it was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+	if (vars->link_up)
+		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
+	else
+		rc = bnx2x_update_link_down(params, vars);
+
+	if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP)
+		bnx2x_chng_link_count(params, false);
+
+	/* Update MCP link status was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
+	return rc;
+}
+
+/*****************************************************************************/
+/*			    External Phy section			     */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	usleep_range(1000, 2000);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
+				      u32 spirom_ver, u32 ver_addr)
+{
+	DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
+		 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
+
+	if (ver_addr)
+		REG_WR(bp, ver_addr, spirom_ver);
+}
+
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 port)
+{
+	u16 fw_ver1, fw_ver2;
+
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+				  phy->ver_addr);
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+				       struct bnx2x_phy *phy,
+				       struct link_vars *vars)
+{
+	u16 val;
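+	/* In the AN status register, bit 5 set means AN complete, and a
+	 * clear bit 0 means the link partner has no AN ability, i.e.
+	 * parallel detection was used.
+	 */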
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_STATUS, &val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_STATUS, &val);
+	if (val & (1<<5))
+		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+	if ((val & (1<<0)) == 0)
+		vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/*		common BCM8073/BCM8727 PHY SECTION		  */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	if (phy->req_line_speed == SPEED_10 ||
+	    phy->req_line_speed == SPEED_100) {
+		vars->flow_ctrl = phy->req_flow_ctrl;
+		return;
+	}
+
+	if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+	    (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+		u16 pause_result;
+		u16 ld_pause;		/* local */
+		u16 lp_pause;		/* link partner */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+		pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+		pause_result |= (lp_pause &
+				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
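+		/* pause_result now holds the local pause bits in [3:2] and
+		 * the link-partner bits in [1:0].
+		 */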
+		bnx2x_pause_resolve(phy, params, vars, pause_result);
+		DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+			   pause_result);
+	}
+}
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+					     struct bnx2x_phy *phy,
+					     u8 port)
+{
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	int rc = 0;
+
+	/* Boot port from external ROM  */
+	/* EDC grst */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x0001);
+
+	/* Ucode reboot and rst */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x008c);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+	/* Reset internal microprocessor */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+	/* Release srst bit */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
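+	/* Poll until the ROM version reads back valid and, for the 8073,
+	 * the microcode reports 0x03 in the MSGOUT register.
+	 */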
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_8727_external_rom_boot port %x:"
+				 "Download failed. fw version = 0x%x\n",
+				 port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+		usleep_range(1000, 2000);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+			((fw_msgout & 0xff) != 0x03 && (phy->type ==
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
+
+	/* Clear ser_boot_ctl bit */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+	bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+	DP(NETIF_MSG_LINK,
+		 "bnx2x_8073_8727_external_rom_boot port %x:"
+		 "Download complete. fw version = 0x%x\n",
+		 port, fw_ver1);
+
+	return rc;
+}
+
+/******************************************************************/
+/*			BCM8073 PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* This is required only for 8073 A1, firmware version 0x102 */
+	u16 val;
+
+	/* Read 8073 HW revision*/
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+	if (val != 1) {
+		/* Not 8073 A1 - no need for the workaround */
+		return 0;
+	}
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2, &val);
+
+	/* SNR should be applied only for version 0x102 */
+	if (val != 0x102)
+		return 0;
+
+	return 1;
+}
+
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt, cnt1;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+	if (val > 0) {
+		/* No need for the workaround in 8073 A1 or later */
+		return 0;
+	}
+	/* XAUI workaround in 8073 A0: */
+
+	/* After loading the boot ROM and restarting Autoneg, poll
+	 * Dev1, Reg $C820:
+	 */
+
+	for (cnt = 0; cnt < 1000; cnt++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+				&val);
+		  /* If bit [14] = 0 or bit [13] = 0, continue on with
+		   * system initialization (XAUI work-around not required, as
+		   * these bits indicate 2.5G or 1G link up).
+		   */
+		if (!(val & (1<<14)) || !(val & (1<<13))) {
+			DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
+			return 0;
+		} else if (!(val & (1<<15))) {
+			DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/* If bit 15 is 0, then poll Dev1, Reg $C841 until its
+			 * MSB (bit 15) goes to 1 (indicating that the XAUI
+			 * workaround has completed), then continue on with
+			 * system initialization.
+			 */
+			for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+				bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8073_XAUI_WA, &val);
+				if (val & (1<<15)) {
+					DP(NETIF_MSG_LINK,
+					  "XAUI workaround has completed\n");
+					return 0;
+				 }
+				 usleep_range(3000, 6000);
+			}
+			break;
+		}
+		usleep_range(3000, 6000);
+	}
+	DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
+	return -EINVAL;
+}
+
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* Force KR or KX */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      struct link_vars *vars)
+{
+	u16 cl37_val;
+	struct bnx2x *bp = params->bp;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	}
+	DP(NETIF_MSG_LINK,
+		 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+	msleep(500);
+}
+
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		/* Enable LASI */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
+		break;
+	}
+}
+
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0, tmp1;
+	u8 gpio_port;
+	DP(NETIF_MSG_LINK, "Init 8073\n");
+
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	bnx2x_8073_specific_func(phy, params, PHY_INIT);
+	bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+	/* Enable CL37 BAM */
+	if (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8073_BAM, val | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
+	if (params->loopback_mode == LOOPBACK_EXT) {
+		bnx2x_807x_force_10G(bp, phy);
+		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+		return 0;
+	} else {
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+	}
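+	/* In the AN advertisement register written below, bit 7 advertises
+	 * 10G and bit 5 advertises 1G (1G advertisement is also required
+	 * for 2.5G operation).
+	 */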
+	if (phy->req_line_speed != SPEED_AUTO_NEG) {
+		if (phy->req_line_speed == SPEED_10000) {
+			val = (1<<7);
+		} else if (phy->req_line_speed ==  SPEED_2500) {
+			val = (1<<5);
+			/* Note that 2.5G works only when used with 1G
+			 * advertisement
+			 */
+		} else
+			val = (1<<5);
+	} else {
+		val = 0;
+		if (phy->speed_cap_mask &
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			val |= (1<<7);
+
+		/* Note that 2.5G works only when used with 1G advertisement */
+		if (phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+			 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			val |= (1<<5);
+		DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+	if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+	     (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+	    (phy->req_line_speed == SPEED_2500)) {
+		u16 phy_ver;
+		/* Allow 2.5G for A1 and above */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+				&phy_ver);
+		DP(NETIF_MSG_LINK, "Add 2.5G\n");
+		if (phy_ver > 0)
+			tmp1 |= 1;
+		else
+			tmp1 &= 0xfffe;
+	} else {
+		DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+		tmp1 &= 0xfffe;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+	/* Add support for CL37 (passive mode) II */
+
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+			 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+				  0x20 : 0x40)));
+
+	/* Add support for CL37 (passive mode) III */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+	/* The SNR will improve about 2dB by changing BW and FFE main
+	 * tap. The rest of the commands are executed after link is up.
+	 * Change FFE main cursor to 5 in EDC register
+	 */
+	if (bnx2x_8073_is_snr_needed(bp, phy))
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+				 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+	tmp1 |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Restart autoneg */
+	msleep(500);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+	DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+		   ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+	return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0;
+	u16 val1, val2;
+	u16 link_status = 0;
+	u16 an1000_status = 0;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
+
+	/* Clear the interrupt LASI status register */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/* Check the LASI */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+	/* Check the link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	link_up = ((val1 & 4) == 4);
+	DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+	if (link_up &&
+	     ((phy->req_line_speed != SPEED_10000))) {
+		if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+			return 0;
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+	/* Check the link status on 1.1.2 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+		   "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+		/* The SNR will improve about 2dB by changing the BW and FFE
+		 * main tap. The 1st write to change FFE main tap is set
+		 * before restarting AN. Change PLL Bandwidth in EDC register
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+				 0x26BC);
+
+		/* Change CDR Bandwidth in EDC register */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+				 0x0333);
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+			&link_status);
+
+	/* Bits 0..2 --> speed detected, bits 13..15--> link is down */
+	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
+	} else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_2500;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+			   params->port);
+	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_1000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+			   params->port);
+	} else {
+		link_up = 0;
+		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+			   params->port);
+	}
+
+	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/* Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it's in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      "the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+	}
+
+	if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_LP_AUTO_NEG2, &val1);
+
+		if (val1 & (1<<5))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+		if (val1 & (1<<7))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+	}
+
+	return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_port;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+	   gpio_port);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+}
+
+/******************************************************************/
+/*			BCM8705 PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "init 8705\n");
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+	/* BCM8705 doesn't have microcode, hence the 0 */
+	bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+	return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, rx_sd;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "read status 8705\n");
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
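+	/* 1.c809 is read twice since the first read may return a stale,
+	 * latched value.
+	 */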
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, 0xc809, &val1);
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, 0xc809, &val1);
+
+	DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+	link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+	if (link_up) {
+		vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
+
+/******************************************************************/
+/*			SFP+ module Section			  */
+/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+					   struct bnx2x_phy *phy,
+					   u8 pmd_dis)
+{
+	struct bnx2x *bp = params->bp;
+	/* Disable transmitter only for bootcodes which can enable it afterwards
+	 * (for D3 link)
+	 */
+	if (pmd_dis) {
+		if (params->feature_config_flags &
+		     FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+			DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+		else {
+			DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+			return;
+		}
+	} else
+		DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
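+/* Return the GPIO port for this path/port, flipped when both the NIG
+ * port-swap and strap-override registers are set.
+ */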
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+	u8 gpio_port;
+	u32 swap_val, swap_override;
+	struct bnx2x *bp = params->bp;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	return gpio_port ^ (swap_val && swap_override);
+}
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+					   struct bnx2x_phy *phy,
+					   u8 tx_en)
+{
+	u16 val;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 tx_en_mode;
+
+	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				     dev_info.port_hw_config[port].sfp_ctrl)) &
+		PORT_HW_CFG_TX_LASER_MASK;
+	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+			   "mode = %x\n", tx_en, port, tx_en_mode);
+	switch (tx_en_mode) {
+	case PORT_HW_CFG_TX_LASER_MDIO:
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val);
+
+		if (tx_en)
+			val &= ~(1<<15);
+		else
+			val |= (1<<15);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER,
+				 val);
+	break;
+	case PORT_HW_CFG_TX_LASER_GPIO0:
+	case PORT_HW_CFG_TX_LASER_GPIO1:
+	case PORT_HW_CFG_TX_LASER_GPIO2:
+	case PORT_HW_CFG_TX_LASER_GPIO3:
+	{
+		u16 gpio_pin;
+		u8 gpio_port, gpio_mode;
+		if (tx_en)
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+		else
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+		gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+		gpio_port = bnx2x_get_gpio_port(params);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+		break;
+	}
+}
+
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+	if (CHIP_IS_E3(bp))
+		bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+	else
+		bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
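+/* Read up to one EEPROM page from the SFP+ module through the 8726 two-wire
+ * interface: program the byte count and address, fire the read command, poll
+ * for completion, then copy the bytes out of the PHY data buffer.
+ */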
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8 is_init)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+	u16 i;
+	if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+		DP(NETIF_MSG_LINK,
+		   "Reading from eeprom is limited to 0xf\n");
+		return -EINVAL;
+	}
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 (byte_cnt | (dev_addr << 8)));
+
+	/* Set the read command address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
+
+	/* Activate read command */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x2c0f);
+
+	/* Wait up to 500us for command complete status */
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+			break;
+		udelay(5);
+	}
+
+	if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+		DP(NETIF_MSG_LINK,
+			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+		return -EINVAL;
+	}
+
+	/* Read the buffer */
+	for (i = 0; i < byte_cnt; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+	}
+
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+			return 0;
+		usleep_range(1000, 2000);
+	}
+	return -EINVAL;
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+					u8 power)
+{
+	u32 pin_cfg;
+	struct bnx2x *bp = params->bp;
+
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+		       power, pin_cfg);
+	/* Low ==> corresponding SFP+ module is powered
+	 * high ==> the SFP+ module is powered down
+	 */
+	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
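+
+/* Read up to one EEPROM page from the SFP+ module using bnx2x_bsc_read().
+ * Failed transfers are retried, power-cycling the module after a few
+ * attempts (except during init), and the requested bytes are then copied
+ * out of the 32-bit aligned data buffer.
+ */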
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+						 struct link_params *params,
+						 u8 dev_addr,
+						 u16 addr, u8 byte_cnt,
+						 u8 *o_buf, u8 is_init)
+{
+	int rc = 0;
+	u8 i, j = 0, cnt = 0;
+	u32 data_array[4];
+	u16 addr32;
+	struct bnx2x *bp = params->bp;
+
+	if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+		DP(NETIF_MSG_LINK,
+		   "Reading from eeprom is limited to 16 bytes\n");
+		return -EINVAL;
+	}
+
+	/* 4 byte aligned address */
+	addr32 = addr & (~0x3);
+	do {
+		if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
+			bnx2x_warpcore_power_module(params, 0);
+			/* Note that 100us is not enough here */
+			usleep_range(1000, 2000);
+			bnx2x_warpcore_power_module(params, 1);
+		}
+		rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
+				    data_array);
+	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+	if (rc == 0) {
+		for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+			o_buf[j] = *((u8 *)data_array + i);
+			j++;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8 is_init)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, i;
+
+	if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+		DP(NETIF_MSG_LINK,
+		   "Reading from eeprom is limited to 0xf\n");
+		return -EINVAL;
+	}
+
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	 * to 100kHz since some DACs (direct attached cables) do
+	 * not work at 400kHz.
+	 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+			 ((dev_addr << 8) | 1));
+
+	/* Need to read from 1.8000 to clear it */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			&val);
+
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 ((byte_cnt < 2) ? 2 : byte_cnt));
+
+	/* Set the read command address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
+	/* Set the destination address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 0x8004,
+			 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+	/* Activate read command */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x8002);
+	/* Wait appropriate time for two-wire command to finish before
+	 * polling the status register
+	 */
+	usleep_range(1000, 2000);
+
+	/* Wait up to 500us for command complete status */
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+			break;
+		udelay(5);
+	}
+
+	if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+		DP(NETIF_MSG_LINK,
+			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+		return -EFAULT;
+	}
+
+	/* Read the buffer */
+	for (i = 0; i < byte_cnt; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+		o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+	}
+
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+			return 0;
+		usleep_range(1000, 2000);
+	}
+
+	return -EINVAL;
+}
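+
+/* Generic SFP+ EEPROM read: validate the device address (0xa0/0xa2), pick
+ * the PHY-specific read routine, and split the request into page-sized
+ * chunks.
+ */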
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u8 dev_addr,
+				 u16 addr, u16 byte_cnt, u8 *o_buf)
+{
+	int rc = 0;
+	struct bnx2x *bp = params->bp;
+	u8 xfer_size;
+	u8 *user_data = o_buf;
+	read_sfp_module_eeprom_func_p read_func;
+
+	if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+		DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+		return -EINVAL;
+	}
+
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		read_func = bnx2x_8726_read_sfp_module_eeprom;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		read_func = bnx2x_8727_read_sfp_module_eeprom;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	while (!rc && (byte_cnt > 0)) {
+		xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+			SFP_EEPROM_PAGE_SIZE : byte_cnt;
+		rc = read_func(phy, params, dev_addr, addr, xfer_size,
+			       user_data, 0);
+		byte_cnt -= xfer_size;
+		user_data += xfer_size;
+		addr += xfer_size;
+	}
+	return rc;
+}
+
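+/* Read the SFP+ module EEPROM to classify the module (passive/active DAC,
+ * 1G or 10G optical), update the media type in shmem, and select the
+ * matching EDC mode (limiting, linear or DAC).
+ */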
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      u16 *edc_mode)
+{
+	struct bnx2x *bp = params->bp;
+	u32 sync_offset = 0, phy_idx, media_types;
+	u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
+	*edc_mode = EDC_MODE_LIMITING;
+	phy->media_type = ETH_PHY_UNSPECIFIED;
+	/* First check for copper cable */
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 I2C_DEV_ADDR_A0,
+					 0,
+					 SFP_EEPROM_FC_TX_TECH_ADDR + 1,
+					 (u8 *)val) != 0) {
+		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
+		return -EINVAL;
+	}
+	params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+	params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+		LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
+	switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
+	case SFP_EEPROM_CON_TYPE_VAL_COPPER:
+	{
+		u8 copper_module_type;
+		phy->media_type = ETH_PHY_DA_TWINAX;
+		/* Check whether it is an active cable (includes SFP+ module)
+		 * or a passive cable
+		 */
+		copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
+
+		if (copper_module_type &
+		    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+			DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
+			if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+				*edc_mode = EDC_MODE_ACTIVE_DAC;
+			else
+				check_limiting_mode = 1;
+		} else {
+			*edc_mode = EDC_MODE_PASSIVE_DAC;
+			/* Even in case PASSIVE_DAC indication is not set,
+			 * treat it as a passive DAC cable, since some cables
+			 * don't have this indication.
+			 */
+			if (copper_module_type &
+			    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+				DP(NETIF_MSG_LINK,
+				   "Passive Copper cable detected\n");
+			} else {
+				DP(NETIF_MSG_LINK,
+				   "Unknown copper-cable-type\n");
+			}
+		}
+		break;
+	}
+	case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
+	case SFP_EEPROM_CON_TYPE_VAL_LC:
+	case SFP_EEPROM_CON_TYPE_VAL_RJ45:
+		check_limiting_mode = 1;
+		if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+		     (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+		      SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+		       SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) &&
+		    (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
+			DP(NETIF_MSG_LINK, "1G SFP module detected\n");
+			phy->media_type = ETH_PHY_SFP_1G_FIBER;
+			if (phy->req_line_speed != SPEED_1000) {
+				u8 gport = params->port;
+				phy->req_line_speed = SPEED_1000;
+				if (!CHIP_IS_E1x(bp)) {
+					gport = BP_PATH(bp) +
+					(params->port << 1);
+				}
+				netdev_err(bp->dev,
+					   "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+					   gport);
+			}
+			if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+			    SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+				bnx2x_sfp_set_transmitter(params, phy, 0);
+				msleep(40);
+				bnx2x_sfp_set_transmitter(params, phy, 1);
+			}
+		} else {
+			int idx, cfg_idx = 0;
+			DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+			for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+				if (params->phy[idx].type == phy->type) {
+					cfg_idx = LINK_CONFIG_IDX(idx);
+					break;
+				}
+			}
+			phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+			phy->req_line_speed = params->req_line_speed[cfg_idx];
+		}
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
+			 val[SFP_EEPROM_CON_TYPE_ADDR]);
+		return -EINVAL;
+	}
+	sync_offset = params->shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[params->port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+	/* Update media type for non-PMF sync */
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (&(params->phy[phy_idx]) == phy) {
+			media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			break;
+		}
+	}
+	REG_WR(bp, sync_offset, media_types);
+	if (check_limiting_mode) {
+		u8 options[SFP_EEPROM_OPTIONS_SIZE];
+		if (bnx2x_read_sfp_module_eeprom(phy,
+						 params,
+						 I2C_DEV_ADDR_A0,
+						 SFP_EEPROM_OPTIONS_ADDR,
+						 SFP_EEPROM_OPTIONS_SIZE,
+						 options) != 0) {
+			DP(NETIF_MSG_LINK,
+			   "Failed to read Option field from module EEPROM\n");
+			return -EINVAL;
+		}
+		if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+			*edc_mode = EDC_MODE_LINEAR;
+		else
+			*edc_mode = EDC_MODE_LIMITING;
+	}
+	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
+	return 0;
+}
+/* This function reads the relevant fields from the SFP+ module EEPROM and
+ * verifies that the module is compliant with this board.
+ */
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+				   struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u32 val, cmd;
+	u32 fw_resp, fw_cmd_param;
+	char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
+	char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
+	phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
+	val = REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_feature_config[params->port].config));
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+		DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+		return 0;
+	}
+
+	if (params->feature_config_flags &
+	    FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+		/* Use specific phy request */
+		cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+	} else if (params->feature_config_flags &
+		   FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+		/* Use first phy request only in case of non-dual media*/
+		if (DUAL_MEDIA(params)) {
+			DP(NETIF_MSG_LINK,
+			   "FW does not support OPT MDL verification\n");
+			return -EINVAL;
+		}
+		cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+	} else {
+		/* No support for OPT MDL verification */
+		DP(NETIF_MSG_LINK,
+		   "FW does not support OPT MDL verification\n");
+		return -EINVAL;
+	}
+
+	fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+	fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+	if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+		DP(NETIF_MSG_LINK, "Approved module\n");
+		return 0;
+	}
+
+	/* Format the warning message */
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 I2C_DEV_ADDR_A0,
+					 SFP_EEPROM_VENDOR_NAME_ADDR,
+					 SFP_EEPROM_VENDOR_NAME_SIZE,
+					 (u8 *)vendor_name))
+		vendor_name[0] = '\0';
+	else
+		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 I2C_DEV_ADDR_A0,
+					 SFP_EEPROM_PART_NO_ADDR,
+					 SFP_EEPROM_PART_NO_SIZE,
+					 (u8 *)vendor_pn))
+		vendor_pn[0] = '\0';
+	else
+		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+	netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
+			      " Port %d from %s part number %s\n",
+			 params->port, vendor_name, vendor_pn);
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+		phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	return -EINVAL;
+}
+
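+/* Poll the module EEPROM until it answers, since initialization after a
+ * hot-plug can take up to a few hundred milliseconds.
+ */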
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+						 struct link_params *params)
+
+{
+	u8 val;
+	int rc;
+	struct bnx2x *bp = params->bp;
+	u16 timeout;
+	/* Initialization time after hot-plug may take up to 300ms for
+	 * some PHY types (e.g. JDSU)
+	 */
+
+	for (timeout = 0; timeout < 60; timeout++) {
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+			rc = bnx2x_warpcore_read_sfp_module_eeprom(
+				phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+				1);
+		else
+			rc = bnx2x_read_sfp_module_eeprom(phy, params,
+							  I2C_DEV_ADDR_A0,
+							  1, 1, &val);
+		if (rc == 0) {
+			DP(NETIF_MSG_LINK,
+			   "SFP+ module initialization took %d ms\n",
+			   timeout * 5);
+			return 0;
+		}
+		usleep_range(5000, 10000);
+	}
+	rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+					  1, 1, &val);
+	return rc;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+				    struct bnx2x_phy *phy,
+				    u8 is_power_up) {
+	/* Make sure GPIOs are not being used for LED mode */
+	u16 val;
+	/* In the GPIO register, bit 4 is used to determine if the GPIOs are
+	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
+	 * output.
+	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1,
+	 * where the 1st bit is the over-current (input only), and the 2nd bit
+	 * is for power (output only).
+	 *
+	 * In case the NOC feature is disabled and power is up, set the GPIO
+	 * control as input to enable listening for the over-current
+	 * indication.
+	 */
+	if (phy->flags & FLAGS_NOC)
+		return;
+	if (is_power_up)
+		val = (1<<4);
+	else
+		/* Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+		 */
+		val = (1<<1);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+
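+/* Switch the 8726 EDC between limiting and LRM mode. LRM is the default and
+ * switching to it takes a few seconds, so it is only programmed when the
+ * current mode is limiting.
+ */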
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 cur_limiting_mode;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&cur_limiting_mode);
+	DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+		 cur_limiting_mode);
+
+	if (edc_mode == EDC_MODE_LIMITING) {
+		DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 EDC_MODE_LIMITING);
+	} else { /* LRM mode (default) */
+
+		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+		/* Changing to LRM mode takes quite a few seconds, so do it
+		 * only if the current mode is limiting (the default is LRM)
+		 */
+		if (cur_limiting_mode != EDC_MODE_LIMITING)
+			return 0;
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 0x128);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_MISC_CTRL0,
+				 0x4008);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0xaaaa);
+	}
+	return 0;
+}
+
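+/* Program the 8727 EDC mode into the low byte of ROM_VER2, clearing bit 9
+ * of the PHY identifier register before the update and setting it
+ * afterwards.
+ */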
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 phy_identifier;
+	u16 rom_ver2_val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER,
+			&phy_identifier);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier & ~(1<<9)));
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&rom_ver2_val);
+	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_ROM_VER2,
+			 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier | (1<<9)));
+
+	return 0;
+}
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	switch (action) {
+	case DISABLE_TX:
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+		break;
+	case ENABLE_TX:
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+			bnx2x_sfp_set_transmitter(params, phy, 1);
+		break;
+	case PHY_INIT:
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 (1<<2) | (1<<5));
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+				 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+		/* Make MOD_ABS give interrupt on change */
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+				&val);
+		val |= (1<<12);
+		if (phy->flags & FLAGS_NOC)
+			val |= (3<<5);
+		/* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+		 * status which reflects SFP+ module over-current
+		 */
+		if (!(phy->flags & FLAGS_NOC))
+			val &= 0xff8f; /* Reset bits 4-6 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+				 val);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+		   action);
+		return;
+	}
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+
+	u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].sfp_ctrl)) &
+		PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+	switch (fault_led_gpio) {
+	case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+		return;
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+	{
+		u8 gpio_port = bnx2x_get_gpio_port(params);
+		u16 gpio_pin = fault_led_gpio -
+			PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+		DP(NETIF_MSG_LINK, "Set fault module-detected led "
+				   "pin %x port %x mode %x\n",
+			       gpio_pin, gpio_port, gpio_mode);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+	}
+	break;
+	default:
+		DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+			       fault_led_gpio);
+	}
+}
+
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+					  u8 gpio_mode)
+{
+	u32 pin_cfg;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region,
+				  dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+		PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+		PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+	DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+		       gpio_mode, pin_cfg);
+	bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+	if (CHIP_IS_E3(bp)) {
+		/* Low ==> if SFP+ module is supported otherwise
+		 * High ==> if SFP+ module is not on the approved vendor list
+		 */
+		bnx2x_set_e3_module_fault_led(params, gpio_mode);
+	} else
+		bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	bnx2x_warpcore_power_module(params, 0);
+	/* Put Warpcore in low power mode */
+	REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+	/* Put LCPLL in low power mode */
+	REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+	REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+	REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+				   struct bnx2x_phy *phy,
+				   u8 power)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_power_module(params->bp, phy, power);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_power_module(params, power);
+		break;
+	default:
+		break;
+	}
+}
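+
+/* Map the EDC mode to a Warpcore microcode firmware mode (default or
+ * SFP DAC), program it into this lane's nibble of the global firmware-mode
+ * register, and reset the lane so the microcode re-reads it.
+ */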
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+					     struct bnx2x_phy *phy,
+					     u16 edc_mode)
+{
+	u16 val = 0;
+	u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+	struct bnx2x *bp = params->bp;
+
+	u8 lane = bnx2x_get_warpcore_lane(phy, params);
+	/* This is a global register which controls all lanes */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+	val &= ~(0xf << (lane << 2));
+
+	switch (edc_mode) {
+	case EDC_MODE_LINEAR:
+	case EDC_MODE_LIMITING:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+		break;
+	case EDC_MODE_PASSIVE_DAC:
+	case EDC_MODE_ACTIVE_DAC:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+		break;
+	default:
+		break;
+	}
+
+	val |= (mode << (lane << 2));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+	/* A must read */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+	/* Restart microcode to re-read the new mode */
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+
+}
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    u16 edc_mode)
+{
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+		break;
+	}
+}
+
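+/* Handle a detected SFP+ module: power it up, determine its EDC mode,
+ * verify it against the approved list, drive the fault LED, and apply the
+ * configured enforcement (power-down or TX laser disable) if unapproved.
+ */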
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 edc_mode;
+	int rc = 0;
+
+	u32 val = REG_RD(bp, params->shmem_base +
+			     offsetof(struct shmem_region, dev_info.
+				     port_feature_config[params->port].config));
+	/* Enable transmitter by default */
+	bnx2x_sfp_set_transmitter(params, phy, 1);
+	DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
+		 params->port);
+	/* Power up module */
+	bnx2x_power_sfp_module(params, phy, 1);
+	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
+		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
+		return -EINVAL;
+	} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
+		/* Check SFP+ module compatibility */
+		DP(NETIF_MSG_LINK, "Module verification failed!!\n");
+		rc = -EINVAL;
+		/* Turn on fault module-detected led */
+		bnx2x_set_sfp_module_fault_led(params,
+					       MISC_REGISTERS_GPIO_HIGH);
+
+		/* Check whether the SFP+ module needs to be powered down */
+		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+			DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
+			bnx2x_power_sfp_module(params, phy, 0);
+			return rc;
+		}
+	} else {
+		/* Turn off fault module-detected led */
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+	}
+
+	/* Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
+	bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+	/* Disable transmit for this module if the module is not approved, and
+	 * laser needs to be disabled.
+	 */
+	if ((rc) &&
+	    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+	     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+
+	return rc;
+}
+
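+/* MOD_ABS interrupt handler: read the GPIO to see whether a module was
+ * plugged in or out, re-arm the interrupt for the opposite edge, and either
+ * run module detection (reconfiguring the Warpcore SFI speed on E3 when
+ * needed) or mark the media as not present.
+ */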
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	u32 gpio_val;
+	u8 gpio_num, gpio_port;
+	if (CHIP_IS_E3(bp)) {
+		phy = &params->phy[INT_PHY];
+		/* Always enable TX laser; it will be disabled in case of fault */
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
+		phy = &params->phy[EXT_PHY1];
+	}
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+				      params->port, &gpio_num, &gpio_port) ==
+	    -EINVAL) {
+		DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+		return;
+	}
+
+	/* Set valid module led off */
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+	/* Get current gpio val reflecting module plugged in / out */
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* Call the handling function in case module is detected */
+	if (gpio_val == 0) {
+		bnx2x_set_mdio_emac_per_phy(bp, params);
+		bnx2x_set_aer_mmd(params, phy);
+
+		bnx2x_power_sfp_module(params, phy, 1);
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+				   gpio_port);
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
+			bnx2x_sfp_module_detection(phy, params);
+			if (CHIP_IS_E3(bp)) {
+				u16 rx_tx_in_reset;
+				/* In case WC is out of reset, reconfigure the
+				 * link speed while taking into account 1G
+				 * module limitation.
+				 */
+				bnx2x_cl45_read(bp, phy,
+						MDIO_WC_DEVAD,
+						MDIO_WC_REG_DIGITAL5_MISC6,
+						&rx_tx_in_reset);
+				if ((!rx_tx_in_reset) &&
+				    (params->link_flags &
+				     PHY_INITIALIZED)) {
+					bnx2x_warpcore_reset_lane(bp, phy, 1);
+					bnx2x_warpcore_config_sfi(phy, params);
+					bnx2x_warpcore_reset_lane(bp, phy, 0);
+				}
+			}
+		} else {
+			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+		}
+	} else {
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+				   gpio_port);
+		/* Module was plugged out.
+		 * Disable transmit for this module
+		 */
+		phy->media_type = ETH_PHY_NOT_PRESENT;
+	}
+}
+
+/******************************************************************/
+/*		Used by 8706 and 8727                             */
+/******************************************************************/
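+/* Read the given LASI alarm status (twice, since it is clear-on-read) and
+ * mask the fault event in the control register while it is asserted,
+ * re-enabling it once it clears.
+ */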
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+				 struct bnx2x_phy *phy,
+				 u16 alarm_status_offset,
+				 u16 alarm_ctrl_offset)
+{
+	u16 alarm_status, val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	/* Mask or enable the fault event. */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+	if (alarm_status & (1<<0))
+		val &= ~(1<<0);
+	else
+		val |= (1<<0);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+/******************************************************************/
+/*		common BCM8706/BCM8726 PHY SECTION		  */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, val2, rx_sd, pcs_status;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+	/* Clear RX Alarm */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
+	/* Clear LASI indication */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+	DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+			" link_status 0x%x\n", rx_sd, pcs_status, val2);
+	/* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
+	 */
+	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+	if (link_up) {
+		if (val2 & (1<<1))
+			vars->line_speed = SPEED_1000;
+		else
+			vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+	}
+
+	/* Capture 10G link fault. Read twice to clear stale value. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		if (val1 & (1<<0))
+			vars->fault_detected = 1;
+	}
+
+	return link_up;
+}
+
+/******************************************************************/
+/*			BCM8706 PHY SECTION			  */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
+	struct bnx2x *bp = params->bp;
+
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait until fw is loaded */
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+		if (val)
+			break;
+		usleep_range(10000, 20000);
+	}
+	DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		u8 i;
+		u16 reg;
+		for (i = 0; i < 4; i++) {
+			reg = MDIO_XS_8706_REG_BANK_RX0 +
+				i*(MDIO_XS_8706_REG_BANK_RX1 -
+				   MDIO_XS_8706_REG_BANK_RX0);
+			bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+			/* Clear first 3 bits of the control */
+			val &= ~0x7;
+			/* Set control bits according to configuration */
+			val |= (phy->rx_preemphasis[i] & 0x7);
+			DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+				   " reg 0x%x <-- val 0x%x\n", reg, val);
+			bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+		}
+	}
+	/* Force speed */
+	if (phy->req_line_speed == SPEED_10000) {
+		DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+				 0);
+		/* Arm LASI for link and Tx fault. */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+	} else {
+		/* Force 1Gbps using autoneg with 1G advertisement */
+
+		/* Allow CL37 through CL73 */
+		DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+		/* Enable Full-Duplex advertisement on CL37 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+		/* Enable CL37 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		/* 1G support */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+		/* Enable clause 73 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x0400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+				 0x0004);
+	}
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+	/* If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+	 */
+
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+		tmp1 |= 0x1;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+	}
+
+	return 0;
+}
+
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	return bnx2x_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/*			BCM8726 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	/* Need to wait 100ms after reset */
+	msleep(100);
+
+	/* Micro controller re-boot */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+	/* Set soft reset */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+	/* Wait for 150ms for microcode load */
+	msleep(150);
+
+	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+	msleep(200);
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+}
+
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val1;
+	u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val1);
+		if (val1 & (1<<15)) {
+			DP(NETIF_MSG_LINK, "Tx is disabled\n");
+			link_up = 0;
+			vars->line_speed = 0;
+		}
+	}
+	return link_up;
+}
+
+
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_8726_external_rom_boot(phy, params);
+
+	/* Need to call module detection on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * the driver is loaded, and when the driver is loaded it resets all
+	 * registers, including the transmitter
+	 */
+	bnx2x_sfp_module_detection(phy, params);
+
+	if (phy->req_line_speed == SPEED_1000) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x400);
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   (phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+		   ((phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		/* Set Flow control */
+		bnx2x_ext_phy_set_pause(params, phy, vars);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		bnx2x_cl45_write(bp, phy,
+				MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		/* Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x400);
+
+	} else { /* Default 10G. Set only LASI control */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+	}
+
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK,
+		   "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+			 phy->tx_preemphasis[0],
+			 phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
+	}
+
+	return 0;
+
+}
+
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+	/* Set serial boot control for external load */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/*			BCM8727 PHY SECTION			  */
+/******************************************************************/
+
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 led_mode_bitmask = 0;
+	u16 gpio_pins_bitmask = 0;
+	u16 val;
+	/* Only the NOC flavor requires setting the LED specifically */
+	if (!(phy->flags & FLAGS_NOC))
+		return;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x03;
+		break;
+	case LED_MODE_ON:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x02;
+		break;
+	case LED_MODE_OPER:
+		led_mode_bitmask = 0x60;
+		gpio_pins_bitmask = 0x11;
+		break;
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			&val);
+	val &= 0xff8f;
+	val |= led_mode_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			 val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_GPIO_CTRL,
+			&val);
+	val &= 0xffe0;
+	val |= gpio_pins_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params) {
+	u32 swap_val, swap_override;
+	u8 port;
+	/* The PHY reset is controlled by GPIO 1. Fake the port number
+	 * to cancel the swap done in set_gpio()
+	 */
+	struct bnx2x *bp = params->bp;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	port = (swap_val && swap_override) ^ 1;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 tmp1, val;
+	/* Set option 1G speed */
+	if ((phy->req_line_speed == SPEED_1000) ||
+	    (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+		/* Power down the XAUI until link is up in case of dual-media
+		 * and 1G
+		 */
+		if (DUAL_MEDIA(params)) {
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8727_PCS_GP, &val);
+			val |= (3<<10);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8727_PCS_GP, val);
+		}
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+		   ((phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		   PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+	} else {
+		/* Since the 8727 has only a single reset pin, the 10G
+		 * registers need to be set even though they are the default
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+				 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+				 0x0008);
+	}
+}
+
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	u32 tx_en_mode;
+	u16 tmp1, mod_abs, tmp2;
+	struct bnx2x *bp = params->bp;
+	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+
+	bnx2x_8727_specific_func(phy, params, PHY_INIT);
+	/* Initially configure MOD_ABS to interrupt when a module is
+	 * present (bit 8)
+	 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+	/* Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
+	mod_abs &= ~(1<<8);
+	if (!(phy->flags & FLAGS_NOC))
+		mod_abs &= ~(1<<9);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+	/* Enable/Disable PHY transmitter output */
+	bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
+	bnx2x_8727_power_module(bp, phy, 1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+	bnx2x_8727_config_speed(phy, params);
+
+
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+			   phy->tx_preemphasis[0],
+			   phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
+	}
+
+	/* If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+	 */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+		tmp2 |= 0x1000;
+		tmp2 &= 0xFFEF;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&tmp2);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				 (tmp2 & 0x7fff));
+	}
+
+	return 0;
+}
+
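+/* Handle a MOD_ABS change on the 8727: flip the MOD_ABS/OPRXLOS bits to
+ * catch the next event, clear the RX alarm, and on module insertion run
+ * module detection and reconfigure the link speed.
+ */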
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mod_abs, rx_alarm_status;
+	u32 val = REG_RD(bp, params->shmem_base +
+			     offsetof(struct shmem_region, dev_info.
+				      port_feature_config[params->port].
+				      config));
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+	if (mod_abs & (1<<8)) {
+
+		/* Module is absent */
+		DP(NETIF_MSG_LINK,
+		   "MOD_ABS indication show module is absent\n");
+		phy->media_type = ETH_PHY_NOT_PRESENT;
+		/* 1. Set mod_abs to detect next module
+		 *    presence event
+		 * 2. Set EDC off by setting OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off it locks onto a reference clock and
+		 *    avoids becoming 'lost'.
+		 */
+		mod_abs &= ~(1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs &= ~(1<<9);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+		/* Clear RX alarm since it stays up as long as
+		 * the mod_abs wasn't changed
+		 */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+	} else {
+		/* Module is present */
+		DP(NETIF_MSG_LINK,
+		   "MOD_ABS indication show module is present\n");
+		/* First disable transmitter, and if the module is ok, the
+		 * module_detection will enable it
+		 * 1. Set mod_abs to detect next module absent event ( bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal and
+		 * this signal will then correctly indicate the presence or
+		 * absence of the Rx signal. (bit 9)
+		 */
+		mod_abs |= (1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs |= (1<<9);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+		/* Clear RX alarm since it stays up as long as the mod_abs
+		 * wasn't changed. This needs to be done before calling the
+		 * module detection, otherwise it will clear the link update
+		 * alarm
+		 */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+
+		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+			bnx2x_sfp_set_transmitter(params, phy, 0);
+
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
+		else
+			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+		/* Reconfigure link speed based on module type limitations */
+		bnx2x_8727_config_speed(phy, params);
+	}
+
+	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
+		   rx_alarm_status);
+	/* No need to check link status in case of module plugged in/out */
+}
+
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0, oc_port = params->port;
+	u16 link_status = 0;
+	u16 rx_alarm_status, lasi_ctrl, val1;
+
+	/* If PHY is not initialized, do not check link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+			&lasi_ctrl);
+	if (!lasi_ctrl)
+		return 0;
+
+	/* Check the LASI on Rx */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+			&rx_alarm_status);
+	vars->line_speed = 0;
+	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS  0x%x\n", rx_alarm_status);
+
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
+
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/* If a module is present and there is a need to check
+	 * for over-current
+	 */
+	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+		/* Check over-current using 8727 GPIO0 input*/
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+				&val1);
+
+		if ((val1 & (1<<8)) == 0) {
+			if (!CHIP_IS_E1x(bp))
+				oc_port = BP_PATH(bp) + (params->port << 1);
+			DP(NETIF_MSG_LINK,
+			   "8727 Power fault has been detected on port %d\n",
+			   oc_port);
+			netdev_err(bp->dev, "Error: Power fault on Port %d has "
+					    "been detected and the power to "
+					    "that SFP+ module has been removed "
+					    "to prevent failure of the card. "
+					    "Please remove the SFP+ module and "
+					    "restart the system to clear this "
+					    "error.\n",
+			 oc_port);
+			/* Disable all RX_ALARMs except for mod_abs */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_LASI_RXCTRL, (1<<5));
+
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+			/* Wait for module_absent_event */
+			val1 |= (1<<8);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+			/* Clear RX alarm */
+			bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+			bnx2x_8727_power_module(params->bp, phy, 0);
+			return 0;
+		}
+	} /* Over current check */
+
+	/* When module absent bit is set, check module */
+	if (rx_alarm_status & (1<<5)) {
+		bnx2x_8727_handle_mod_abs(phy, params);
+		/* Enable all mod_abs and link detection bits */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 ((1<<5) | (1<<2)));
+	}
+
+	if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+		DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	} else {
+		DP(NETIF_MSG_LINK, "Tx is disabled\n");
+		return 0;
+	}
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+	/* Bits 0..2 --> speed detected,
+	 * Bits 13..15 --> link is down
+	 */
+	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
+	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_1000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+			   params->port);
+	} else {
+		link_up = 0;
+		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+			   params->port);
+	}
+
+	/* Capture 10G link fault. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		if (val1 & (1<<0)) {
+			vars->fault_detected = 1;
+		}
+	}
+
+	if (link_up) {
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
+
+	if ((DUAL_MEDIA(params)) &&
+	    (phy->req_line_speed == SPEED_1000)) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_PCS_GP, &val1);
+		/* In case of dual-media board and 1G, power up the XAUI side,
+		 * otherwise power it down. For 10G it is done automatically
+		 */
+		if (link_up)
+			val1 &= ~(3<<10);
+		else
+			val1 |= (3<<10);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8727_PCS_GP, val1);
+	}
+	return link_up;
+}
+
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+
+	/* Enable/Disable PHY transmitter output */
+	bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
+	/* Disable Transmitter */
+	bnx2x_sfp_set_transmitter(params, phy, 0);
+	/* Clear LASI */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+
+}
+
+/******************************************************************/
+/*		BCM8481/BCM84823/BCM84833 PHY SECTION	          */
+/******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+	return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
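+/* Retrieve the 848xx firmware version - directly from CTL register 0x400f
+ * on 8483x/8485x, otherwise through the MDIO2ARM interface reading
+ * SPI_FW_STATUS - and store it via bnx2x_save_spirom_version().
+ */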
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+					    struct bnx2x *bp,
+					    u8 port)
+{
+	u16 val, fw_ver2, cnt, i;
+	static struct bnx2x_reg_set reg_set[] = {
+		{MDIO_PMA_DEVAD, 0xA819, 0x0014},
+		{MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+		{MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+		{MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+		{MDIO_PMA_DEVAD, 0xA817, 0x0009}
+	};
+	u16 fw_ver1;
+
+	if (bnx2x_is_8483x_8485x(phy)) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
+				phy->ver_addr);
+	} else {
+		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+		/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+		for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+			bnx2x_cl45_write(bp, phy, reg_set[i].devad,
+					 reg_set[i].reg, reg_set[i].val);
+
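+		/* Poll the MDIO2ARM handshake register (0xA818); bit 0 is
+		 * assumed to be the "command done" flag. 100 iterations of
+		 * udelay(5) bound the wait to roughly 500us.
+		 */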
+		for (cnt = 0; cnt < 100; cnt++) {
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+			if (val & 1)
+				break;
+			udelay(5);
+		}
+		if (cnt == 100) {
+			DP(NETIF_MSG_LINK, "Unable to read 848xx "
+					"phy fw version(1)\n");
+			bnx2x_save_spirom_version(bp, port, 0,
+						  phy->ver_addr);
+			return;
+		}
+
+		/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+		bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+		for (cnt = 0; cnt < 100; cnt++) {
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+			if (val & 1)
+				break;
+			udelay(5);
+		}
+		if (cnt == 100) {
+			DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+					"version(2)\n");
+			bnx2x_save_spirom_version(bp, port, 0,
+						  phy->ver_addr);
+			return;
+		}
+
+		/* lower 16 bits of the register SPI_FW_STATUS */
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+		/* upper 16 bits of register SPI_FW_STATUS */
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+		bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+					  phy->ver_addr);
+	}
+}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+				struct bnx2x_phy *phy)
+{
+	u16 val, offset, i;
+	static struct bnx2x_reg_set reg_set[] = {
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
+		{MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+			MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+		{MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+	};
+	/* PHYC_CTL_LED_CTL */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+	val &= 0xFE00;
+	val |= 0x0092;
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+				 reg_set[i].val);
+
+	if (bnx2x_is_8483x_8485x(phy))
+		offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+	else
+		offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+	/* stretch_en for LED3 */
+	bnx2x_cl45_read_or_write(bp, phy,
+				 MDIO_PMA_DEVAD, offset,
+				 MDIO_PMA_REG_84823_LED3_STRETCH_EN);
+}
+
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		if (!bnx2x_is_8483x_8485x(phy)) {
+			/* Save spirom version */
+			bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+		}
+		/* This phy uses the NIG latch mechanism since link indication
+		 * arrives through its LED4 and not via its LASI signal, so we
+		 * get steady signal instead of clear on read
+		 */
+		bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+			      1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+		bnx2x_848xx_set_led(bp, phy);
+		break;
+	}
+}
+
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 autoneg_val, an_1000_val, an_10_100_val;
+
+	bnx2x_848xx_specific_func(phy, params, PHY_INIT);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+	/* set 1000 speed advertisement */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			&an_1000_val);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			&an_10_100_val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+			&autoneg_val);
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
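+	/* These appear to follow the standard clause-22 layouts: in
+	 * autoneg_val, bits 13/6 select a forced speed, bit 12 enables
+	 * autoneg, bit 9 restarts it and bit 8 selects full duplex; in
+	 * an_10_100_val, bits 5..8 are the 10/100 half/full advertisements.
+	 */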
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			 an_1000_val);
+
+	/* Set 10/100 speed advertisement */
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			/* Enable autoneg and restart autoneg for legacy speeds
+			 */
+			autoneg_val |= (1<<9 | 1<<12);
+			an_10_100_val |= (1<<8);
+			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+		}
+
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			/* Enable autoneg and restart autoneg for legacy speeds
+			 */
+			autoneg_val |= (1<<9 | 1<<12);
+			an_10_100_val |= (1<<7);
+			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+		}
+
+		if ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+		    (phy->supported & SUPPORTED_10baseT_Full)) {
+			an_10_100_val |= (1<<6);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+		}
+
+		if ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+		    (phy->supported & SUPPORTED_10baseT_Half)) {
+			an_10_100_val |= (1<<5);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+		}
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if ((phy->req_line_speed == SPEED_100) &&
+	    (phy->supported &
+	     (SUPPORTED_100baseT_Half |
+	      SUPPORTED_100baseT_Full))) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		/* The PHY needs this set even for forced link. */
+		an_10_100_val |= (1<<8) | (1<<7);
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if ((phy->req_line_speed == SPEED_10) &&
+	    (phy->supported &
+	     (SUPPORTED_10baseT_Half |
+	      SUPPORTED_10baseT_Full))) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			 an_10_100_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	/* Always write this if this is not 84833/4/58.
+	 * For 84833/4/58, write it only when it's a forced speed.
+	 */
+	if (!bnx2x_is_8483x_8485x(phy) ||
+	    ((autoneg_val & (1<<12)) == 0))
+		bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD,
+			 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	    (phy->speed_cap_mask &
+	     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+		(phy->req_line_speed == SPEED_10000)) {
+			DP(NETIF_MSG_LINK, "Advertising 10G\n");
+			/* Restart autoneg for 10G */
+			bnx2x_cl45_read_or_write(
+				bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+				0x1000);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+					 0x3200);
+	} else
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+				 1);
+
+	return 0;
+}
+
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
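+
+/* With the usleep_range(1000, 2000) polls used below, PHY848xx_CMDHDLR_WAIT
+ * iterations bound each firmware-command wait to roughly 0.3-0.6 seconds.
+ */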
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	int idx;
+	u16 val;
+	struct bnx2x *bp = params->bp;
+
+	/* Step 1: Poll the STATUS register to see whether the previous command
+	 * is in progress or the system is busy (CMD_IN_PROGRESS or
+	 * SYSTEM_BUSY). If so, keep polling until the previous command
+	 * finishes execution and the system is free to accept a new command.
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+		    (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
+		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	/* Step2: If any parameters are required for the function, write them
+	 * to the required DATA registers
+	 */
+
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				 MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				 cmd_args[idx]);
+	}
+
+	/* Step3: When the firmware is ready for commands, write the 'Command
+	 * code' to the CMD register
+	 */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+	/* Step4: Once the command has been written, poll the STATUS register
+	 * to check whether the command has completed (CMD_COMPLETED_PASS or
+	 * CMD_COMPLETED_ERROR).
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+		    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+		return -EINVAL;
+	}
+	/* Step5: Once the command has completed, read the specified DATA
+	 * registers for any saved results of the command, if applicable.
+	 */
+
+	/* Gather returning data */
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				&cmd_args[idx]);
+	}
+
+	return 0;
+}
+
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params, u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	int idx;
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_848xx_CMD_HDLR_STATUS,
+			PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
+		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	/* Prepare argument(s) and issue command */
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				cmd_args[idx]);
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+		    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+		return -EINVAL;
+	}
+	/* Gather returning data */
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				&cmd_args[idx]);
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_848xx_CMD_HDLR_STATUS,
+			PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+	return 0;
+}
+
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	struct bnx2x *bp = params->bp;
+
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+	    (REG_RD(bp, params->shmem2_base +
+		    offsetof(struct shmem2_region,
+			     link_attr_sync[params->port])) &
+	     LINK_ATTR_84858)) {
+		return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	} else {
+		return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	}
+}
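+
+/* Typical use of the command handler, as a sketch (this mirrors
+ * bnx2x_8483x_disable_eee() below): pass the arguments in place and check
+ * the return code:
+ *
+ *	u16 args = 0;
+ *
+ *	if (bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ *				 &args, 1))
+ *		return -EINVAL;
+ */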
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
+{
+	u32 pair_swap;
+	u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
+	int status;
+	struct bnx2x *bp = params->bp;
+
+	/* Check for configuration. */
+	pair_swap = REG_RD(bp, params->shmem_base +
+			   offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+		PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+	if (pair_swap == 0)
+		return 0;
+
+	/* Only the second argument is used for this command */
+	data[1] = (u16)pair_swap;
+
+	status = bnx2x_848xx_cmd_hdlr(phy, params,
+				      PHY848xx_CMD_SET_PAIR_SWAP, data,
+				      PHY848xx_CMDHDLR_MAX_ARGS);
+	if (status == 0)
+		DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+	return status;
+}
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 chip_id)
+{
+	u32 reset_pin[2];
+	u32 idx;
+	u8 reset_gpios;
+	if (CHIP_IS_E3(bp)) {
+		/* Assume that these will be GPIOs, not EPIOs. */
+		for (idx = 0; idx < 2; idx++) {
+			/* Map config param to register bit. */
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+			reset_pin[idx] = (reset_pin[idx] &
+				PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+				PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+			reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
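+		/* E.g. assuming the PIN_CFG_GPIOn_P0 values are consecutive,
+		 * a reset pin configured as GPIO3 contributes (1 << 3) to the
+		 * mask built below.
+		 */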
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	} else {
+		/* E2, look from diff place of shmem. */
+		for (idx = 0; idx < 2; idx++) {
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].default_cfg));
+			reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+			reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+			reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	}
+
+	return reset_gpios;
+}
+
+static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 reset_gpios;
+	u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+				offsetof(struct shmem2_region,
+				other_shmem_base_addr));
+
+	u32 shmem_base_path[2];
+
+	/* Workaround for 84833 LED failure while the PHY is in reset */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+		MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+		MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+		MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
+	shmem_base_path[0] = params->shmem_base;
+	shmem_base_path[1] = other_shmem_base_addr;
+
+	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+						  params->chip_id);
+
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+		reset_gpios);
+
+	return 0;
+}
+
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	int rc;
+	struct bnx2x *bp = params->bp;
+	u16 cmd_args = 0;
+
+	DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+	/* Prevent Phy from working in EEE and advertising it */
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
+	if (rc) {
+		DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+		return rc;
+	}
+
+	return bnx2x_eee_disable(phy, params, vars);
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	int rc;
+	struct bnx2x *bp = params->bp;
+	u16 cmd_args = 1;
+
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
+	if (rc) {
+		DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+		return rc;
+	}
+
+	return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port, initialize = 1;
+	u16 val;
+	u32 actual_phy_selection;
+	u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
+	int rc = 0;
+
+	usleep_range(1000, 2000);
+
+	if (!(CHIP_IS_E1x(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+	} else {
+		/* MDIO reset */
+		bnx2x_cl45_write(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, 0x8000);
+	}
+
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
+	if (!bnx2x_is_8483x_8485x(phy)) {
+		/* BCM84823 requires that XGXS links up first @ 10G for normal
+		 * behavior.
+		 */
+		u16 temp;
+		temp = vars->line_speed;
+		vars->line_speed = SPEED_10000;
+		bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+		bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+		vars->line_speed = temp;
+	}
+	/* Check if this is actually BCM84858 */
+	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+		u16 hw_rev;
+
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+		if (hw_rev == BCM84858_PHY_ID) {
+			params->link_attr_sync |= LINK_ATTR_84858;
+			bnx2x_update_link_attr(params, params->link_attr_sync);
+		}
+	}
+
+	/* Set dual-media configuration according to the board configuration */
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_CTL_REG_84823_MEDIA, &val);
+	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+		 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+	if (CHIP_IS_E3(bp)) {
+		val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+			 MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+	} else {
+		val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+			MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+	}
+
+	actual_phy_selection = bnx2x_phy_selection(params);
+
+	switch (actual_phy_selection) {
+	case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		/* Do nothing. Essentially this is like the priority copper */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		/* Do nothing here. The first PHY won't be initialized at all */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+		initialize = 0;
+		break;
+	}
+	if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_CTL_REG_84823_MEDIA, val);
+	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+		   params->multi_phy_config, val);
+
+	if (bnx2x_is_8483x_8485x(phy)) {
+		bnx2x_848xx_pair_swap_cfg(phy, params, vars);
+
+		/* Keep AutogrEEEn disabled. */
+		cmd_args[0] = 0x0;
+		cmd_args[1] = 0x0;
+		cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+		cmd_args[3] = PHY84833_CONSTANT_LATENCY;
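+		/* args[0] = 0 presumably keeps AutogrEEEn off, while
+		 * args[2]/args[3] appear to program the constant-latency
+		 * values expected by the PHY firmware.
+		 */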
+		rc = bnx2x_848xx_cmd_hdlr(phy, params,
+					  PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+					  PHY848xx_CMDHDLR_MAX_ARGS);
+		if (rc)
+			DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+	}
+	if (initialize)
+		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+	else
+		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+	/* 84833 PHY has a better feature and doesn't need to support this. */
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		u32 cms_enable = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].default_cfg)) &
+			PORT_HW_CFG_ENABLE_CMS_MASK;
+
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+		if (cms_enable)
+			val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+		else
+			val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+	}
+
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_FW_REV, &val);
+
+	/* Configure EEE support */
+	if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+	    (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+	    bnx2x_eee_has_cap(params)) {
+		rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
+		if (rc) {
+			DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+			bnx2x_8483x_disable_eee(phy, params, vars);
+			return rc;
+		}
+
+		if ((phy->req_duplex == DUPLEX_FULL) &&
+		    (params->eee_mode & EEE_MODE_ADV_LPI) &&
+		    (bnx2x_eee_calc_timer(params) ||
+		     !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+			rc = bnx2x_8483x_enable_eee(phy, params, vars);
+		else
+			rc = bnx2x_8483x_disable_eee(phy, params, vars);
+		if (rc) {
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
+			return rc;
+		}
+	} else {
+		vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+	}
+
+	if (bnx2x_is_8483x_8485x(phy)) {
+		/* Bring PHY out of super isolate mode as the final step. */
+		bnx2x_cl45_read_and_write(bp, phy,
+					  MDIO_CTL_DEVAD,
+					  MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+					  (u16)~MDIO_84833_SUPER_ISOLATE);
+	}
+	return rc;
+}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, val1, val2;
+	u8 link_up = 0;
+
+	/* Check 10G-BaseT link status */
+	/* Check PMD signal ok */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, 0xFFFA, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+			&val2);
+	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+	/* Check link 10G */
+	if (val2 & (1<<11)) {
+		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
+		link_up = 1;
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+	} else { /* Check Legacy speed link */
+		u16 legacy_status, legacy_speed;
+
+		/* Enable expansion register 0x42 (Operation mode status) */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+		/* Get legacy speed operation status */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+				&legacy_status);
+
+		DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+		   legacy_status);
+		link_up = ((legacy_status & (1<<11)) == (1<<11));
+		legacy_speed = (legacy_status & (3<<9));
+		if (legacy_speed == (0<<9))
+			vars->line_speed = SPEED_10;
+		else if (legacy_speed == (1<<9))
+			vars->line_speed = SPEED_100;
+		else if (legacy_speed == (2<<9))
+			vars->line_speed = SPEED_1000;
+		else { /* Should not happen: Treat as link down */
+			vars->line_speed = 0;
+			link_up = 0;
+		}
+
+		if (link_up) {
+			if (legacy_status & (1<<8))
+				vars->duplex = DUPLEX_FULL;
+			else
+				vars->duplex = DUPLEX_HALF;
+
+			DP(NETIF_MSG_LINK,
+			   "Link is up in %dMbps, is_duplex_full= %d\n",
+			   vars->line_speed,
+			   (vars->duplex == DUPLEX_FULL));
+			/* Check legacy speed AN resolution */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+					&val);
+			if (val & (1<<5))
+				vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+					&val);
+			if ((val & (1<<0)) == 0)
+				vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+		}
+	}
+	if (link_up) {
+		DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
+			   vars->line_speed);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+		/* Read LP advertised speeds */
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_CL37_FC_LP, &val);
+		if (val & (1<<5))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+		if (val & (1<<6))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+		if (val & (1<<7))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+		if (val & (1<<8))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+		if (val & (1<<9))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_1000T_STATUS, &val);
+
+		if (val & (1<<10))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+		if (val & (1<<11))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_MASTER_STATUS, &val);
+
+		if (val & (1<<11))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+		/* Determine if EEE was negotiated */
+		if (bnx2x_is_8483x_8485x(phy))
+			bnx2x_eee_an_resolve(phy, params, vars);
+	}
+
+	return link_up;
+}
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+	int status = 0;
+	u32 spirom_ver;
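+	/* raw_ver appears to pack one version field in bits 7..11 and another
+	 * in bits 0..6; repack them as (high << 16) | low, the layout that
+	 * bnx2x_format_ver() expects.
+	 */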
+	spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+	status = bnx2x_format_ver(spirom_ver, str, len);
+	return status;
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+				   struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u16 val16;
+
+	if (!(CHIP_IS_E1x(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+			       port);
+	} else {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+		val16 |= MDIO_84833_SUPER_ISOLATE;
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_CTL_DEVAD,
+				 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
+	}
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+				     struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u8 port;
+
+	if (!(CHIP_IS_E1x(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
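+	/* The MDIO_PMA_REG_8481_LEDx_MASK writes below program the per-LED
+	 * source-select masks; 0x0 appears to turn a LED source off, while
+	 * the non-zero values (0x10/0x20/0x40/0x80/0x98) pick the link and
+	 * activity sources appropriate for each mode.
+	 */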
+	switch (mode) {
+	case LED_MODE_OFF:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED1_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED2_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED3_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED5_MASK,
+					0x0);
+
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+		}
+		break;
+	case LED_MODE_FRONT_PANEL_OFF:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+		   port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x20);
+
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Disable MI_INT interrupt before setting LED4
+				 * source to constant off.
+				 */
+				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+					   params->port*4) &
+				    NIG_MASK_MI_INT) {
+					params->link_flags |=
+					LINK_FLAGS_INT_DISABLED;
+
+					bnx2x_bits_dis(
+						bp,
+						NIG_REG_MASK_INTERRUPT_PORT0 +
+						params->port*4,
+						NIG_MASK_MI_INT);
+				}
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x0);
+			}
+		}
+		break;
+	case LED_MODE_ON:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= 0x8000;
+			val |= 0x2492;
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x20);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x20);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x0);
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x20);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Disable MI_INT interrupt before setting LED4
+				 * source to constant on.
+				 */
+				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+					   params->port*4) &
+				    NIG_MASK_MI_INT) {
+					params->link_flags |=
+					LINK_FLAGS_INT_DISABLED;
+
+					bnx2x_bits_dis(
+						bp,
+						NIG_REG_MASK_INTERRUPT_PORT0 +
+						params->port*4,
+						NIG_MASK_MI_INT);
+				}
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x20);
+			}
+		}
+		break;
+
+	case LED_MODE_OPER:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+
+			if (!((val &
+			       MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+			  >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+				DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_LINK_SIGNAL,
+						 0xa492);
+			}
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x10);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x80);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x98);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x40);
+
+		} else {
+			/* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+			 * sources are all wired through LED1, rather than only
+			 * 10G in other modes.
+			 */
+			val = ((params->hw_led_mode <<
+				SHARED_HW_CFG_LED_MODE_SHIFT) ==
+			       SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 val);
+
+			/* Tell LED3 to blink on source */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= ~(7<<6);
+			val |= (1<<6); /* A83B[8:6]= 1 */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Restore LED4 source to external link,
+				 * and re-enable interrupts.
+				 */
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x40);
+				if (params->link_flags &
+				    LINK_FLAGS_INT_DISABLED) {
+					bnx2x_link_int_enable(params);
+					params->link_flags &=
+						~LINK_FLAGS_INT_DISABLED;
+				}
+			}
+		}
+		break;
+	}
+
+	/* This is a workaround for E3+84833 until autoneg
+	 * restart is fixed in f/w
+	 */
+	if (CHIP_IS_E3(bp)) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+	}
+}
+
+/******************************************************************/
+/*			54618SE PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+					struct link_params *params,
+					u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	u16 temp;
+	switch (action) {
+	case PHY_INIT:
+		/* Configure LED4: set to INTR (0x6). */
+		/* Accessing shadow register 0xe. */
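+		/* Shadow registers are accessed indirectly: write the shadow
+		 * selector, read the current value back, then write the
+		 * modified value with the WR_ENA bit set to commit it.
+		 */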
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_SHADOW,
+				 MDIO_REG_GPHY_SHADOW_LED_SEL2);
+		bnx2x_cl22_read(bp, phy,
+				MDIO_REG_GPHY_SHADOW,
+				&temp);
+		temp &= ~(0xf << 4);
+		temp |= (0x6 << 4);
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_SHADOW,
+				 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+		/* Configure INTR based on link status change. */
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_INTR_MASK,
+				 ~MDIO_REG_INTR_MASK_LINK_STATUS);
+		break;
+	}
+}
+
+static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+					       struct link_params *params,
+					       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+	u32 cfg_pin;
+
+	DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+	usleep_range(1000, 2000);
+
+	/* This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin high to bring the GPHY out of reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
+
+	/* reset phy */
+	bnx2x_cl22_write(bp, phy,
+			 MDIO_PMA_REG_CTRL, 0x8000);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for GPHY to reset */
+	msleep(50);
+
+	bnx2x_54618se_specific_func(phy, params, PHY_INIT);
+	/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			&temp);
+	temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+	/* Set up fc */
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	fc_val = 0;
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+	/* Read all advertisement */
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	bnx2x_cl22_read(bp, phy,
+			0x04,
+			&an_10_100_val);
+
+	bnx2x_cl22_read(bp, phy,
+			MDIO_PMA_REG_CTRL,
+			&autoneg_val);
+
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+			   (1<<11));
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl22_write(bp, phy,
+			0x09,
+			an_1000_val);
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	/* Advertise 10/100 link speed */
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+			an_10_100_val |= (1<<5);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
+			an_10_100_val |= (1<<6);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			an_10_100_val |= (1<<7);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			an_10_100_val |= (1<<8);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+		}
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if (phy->req_line_speed == SPEED_100) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+		int rc;
+
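+		/* Select the TOP-level 2K-buffer expansion page and clear bit
+		 * 0 of the access-gate register; this appears to be required
+		 * before the EEE timer configuration below.
+		 */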
+		bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+				 MDIO_REG_GPHY_EXP_ACCESS_TOP |
+				 MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+		bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+		temp &= 0xfffe;
+		bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+		rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+		if (rc) {
+			DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+			bnx2x_eee_disable(phy, params, vars);
+		} else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+			   (phy->req_duplex == DUPLEX_FULL) &&
+			   (bnx2x_eee_calc_timer(params) ||
+			    !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+			/* Need to advertise EEE only when requested,
+			 * and either no LPI assertion was requested,
+			 * or it was requested and a valid timer was set.
+			 * Also notice full duplex is required for EEE.
+			 */
+			bnx2x_eee_advertise(phy, params, vars,
+					    SHMEM_EEE_1G_ADV);
+		} else {
+			DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+			bnx2x_eee_disable(phy, params, vars);
+		}
+	} else {
+		vars->eee_status &= ~(SHMEM_EEE_1G_ADV <<
+				      SHMEM_EEE_SUPPORTED_SHIFT);
+
+		if (phy->flags & FLAGS_EEE) {
+			/* Handle legacy auto-grEEEn */
+			if (params->feature_config_flags &
+			    FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+				temp = 6;
+				DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+			} else {
+				temp = 0;
+				DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+			}
+			bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+					 MDIO_AN_REG_EEE_ADV, temp);
+		}
+	}
+
+	bnx2x_cl22_write(bp, phy,
+			0x04,
+			an_10_100_val | fc_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	bnx2x_cl22_write(bp, phy,
+			MDIO_PMA_REG_CTRL, autoneg_val);
+
+	return 0;
+}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+				       struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 temp;
+
+	bnx2x_cl22_write(bp, phy,
+		MDIO_REG_GPHY_SHADOW,
+		MDIO_REG_GPHY_SHADOW_LED_SEL1);
+	bnx2x_cl22_read(bp, phy,
+		MDIO_REG_GPHY_SHADOW,
+		&temp);
+	temp &= 0xff00;
+
+	DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		temp |= 0x00ee;
+		break;
+	case LED_MODE_OPER:
+		temp |= 0x0001;
+		break;
+	case LED_MODE_ON:
+		temp |= 0x00ff;
+		break;
+	default:
+		break;
+	}
+	bnx2x_cl22_write(bp, phy,
+		MDIO_REG_GPHY_SHADOW,
+		MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+				     struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port;
+
+	/* In case of no EPIO routed to reset the GPHY, put it
+	 * in low power mode.
+	 */
+	bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+	/* This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin low to put GPHY in reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+				    struct link_params *params,
+				    struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u8 link_up = 0;
+	u16 legacy_status, legacy_speed;
+
+	/* Get speed operation status */
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_AUX_STATUS,
+			&legacy_status);
+	DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+	/* Read status to clear the PHY interrupt. */
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_INTR_STATUS,
+			&val);
+
+	link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+	if (link_up) {
+		legacy_speed = (legacy_status & (7<<8));
+		if (legacy_speed == (7<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (6<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (5<<8)) {
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_FULL;
+		}
+		/* Omitting 100Base-T4 for now */
+		else if (legacy_speed == (3<<8)) {
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (2<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (1<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_HALF;
+		} else /* Should not happen */
+			vars->line_speed = 0;
+
+		DP(NETIF_MSG_LINK,
+		   "Link is up in %dMbps, is_duplex_full= %d\n",
+		   vars->line_speed,
+		   (vars->duplex == DUPLEX_FULL));
+
+		/* Check legacy speed AN resolution */
+		bnx2x_cl22_read(bp, phy,
+				0x01,
+				&val);
+		if (val & (1<<5))
+			vars->link_status |=
+				LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+		bnx2x_cl22_read(bp, phy,
+				0x06,
+				&val);
+		if ((val & (1<<0)) == 0)
+			vars->link_status |=
+				LINK_STATUS_PARALLEL_DETECTION_USED;
+
+		DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+			   vars->line_speed);
+
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+		if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+			/* Report LP advertised speeds */
+			bnx2x_cl22_read(bp, phy, 0x5, &val);
+
+			if (val & (1<<5))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+			if (val & (1<<6))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+			if (val & (1<<7))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+			if (val & (1<<8))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+			if (val & (1<<9))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+			bnx2x_cl22_read(bp, phy, 0xa, &val);
+			if (val & (1<<10))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+			if (val & (1<<11))
+				vars->link_status |=
+				  LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+			if ((phy->flags & FLAGS_EEE) &&
+			    bnx2x_eee_has_cap(params))
+				bnx2x_eee_an_resolve(phy, params, vars);
+		}
+	}
+	return link_up;
+}
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+					  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable master/slave manual mode and set to master */
+	/* mii write 9 [bits set 11 12] */
+	bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+	/* Force 1G and disable autoneg */
+	/* set val [mii read 0] */
+	/* set val [expr $val & [bits clear 6 12 13]] */
+	/* set val [expr $val | [bits set 6 8]] */
+	/* mii write 0 $val */
+	bnx2x_cl22_read(bp, phy, 0x00, &val);
+	val &= ~((1<<6) | (1<<12) | (1<<13));
+	val |= (1<<6) | (1<<8);
+	bnx2x_cl22_write(bp, phy, 0x00, val);
+
+	/* Set external loopback and Tx using 6dB coding */
+	/* mii write 0x18 7 */
+	/* set val [mii read 0x18] */
+	/* mii write 0x18 [expr $val | [bits set 10 15]] */
+	bnx2x_cl22_write(bp, phy, 0x18, 7);
+	bnx2x_cl22_read(bp, phy, 0x18, &val);
+	bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
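+	/* 0x2710 == 10000 decimal, i.e. a 10000-byte maximum frame. */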
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
+/******************************************************************/
+/*			SFX7101 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	/* SFX7101_XGXS_TEST1 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	u16 fw_ver1, fw_ver2, val;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	/* Restart autoneg */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+	val |= 0x200;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+	/* Save spirom version */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+	bnx2x_save_spirom_version(bp, params->port,
+				  (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+	return 0;
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up;
+	u16 val1, val2;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+		   val2, val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+		   val2, val1);
+	link_up = ((val1 & 4) == 4);
+	/* If link is up print the AN outcome of the SFX7101 PHY */
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+				&val2);
+		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+			   val2, (val2 & (1<<14)));
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+		/* Read LP advertised speeds */
+		if (val2 & (1<<11))
+			vars->link_status |=
+				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+	}
+	return link_up;
+}
+
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+	if (*len < 5)
+		return -EINVAL;
+	str[0] = (spirom_ver & 0xFF);
+	str[1] = (spirom_ver & 0xFF00) >> 8;
+	str[2] = (spirom_ver & 0xFF0000) >> 16;
+	str[3] = (spirom_ver & 0xFF000000) >> 24;
+	str[4] = '\0';
+	*len -= 5;
+	return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_7101_RESET, &val);
+
+	for (cnt = 0; cnt < 10; cnt++) {
+		msleep(50);
+		/* Writes a self-clearing reset */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_7101_RESET,
+				 (val | (1<<15)));
+		/* Wait for clear */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_7101_RESET, &val);
+
+		if ((val & (1<<15)) == 0)
+			break;
+	}
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	/* Low power mode is controlled by GPIO 2 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+	/* The PHY reset is controlled by GPIO 1 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	u16 val = 0;
+	struct bnx2x *bp = params->bp;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		val = 2;
+		break;
+	case LED_MODE_ON:
+		val = 1;
+		break;
+	case LED_MODE_OPER:
+		val = 0;
+		break;
+	}
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_7107_LINK_LED_CNTL,
+			 val);
+}
+
+/******************************************************************/
+/*			STATIC PHY DECLARATION			  */
+/******************************************************************/
+
+static const struct bnx2x_phy phy_null = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+	.addr		= 0,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= 0,
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)NULL,
+	.read_status	= (read_status_t)NULL,
+	.link_reset	= (link_reset_t)NULL,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_serdes = {
+	.type		= PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
+	.read_status	= (read_status_t)bnx2x_link_settings_status,
+	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_xgxs = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_CX4,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
+	.read_status	= (read_status_t)bnx2x_link_settings_status,
+	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
+};
+static const struct bnx2x_phy phy_warpcore = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_TX_ERROR_CHECK,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_1000baseKX_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_10000baseKR_Full |
+			   SUPPORTED_20000baseKR2_Full |
+			   SUPPORTED_20000baseMLD2_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_UNSPECIFIED,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_warpcore_config_init,
+	.read_status	= (read_status_t)bnx2x_warpcore_read_status,
+	.link_reset	= (link_reset_t)bnx2x_warpcore_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)bnx2x_warpcore_hw_reset,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_7101 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_7101_config_init,
+	.read_status	= (read_status_t)bnx2x_7101_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_7101_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_7101_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_7101_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+static const struct bnx2x_phy phy_8073 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_KR,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8073_config_init,
+	.read_status	= (read_status_t)bnx2x_8073_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8073_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
+};
+static const struct bnx2x_phy phy_8705 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_XFP_FIBER,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8705_config_init,
+	.read_status	= (read_status_t)bnx2x_8705_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_null_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+static const struct bnx2x_phy phy_8706 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_SFPP_10G_FIBER,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8706_config_init,
+	.read_status	= (read_status_t)bnx2x_8706_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_8726 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_INIT_XGXS_FIRST |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8726_config_init,
+	.read_status	= (read_status_t)bnx2x_8726_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8726_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_8727 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8727_config_init,
+	.read_status	= (read_status_t)bnx2x_8727_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8727_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_8727_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_8727_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+static const struct bnx2x_phy phy_8481 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			  FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8481_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8481_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_8481_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static const struct bnx2x_phy phy_84823 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84833 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84834 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84858 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_54618se = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_54618se_config_init,
+	.read_status	= (read_status_t)bnx2x_54618se_read_status,
+	.link_reset	= (link_reset_t)bnx2x_54618se_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_5461x_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
+};
+/*****************************************************************/
+/*                                                               */
+/* Populate the phy according to the shmem configuration.       */
+/* Main function: bnx2x_populate_phy                            */
+/*                                                               */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+				     struct bnx2x_phy *phy, u8 port,
+				     u8 phy_index)
+{
+	/* Get the 4-lane XGXS Rx and Tx config from the shmem */
+	u32 rx = 0, tx = 0, i;
+	for (i = 0; i < 2; i++) {
+		/* INT_PHY and EXT_PHY1 share the same value location in
+		 * the shmem. When num_phys is greater than 1, this value
+		 * applies only to EXT_PHY1.
+		 */
+		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+		} else {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+		}
+
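+		/* Each 32-bit shmem word carries two 16-bit lane values:
+		 * the high half-word goes to the even lane, the low
+		 * half-word to the odd lane.
+		 */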
+		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+		phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+		phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+		phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+	}
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+				    u8 phy_index, u8 port)
+{
+	u32 ext_phy_config = 0;
+	switch (phy_index) {
+	case EXT_PHY1:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					      offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config));
+		break;
+	case EXT_PHY2:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					      offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config2));
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+		return -EINVAL;
+	}
+
+	return ext_phy_config;
+}
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+				  struct bnx2x_phy *phy)
+{
+	u32 phy_addr;
+	u32 chip_id;
+	u32 switch_cfg = (REG_RD(bp, shmem_base +
+				       offsetof(struct shmem_region,
+			dev_info.port_feature_config[port].link_config)) &
+			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
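+	/* Compose chip_id: chip number in the upper 16 bits, 4-bit chip
+	 * revision in bits 12-15.
+	 */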
+	chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+		((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
+	DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
+	if (USES_WARPCORE(bp)) {
+		u32 serdes_net_if;
+		phy_addr = REG_RD(bp,
+				  MISC_REG_WC0_CTRL_PHY_ADDR);
+		*phy = phy_warpcore;
+		if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+			phy->flags |= FLAGS_4_PORT_MODE;
+		else
+			phy->flags &= ~FLAGS_4_PORT_MODE;
+		/* Check Dual mode */
+		serdes_net_if = (REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region, dev_info.
+					port_hw_config[port].default_cfg)) &
+				 PORT_HW_CFG_NET_SERDES_IF_MASK);
+		/* Set the appropriate supported and flags indications per
+		 * interface type of the chip
+		 */
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+			phy->supported &= (SUPPORTED_10baseT_Half |
+					   SUPPORTED_10baseT_Full |
+					   SUPPORTED_100baseT_Half |
+					   SUPPORTED_100baseT_Full |
+					   SUPPORTED_1000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_BASE_T;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_XFP_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			phy->media_type = ETH_PHY_KR;
+			phy->supported &= (SUPPORTED_1000baseKX_Full |
+					   SUPPORTED_10000baseKR_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseKR2_Full |
+					   SUPPORTED_10000baseKR_Full |
+					   SUPPORTED_1000baseKX_Full |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+				       serdes_net_if);
+			break;
+		}
+
+		/* Enable MDC/MDIO work-around for E3 A0 since free running MDC
+		 * was not set as expected. For B0, ECO will be enabled so there
+		 * won't be an issue there
+		 */
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			phy->flags |= FLAGS_MDC_MDIO_WA;
+		else
+			phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+	} else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_SERDES0_CTRL_PHY_ADDR +
+					  port * 0x10);
+			*phy = phy_serdes;
+			break;
+		case SWITCH_CFG_10G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_XGXS0_CTRL_PHY_ADDR +
+					  port * 0x18);
+			*phy = phy_xgxs;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+			return -EINVAL;
+		}
+	}
+	phy->addr = (u8)phy_addr;
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+					    SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+					    port);
+	if (CHIP_IS_E2(bp))
+		phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+	else
+		phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+	DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+		   port, phy->addr, phy->mdio_ctrl);
+
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+	return 0;
+}
+
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+				  u8 phy_index,
+				  u32 shmem_base,
+				  u32 shmem2_base,
+				  u8 port,
+				  struct bnx2x_phy *phy)
+{
+	u32 ext_phy_config, phy_type, config2;
+	u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+	ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+						  phy_index, port);
+	phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+	/* Select the phy type */
+	switch (phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+		*phy = phy_8073;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		*phy = phy_8705;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+		*phy = phy_8706;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8726;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		/* BCM8727_NOC => BCM8727 no over current */
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		phy->flags |= FLAGS_NOC;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		*phy = phy_8481;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		*phy = phy_84823;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		*phy = phy_84833;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+		*phy = phy_84834;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+		*phy = phy_84858;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+		*phy = phy_54618se;
+		if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+			phy->flags |= FLAGS_EEE;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+		*phy = phy_7101;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		*phy = phy_null;
+		return -EINVAL;
+	default:
+		*phy = phy_null;
+		/* In case external PHY wasn't found */
+		if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+		    (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+			return -EINVAL;
+		return 0;
+	}
+
+	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+	/* The shmem address of the phy version is located in different
+	 * structures. In case this structure is too old, do not set
+	 * the address.
+	 */
+	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+					dev_info.shared_hw_config.config2));
+	if (phy_index == EXT_PHY1) {
+		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+				port_mb[port].ext_phy_fw_version);
+
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+			mdc_mdio_access = config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+	} else {
+		u32 size = REG_RD(bp, shmem2_base);
+
+		if (size >
+		    offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+			phy->ver_addr = shmem2_base +
+			    offsetof(struct shmem2_region,
+				     ext_phy_fw_version2[port]);
+		}
+		/* Check specific mdc mdio settings */
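+		/* The ACCESS2 field is shifted down so its value lines up
+		 * with the ACCESS1 encoding that bnx2x_get_emac_base()
+		 * expects.
+		 */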
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+			mdc_mdio_access = (config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+			(SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+			 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+	}
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+	if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
+		/* Remove 100Mb link support for BCM84833/4 when the phy fw
+		 * version is lower than or equal to 1.39
+		 */
+		u32 raw_ver = REG_RD(bp, phy->ver_addr);
+		if (((raw_ver & 0x7F) <= 39) &&
+		    (((raw_ver & 0xF80) >> 7) <= 1))
+			phy->supported &= ~(SUPPORTED_100baseT_Half |
+					    SUPPORTED_100baseT_Full);
+	}
+
+	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+		   phy_type, port, phy_index);
+	DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
+		   phy->addr, phy->mdio_ctrl);
+	return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+			      u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+	int status = 0;
+	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+	if (phy_index == INT_PHY)
+		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+	status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+					port, phy);
+	return status;
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+			      struct bnx2x_phy *phy,
+			      u8 phy_index)
+{
+	struct bnx2x *bp = params->bp;
+	u32 link_config;
+	/* Populate the default phy configuration for MF mode */
+	if (phy_index == EXT_PHY2) {
+		link_config = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config2));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+					     offsetof(struct shmem_region,
+						      dev_info.
+			port_hw_config[params->port].speed_capability_mask2));
+	} else {
+		link_config = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+				port_feature_config[params->port].link_config));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+					     offsetof(struct shmem_region,
+						      dev_info.
+			port_hw_config[params->port].speed_capability_mask));
+	}
+	DP(NETIF_MSG_LINK,
+	   "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
+	   phy_index, link_config, phy->speed_cap_mask);
+
+	phy->req_duplex = DUPLEX_FULL;
+	switch (link_config  & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
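+		/* Fall through */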
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		phy->req_line_speed = SPEED_10;
+		break;
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
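+		/* Fall through */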
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		phy->req_line_speed = SPEED_100;
+		break;
+	case PORT_FEATURE_LINK_SPEED_1G:
+		phy->req_line_speed = SPEED_1000;
+		break;
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		phy->req_line_speed = SPEED_2500;
+		break;
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+		phy->req_line_speed = SPEED_10000;
+		break;
+	default:
+		phy->req_line_speed = SPEED_AUTO_NEG;
+		break;
+	}
+
+	switch (link_config  & PORT_FEATURE_FLOW_CONTROL_MASK) {
+	case PORT_FEATURE_FLOW_CONTROL_AUTO:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_TX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_RX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_BOTH:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+	default:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		break;
+	}
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+	u32 phy_config_swapped, prio_cfg;
+	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	prio_cfg = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SELECTION_MASK;
+
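+	/* When the external PHYs are swapped, mirror the configured
+	 * priority so it refers to the actual PHY order.
+	 */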
+	if (phy_config_swapped) {
+		switch (prio_cfg) {
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+		     break;
+		}
+	} else
+		return_cfg = prio_cfg;
+
+	return return_cfg;
+}
+
+int bnx2x_phy_probe(struct link_params *params)
+{
+	u8 phy_index, actual_phy_idx;
+	u32 phy_config_swapped, sync_offset, media_types;
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	params->num_phys = 0;
+	DP(NETIF_MSG_LINK, "Begin phy probe\n");
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+			       " actual_phy_idx %x\n", phy_config_swapped,
+			   phy_index, actual_phy_idx);
+		phy = &params->phy[actual_phy_idx];
+		if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+				       params->shmem2_base, params->port,
+				       phy) != 0) {
+			params->num_phys = 0;
+			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+				   phy_index);
+			for (phy_index = INT_PHY;
+			      phy_index < MAX_PHYS;
+			      phy_index++)
+				*phy = phy_null;
+			return -EINVAL;
+		}
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+			break;
+
+		if (params->feature_config_flags &
+		    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+
+		if (!(params->feature_config_flags &
+		      FEATURE_CONFIG_MT_SUPPORT))
+			phy->flags |= FLAGS_MDC_MDIO_WA_G;
+
+		sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].media_type);
+		media_types = REG_RD(bp, sync_offset);
+
+		/* Update media type for non-PMF sync only for the first time.
+		 * In case the media type changes afterwards, it will be updated
+		 * using the update_status function.
+		 */
+		if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				    (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				     actual_phy_idx))) == 0) {
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				 actual_phy_idx));
+		}
+		REG_WR(bp, sync_offset, media_types);
+
+		bnx2x_phy_def_cfg(params, phy, phy_index);
+		params->num_phys++;
+	}
+
+	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
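+	/* XOR is zero when the reported status matches the latched flag,
+	 * i.e. nothing changed since the last check.
+	 */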
+	return 0;
+}
+
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_10000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_BMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+
+	/* Set bmac loopback */
+	bnx2x_bmac_enable(params, vars, 1, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_emac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_EMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+	/* Set emac loopback */
+	bnx2x_emac_enable(params, vars, 1);
+	bnx2x_emac_program(params, vars);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	if (!params->req_line_speed[0])
+		vars->line_speed = SPEED_10000;
+	else
+		vars->line_speed = params->req_line_speed[0];
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_XMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	/* Set WC to loopback mode since link is required to provide clock
+	 * to the XMAC in 20G mode
+	 */
+	bnx2x_set_aer_mmd(params, &params->phy[0]);
+	bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+	params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+
+	bnx2x_xmac_enable(params, vars, 1);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_umac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_UMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_umac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+	vars->link_up = 1;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->duplex = DUPLEX_FULL;
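+	/* Derive the loopback speed from the requested speed; a dual-mode
+	 * Warpcore implies 20G, otherwise default to 10G.
+	 */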
+	if (params->req_line_speed[0] == SPEED_1000)
+		vars->line_speed = SPEED_1000;
+	else if ((params->req_line_speed[0] == SPEED_20000) ||
+		 (int_phy->flags & FLAGS_WC_DUAL_MODE))
+		vars->line_speed = SPEED_20000;
+	else
+		vars->line_speed = SPEED_10000;
+
+	if (!USES_WARPCORE(bp))
+		bnx2x_xgxs_deassert(params);
+	bnx2x_link_initialize(params, vars);
+
+	if (params->req_line_speed[0] == SPEED_1000) {
+		if (USES_WARPCORE(bp))
+			bnx2x_umac_enable(params, vars, 0);
+		else {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		}
+	} else {
+		if (USES_WARPCORE(bp))
+			bnx2x_xmac_enable(params, vars, 0);
+		else
+			bnx2x_bmac_enable(params, vars, 0, 1);
+	}
+
+	if (params->loopback_mode == LOOPBACK_XGXS) {
+		/* Set 10G XGXS loopback */
+		int_phy->config_loopback(int_phy, params);
+	} else {
+		/* Set external phy loopback */
+		u8 phy_index;
+		for (phy_index = EXT_PHY1;
+		      phy_index < params->num_phys; phy_index++)
+			if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+	struct bnx2x *bp = params->bp;
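+	/* en is expected to be 0 or 1, so the multiplications below set or
+	 * clear all of the relevant filter-mask bits at once.
+	 */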
+	u8 val = en * 0x1F;
+
+	/* Open / close the gate between the NIG and the BRB */
+	if (!CHIP_IS_E1x(bp))
+		val |= en * 0x20;
+	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+	if (!CHIP_IS_E1(bp)) {
+		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+		       en*0x3);
+	}
+
+	REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+		    NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
+static int bnx2x_avoid_link_flap(struct link_params *params,
+					    struct link_vars *vars)
+{
+	u32 phy_idx;
+	u32 dont_clear_stat, lfa_sts;
+	struct bnx2x *bp = params->bp;
+
+	bnx2x_set_mdio_emac_per_phy(bp, params);
+	/* Sync the link parameters */
+	bnx2x_link_status_update(params, vars);
+
+	/*
+	 * The module verification was already done by the previous link owner,
+	 * so this call is meant only to get a warning message
+	 */
+
+	for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+		struct bnx2x_phy *phy = &params->phy[phy_idx];
+		if (phy->phy_specific_func) {
+			DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+			phy->phy_specific_func(phy, params, PHY_INIT);
+		}
+		if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+		    (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+		    (phy->media_type == ETH_PHY_DA_TWINAX))
+			bnx2x_verify_sfp_module(phy, params);
+	}
+	lfa_sts = REG_RD(bp, params->lfa_base +
+			 offsetof(struct shmem_lfa,
+				  lfa_sts));
+
+	dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+	/* Re-enable the NIG/MAC */
+	if (CHIP_IS_E3(bp)) {
+		if (!dont_clear_stat) {
+			REG_WR(bp, GRCBASE_MISC +
+			       MISC_REGISTERS_RESET_REG_2_CLEAR,
+			       (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+				params->port));
+			REG_WR(bp, GRCBASE_MISC +
+			       MISC_REGISTERS_RESET_REG_2_SET,
+			       (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+				params->port));
+		}
+		if (vars->line_speed < SPEED_10000)
+			bnx2x_umac_enable(params, vars, 0);
+		else
+			bnx2x_xmac_enable(params, vars, 0);
+	} else {
+		if (vars->line_speed < SPEED_10000)
+			bnx2x_emac_enable(params, vars, 0);
+		else
+			bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+	}
+
+	/* Increment LFA count */
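+	/* The 8-bit counter field is extracted, incremented (wrapping at
+	 * 0xff) and written back in place.
+	 */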
+	lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+		   (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+		       LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+		    << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+	/* Clear link flap reason */
+	lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+	/* Disable NIG DRAIN */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+	/* Enable interrupts */
+	bnx2x_link_int_enable(params);
+	return 0;
+}
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+					 struct link_vars *vars,
+					 int lfa_status)
+{
+	u32 lfa_sts, cfg_idx, tmp_val;
+	struct bnx2x *bp = params->bp;
+
+	bnx2x_link_reset(params, vars, 1);
+
+	if (!params->lfa_base)
+		return;
+	/* Store the new link parameters */
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, req_duplex),
+	       params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, req_flow_ctrl),
+	       params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, req_line_speed),
+	       params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+	for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+		REG_WR(bp, params->lfa_base +
+		       offsetof(struct shmem_lfa,
+				speed_cap_mask[cfg_idx]),
+		       params->speed_cap_mask[cfg_idx]);
+	}
+
+	tmp_val = REG_RD(bp, params->lfa_base +
+			 offsetof(struct shmem_lfa, additional_config));
+	tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+	tmp_val |= params->req_fc_auto_adv;
+
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+	lfa_sts = REG_RD(bp, params->lfa_base +
+			 offsetof(struct shmem_lfa, lfa_sts));
+
+	/* Clear the "Don't Clear Statistics" bit, and set reason */
+	lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+	/* Set link flap reason */
+	lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+	lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+		    LFA_LINK_FLAP_REASON_OFFSET);
+
+	/* Increment link flap counter */
+	lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+		   (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+		       LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+		    << LINK_FLAP_COUNT_OFFSET));
+	REG_WR(bp, params->lfa_base +
+	       offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+	/* Proceed with regular link initialization */
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+	int lfa_status;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[0], params->req_flow_ctrl[0]);
+	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[1], params->req_flow_ctrl[1]);
+	DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
+	vars->link_status = 0;
+	vars->phy_link_up = 0;
+	vars->link_up = 0;
+	vars->line_speed = 0;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_NONE;
+	vars->phy_flags = 0;
+	vars->check_kr2_recovery_cnt = 0;
+	params->link_flags = PHY_INITIALIZED;
+	/* Driver opens NIG-BRB filters */
+	bnx2x_set_rx_filter(params, 1);
+	bnx2x_chng_link_count(params, true);
+	/* Check if link flap can be avoided */
+	lfa_status = bnx2x_check_lfa(params);
+
+	if (lfa_status == 0) {
+		DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+		return bnx2x_avoid_link_flap(params, vars);
+	}
+
+	DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_status=0x%x\n",
+		       lfa_status);
+	bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
+
+	/* Disable attentions */
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	bnx2x_emac_init(params, vars);
+
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
+	if (params->num_phys == 0) {
+		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+		return -EINVAL;
+	}
+	set_phy_vars(params, vars);
+
+	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+	switch (params->loopback_mode) {
+	case LOOPBACK_BMAC:
+		bnx2x_init_bmac_loopback(params, vars);
+		break;
+	case LOOPBACK_EMAC:
+		bnx2x_init_emac_loopback(params, vars);
+		break;
+	case LOOPBACK_XMAC:
+		bnx2x_init_xmac_loopback(params, vars);
+		break;
+	case LOOPBACK_UMAC:
+		bnx2x_init_umac_loopback(params, vars);
+		break;
+	case LOOPBACK_XGXS:
+	case LOOPBACK_EXT_PHY:
+		bnx2x_init_xgxs_loopback(params, vars);
+		break;
+	default:
+		if (!CHIP_IS_E3(bp)) {
+			if (params->switch_cfg == SWITCH_CFG_10G)
+				bnx2x_xgxs_deassert(params);
+			else
+				bnx2x_serdes_deassert(bp, params->port);
+		}
+		bnx2x_link_initialize(params, vars);
+		msleep(30);
+		bnx2x_link_int_enable(params);
+		break;
+	}
+	bnx2x_update_mng(params, vars->link_status);
+
+	bnx2x_update_mng_eee(params, vars->eee_status);
+	return 0;
+}
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy)
+{
+	struct bnx2x *bp = params->bp;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
+	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+	/* Disable attentions */
+	vars->link_status = 0;
+	bnx2x_chng_link_count(params, true);
+	bnx2x_update_mng(params, vars->link_status);
+	vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+			      SHMEM_EEE_ACTIVE_BIT);
+	bnx2x_update_mng_eee(params, vars->eee_status);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	/* Activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* Disable nig egress interface */
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	}
+
+	if (!CHIP_IS_E3(bp)) {
+		bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+	} else {
+		bnx2x_set_xmac_rxtx(params, 0);
+		bnx2x_set_umac_rxtx(params, 0);
+	}
+	/* Disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	usleep_range(10000, 20000);
+	/* The PHY reset is controlled by GPIO 1.
+	 * Hold it as output low.
+	 */
+	/* Clear link led */
+	bnx2x_set_mdio_emac_per_phy(bp, params);
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+	if (reset_ext_phy) {
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			if (params->phy[phy_index].link_reset) {
+				bnx2x_set_aer_mmd(params,
+						  &params->phy[phy_index]);
+				params->phy[phy_index].link_reset(
+					&params->phy[phy_index],
+					params);
+			}
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
+		}
+	}
+
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
+	if (params->phy[INT_PHY].link_reset)
+		params->phy[INT_PHY].link_reset(
+			&params->phy[INT_PHY], params);
+
+	/* Disable nig ingress interface */
+	if (!CHIP_IS_E3(bp)) {
+		/* Reset BigMac */
+		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+		REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+	} else {
+		u32 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+		bnx2x_set_xumac_nig(params, 0, 0);
+		if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+		    MISC_REGISTERS_RESET_REG_2_XMAC)
+			REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+			       XMAC_CTRL_REG_SOFT_RESET);
+	}
+	vars->link_up = 0;
+	vars->phy_flags = 0;
+	return 0;
+}
+int bnx2x_lfa_reset(struct link_params *params,
+			       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 0;
+	vars->phy_flags = 0;
+	params->link_flags &= ~PHY_INITIALIZED;
+	if (!params->lfa_base)
+		return bnx2x_link_reset(params, vars, 1);
+	/*
+	 * Activate NIG drain so that during this time the device won't send
+	 * anything while it is unable to respond.
+	 */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+	/*
+	 * Close gracefully the gate from BMAC to NIG such that no half packets
+	 * are passed.
+	 */
+	if (!CHIP_IS_E3(bp))
+		bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+	if (CHIP_IS_E3(bp)) {
+		bnx2x_set_xmac_rxtx(params, 0);
+		bnx2x_set_umac_rxtx(params, 0);
+	}
+	/* Wait 10ms for the pipe to clean up */
+	usleep_range(10000, 20000);
+
+	/* Clean the NIG-BRB using the network filters in a way that will
+	 * not cut a packet in the middle.
+	 */
+	bnx2x_set_rx_filter(params, 0);
+
+	/*
+	 * Re-open the gate between the BMAC and the NIG, after verifying that
+	 * the gate to the BRB is closed, otherwise packets may arrive at the
+	 * firmware before the driver has initialized it. The target is to
+	 * achieve minimum management protocol down time.
+	 */
+	if (!CHIP_IS_E3(bp))
+		bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+	if (CHIP_IS_E3(bp)) {
+		bnx2x_set_xmac_rxtx(params, 1);
+		bnx2x_set_umac_rxtx(params, 1);
+	}
+	/* Disable NIG drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+	return 0;
+}
+
+/****************************************************************************/
+/*				Common functions			    */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	u16 val;
+	s8 port = 0;
+	s8 port_of_path = 0;
+	u32 swap_val, swap_override;
+	swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+	port ^= (swap_val && swap_override);
+	bnx2x_ext_phy_hw_reset(bp, port);
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate_phy failed\n");
+			return -EINVAL;
+		}
+		/* Disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+		/* Need to take the phy out of low power mode in order
+		 * to access its registers
+		 */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_CTRL,
+				 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+
+		/* Only set bit 10 = 1 (Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		/* Phase1 of TX_POWER_DOWN reset */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_POWER_DOWN,
+				 (val | 1<<10));
+	}
+
+	/* Toggle Transmitter: Power down and then up with a 600ms delay
+	 * in between
+	 */
+	msleep(600);
+
+	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* Phase2 of POWER_DOWN_RESET */
+		/* Release bit 10 (Release Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		bnx2x_cl45_write(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+		usleep_range(15000, 30000);
+
+		/* Read modify write the SPI-ROM version select register */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+		/* set GPIO2 back to LOW */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	}
+	return 0;
+}
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	u32 val;
+	s8 port;
+	struct bnx2x_phy phy;
+	/* Use port1 because of the static port-swap */
+	/* Enable the module detection interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= ((1<<MISC_REGISTERS_GPIO_3)|
+		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+	bnx2x_ext_phy_hw_reset(bp, 0);
+	usleep_range(5000, 10000);
+	for (port = 0; port < PORT_MAX; port++) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+		}
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+
+		/* Reset phy */
+		bnx2x_cl45_write(bp, &phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+
+		/* Set fault module detected LED on */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+			       MISC_REGISTERS_GPIO_HIGH,
+			       port);
+	}
+
+	return 0;
+}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+					 u8 *io_gpio, u8 *io_port)
+{
+
+	u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+					  offsetof(struct shmem_region,
+				dev_info.port_hw_config[PORT_0].default_cfg));
+	switch (phy_gpio_reset) {
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+		*io_gpio = 0;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+		*io_gpio = 1;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+		*io_gpio = 2;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+		*io_gpio = 3;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+		*io_gpio = 0;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+		*io_gpio = 1;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+		*io_gpio = 2;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+		*io_gpio = 3;
+		*io_port = 1;
+		break;
+	default:
+		/* Don't override the io_gpio and io_port */
+		break;
+	}
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	s8 port, reset_gpio;
+	u32 swap_val, swap_override;
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	s8 port_of_path;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+	reset_gpio = MISC_REGISTERS_GPIO_1;
+	port = 1;
+
+	/* Retrieve the reset gpio/port which controls the reset.
+	 * Default is GPIO1, PORT1
+	 */
+	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+				     (u8 *)&reset_gpio, (u8 *)&port);
+
+	/* Calculate the port based on port swap */
+	port ^= (swap_val && swap_override);
+
+	/* Initiate PHY reset*/
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
+	usleep_range(1000, 2000);
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+		       port);
+
+	usleep_range(5000, 10000);
+
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+				       0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+		/* Disable PHY transmitter output */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_DISABLE, 1);
+
+	}
+	return 0;
+}
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+						u32 shmem_base_path[],
+						u32 shmem2_base_path[],
+						u8 phy_index,
+						u32 chip_id)
+{
+	u8 reset_gpios;
+	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+		reset_gpios);
+	return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+				     u32 shmem2_base_path[], u8 phy_index,
+				     u32 ext_phy_type, u32 chip_id)
+{
+	int rc = 0;
+
+	switch (ext_phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/* GPIO1 affects both ports, so it cannot be pulled
+		 * for a single port alone
+		 */
+		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+		/* GPIO3's are linked, and so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		rc = -EINVAL;
+		break;
+	default:
+		DP(NETIF_MSG_LINK,
+			   "ext_phy 0x%x common init not required\n",
+			   ext_phy_type);
+		break;
+	}
+
+	if (rc)
+		netdev_err(bp->dev,  "Warning: PHY was not initialized,"
+				      " Port %d\n",
+			 0);
+	return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id)
+{
+	int rc = 0;
+	u32 phy_ver, val;
+	u8 phy_index = 0;
+	u32 ext_phy_type, ext_phy_config;
+
+	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
+	bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
+	DP(NETIF_MSG_LINK, "Begin common phy init\n");
+	if (CHIP_IS_E3(bp)) {
+		/* Enable EPIO */
+		val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+		REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+	}
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
+		return 0;
+	}
+
+	/* Read the ext_phy_type for arbitrary port(0) */
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		ext_phy_config = bnx2x_get_ext_phy_config(bp,
+							  shmem_base_path[0],
+							  phy_index, 0);
+		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+		rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, ext_phy_type,
+						chip_id);
+	}
+	return rc;
+}
+
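+/* Poll the over-current input pin; on a power fault, log the error, set
+ * PHY_OVER_CURRENT_FLAG and power down the SFP+ module.
+ */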
+static void bnx2x_check_over_curr(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+	u32 pin_val;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			       dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+		   PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+		PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+	/* Ignore check if no external input PIN available */
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+		return;
+
+	if (!pin_val) {
+		if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error:  Power fault on Port %d has"
+					    " been detected and the power to "
+					    "that SFP+ module has been removed"
+					    " to prevent failure of the card."
+					    " Please remove the SFP+ module and"
+					    " restart the system to clear this"
+					    " error.\n",
+			 params->port);
+			vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+			bnx2x_warpcore_power_module(params, 0);
+		}
+	} else
+		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+				    struct link_vars *vars, u32 status,
+				    u32 phy_flag, u32 link_flag, u8 notify)
+{
+	struct bnx2x *bp = params->bp;
+	/* Compare new value with previous value */
+	u8 led_mode;
+	u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
+
+	if ((status ^ old_status) == 0)
+		return 0;
+
+	/* If values differ */
+	switch (phy_flag) {
+	case PHY_HALF_OPEN_CONN_FLAG:
+		DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+		break;
+	case PHY_SFP_TX_FAULT_FLAG:
+		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
+	}
+	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+	   old_status, status);
+
+	/* Do not touch the link if the physical link is down */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return 1;
+
+	/* a. Update shmem->link_status accordingly
+	 * b. Update link_vars->link_up
+	 */
+	if (status) {
+		vars->link_status &= ~LINK_STATUS_LINK_UP;
+		vars->link_status |= link_flag;
+		vars->link_up = 0;
+		vars->phy_flags |= phy_flag;
+
+		/* activate nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+		/* Set LED mode to off since the PHY doesn't know about these
+		 * errors
+		 */
+		led_mode = LED_MODE_OFF;
+	} else {
+		vars->link_status |= LINK_STATUS_LINK_UP;
+		vars->link_status &= ~link_flag;
+		vars->link_up = 1;
+		vars->phy_flags &= ~phy_flag;
+		led_mode = LED_MODE_OPER;
+
+		/* Clear nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+	}
+	bnx2x_sync_link(params, vars);
+	/* Update the LED according to the link state */
+	bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+	/* Update link status in the shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* C. Trigger General Attention */
+	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+	if (notify)
+		bnx2x_notify_link_changed(bp);
+
+	return 1;
+}
+
+/******************************************************************************
+* Description:
+*	This function checks for a half-opened connection change indication.
+*	When such a change occurs, it calls bnx2x_analyze_link_error
+*	to check whether Remote Fault is set or cleared. Reception of a remote
+*	fault status message in the MAC indicates that the peer's MAC has
+*	detected a fault, for example, due to a break in the TX side of the
+*	fiber.
+*
+******************************************************************************/
+static int bnx2x_check_half_open_conn(struct link_params *params,
+				      struct link_vars *vars,
+				      u8 notify)
+{
+	struct bnx2x *bp = params->bp;
+	u32 lss_status = 0;
+	u32 mac_base;
+	/* Check only when the link is physically up at 10G (EMAC not in use) */
+	if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+	    (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
+		return 0;
+
+	if (CHIP_IS_E3(bp) &&
+	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	      (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+		/* Check E3 XMAC */
+		/* Note that link speed cannot be queried here, since it may be
+		 * zero while link is down. In case UMAC is active, LSS will
+		 * simply not be set
+		 */
+		mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+		/* Clear sticky bits (requires a rising edge) */
+		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+			lss_status = 1;
+
+		bnx2x_analyze_link_error(params, vars, lss_status,
+					 PHY_HALF_OPEN_CONN_FLAG,
+					 LINK_STATUS_NONE, notify);
+	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+		/* Check E1X / E2 BMAC */
+		u32 lss_status_reg;
+		u32 wb_data[2];
+		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+		/*  Read BIGMAC_REGISTER_RX_LSS_STATUS */
+		if (CHIP_IS_E2(bp))
+			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+		else
+			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+		lss_status = (wb_data[0] > 0);
+
+		bnx2x_analyze_link_error(params, vars, lss_status,
+					 PHY_HALF_OPEN_CONN_FLAG,
+					 LINK_STATUS_NONE, notify);
+	}
+	return 0;
+}
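+
+/* Sample the SFP+ TX_Fault pin and update the TX-fault link status and the
+ * module fault LED accordingly.
+ */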
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+					 struct link_params *params,
+					 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin, value = 0;
+	u8 led_change, port = params->port;
+
+	/* Get the SFP+ TX_Fault controlling pin (epio/gpio) */
+	cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+		   PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+		  PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+		DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+		return;
+	}
+
+	led_change = bnx2x_analyze_link_error(params, vars, value,
+					      PHY_SFP_TX_FAULT_FLAG,
+					      LINK_STATUS_SFP_TX_FAULT, 1);
+
+	if (led_change) {
+		/* Change TX_Fault led, set link status for further syncs */
+		u8 led_mode;
+
+		if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+			led_mode = MISC_REGISTERS_GPIO_HIGH;
+			vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+		} else {
+			led_mode = MISC_REGISTERS_GPIO_LOW;
+			vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+		}
+
+		/* If the module is unapproved, the LED should stay on regardless */
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+			DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+			   led_mode);
+			bnx2x_set_e3_module_fault_led(params, led_mode);
+		}
+	}
+}
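+
+/* Re-enable KR2 advertisement and restart KR auto-negotiation. */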
+static void bnx2x_kr2_recovery(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "KR2 recovery\n");
+	bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+	bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
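+/* KR2 work-around: check the link partner's base/next pages and disable or
+ * re-enable KR2 advertisement depending on whether the partner is a
+ * KR2-capable device.
+ */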
+static void bnx2x_check_kr2_wa(struct link_params *params,
+			       struct link_vars *vars,
+			       struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	u16 base_page, next_page, not_kr2_device, lane;
+	int sigdet;
+
+	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery,
+	 * since some switches tend to reinit the AN process and clear the
+	 * advertised BP/NP after ~2 seconds, causing KR2 to be disabled
+	 * and recovered many times
+	 */
+	if (vars->check_kr2_recovery_cnt > 0) {
+		vars->check_kr2_recovery_cnt--;
+		return;
+	}
+
+	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+	if (!sigdet) {
+		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No sigdet\n");
+		}
+		return;
+	}
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, lane);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+			MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+	bnx2x_set_aer_mmd(params, phy);
+
+	/* CL73 has not begun yet */
+	if (base_page == 0) {
+		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No BP\n");
+		}
+		return;
+	}
+
+	/* In case NP bit is not set in the BasePage, or it is set,
+	 * but only KX is advertised, declare this link partner as non-KR2
+	 * device.
+	 */
+	not_kr2_device = (((base_page & 0x8000) == 0) ||
+			  (((base_page & 0x8000) &&
+			    ((next_page & 0xe0) == 0x20))));
+
+	/* In case KR2 is already disabled, check if we need to re-enable it */
+	if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+		if (!not_kr2_device) {
+			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+			   next_page);
+			bnx2x_kr2_recovery(params, vars, phy);
+		}
+		return;
+	}
+	/* KR2 is enabled, but not KR2 device */
+	if (not_kr2_device) {
+		/* Disable KR2 on both lanes */
+		DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+		bnx2x_disable_kr2(params, vars, phy);
+		/* Restart AN on leading lane */
+		bnx2x_warpcore_restart_AN_KR(phy, params);
+		return;
+	}
+}
+
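+/* Periodic link task: runs the TX-error/half-open connection check and, on
+ * E3, the KR2 work-around, over-current check and SFP+ TX-fault detection.
+ */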
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	u16 phy_idx;
+	struct bnx2x *bp = params->bp;
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+			if (bnx2x_check_half_open_conn(params, vars, 1) !=
+			    0)
+				DP(NETIF_MSG_LINK, "Fault detection failed\n");
+			break;
+		}
+	}
+
+	if (CHIP_IS_E3(bp)) {
+		struct bnx2x_phy *phy = &params->phy[INT_PHY];
+		bnx2x_set_aer_mmd(params, phy);
+		if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
+		    (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+			bnx2x_check_kr2_wa(params, vars, phy);
+		bnx2x_check_over_curr(params, vars);
+		if (vars->rx_tx_asic_rst)
+			bnx2x_warpcore_config_runtime(phy, params, vars);
+
+		if ((REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region, dev_info.
+				port_hw_config[params->port].default_cfg))
+		    & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+		    PORT_HW_CFG_NET_SERDES_IF_SFI) {
+			if (bnx2x_is_sfp_module_plugged(phy, params)) {
+				bnx2x_sfp_tx_fault_detection(phy, params, vars);
+			} else if (vars->link_status &
+				LINK_STATUS_SFP_TX_FAULT) {
+				/* Clear the fault trail; the interrupt corrects the LEDs */
+				vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+				vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+				/* Update link status in the shared memory */
+				bnx2x_update_mng(params, vars->link_status);
+			}
+		}
+	}
+}
+
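+/* Returns non-zero if any external phy on this port requires fan failure
+ * detection.
+ */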
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
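+/* Mask link attentions and invoke the hw_reset callback of every populated
+ * phy.
+ */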
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	struct bnx2x *bp = params->bp;
+	bnx2x_update_mng(params, 0);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
+	}
+}
+
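+/* Configure the GPIO/EPIO used for SFP+ module insertion/removal (MOD_ABS)
+ * detection and enable the matching AEU attention and GPIO event.
+ */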
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) != 0)
+			return;
+	} else {
+		struct bnx2x_phy phy;
+		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+		      phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port, &phy)
+			    != 0) {
+				DP(NETIF_MSG_LINK, "populate phy failed\n");
+				return;
+			}
+			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+				gpio_num = MISC_REGISTERS_GPIO_3;
+				gpio_port = port;
+				break;
+			}
+		}
+	}
+
+	if (gpio_num == 0xff)
+		return;
+
+	/* Set GPIO3 to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	gpio_port ^= (swap_val && swap_override);
+
+	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+		(gpio_num + (gpio_port << 2));
+
+	sync_offset = shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].aeu_int_mask);
+	REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+	DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+		       gpio_num, gpio_port, vars->aeu_int_mask);
+
+	if (port == 0)
+		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+	else
+		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+	/* Open appropriate AEU for interrupts */
+	aeu_mask = REG_RD(bp, offset);
+	aeu_mask |= vars->aeu_int_mask;
+	REG_WR(bp, offset, aeu_mask);
+
+	/* Enable the GPIO to trigger interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= 1 << (gpio_num + (gpio_port << 2));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
new file mode 100644
index 0000000..b7d2511
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -0,0 +1,546 @@
+/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#ifndef BNX2X_LINK_H
+#define BNX2X_LINK_H
+
+
+
+/***********************************************************/
+/*                         Defines                         */
+/***********************************************************/
+#define DEFAULT_PHY_DEV_ADDR	3
+#define E2_DEFAULT_PHY_DEV_ADDR	5
+
+
+
+#define BNX2X_FLOW_CTRL_AUTO		PORT_FEATURE_FLOW_CONTROL_AUTO
+#define BNX2X_FLOW_CTRL_TX		PORT_FEATURE_FLOW_CONTROL_TX
+#define BNX2X_FLOW_CTRL_RX		PORT_FEATURE_FLOW_CONTROL_RX
+#define BNX2X_FLOW_CTRL_BOTH		PORT_FEATURE_FLOW_CONTROL_BOTH
+#define BNX2X_FLOW_CTRL_NONE		PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define NET_SERDES_IF_XFI		1
+#define NET_SERDES_IF_SFI		2
+#define NET_SERDES_IF_KR		3
+#define NET_SERDES_IF_DXGXS	4
+
+#define SPEED_AUTO_NEG		0
+#define SPEED_20000		20000
+
+#define I2C_DEV_ADDR_A0			0xa0
+#define I2C_DEV_ADDR_A2			0xa2
+
+#define SFP_EEPROM_PAGE_SIZE			16
+#define SFP_EEPROM_VENDOR_NAME_ADDR		0x14
+#define SFP_EEPROM_VENDOR_NAME_SIZE		16
+#define SFP_EEPROM_VENDOR_OUI_ADDR		0x25
+#define SFP_EEPROM_VENDOR_OUI_SIZE		3
+#define SFP_EEPROM_PART_NO_ADDR			0x28
+#define SFP_EEPROM_PART_NO_SIZE			16
+#define SFP_EEPROM_REVISION_ADDR		0x38
+#define SFP_EEPROM_REVISION_SIZE		4
+#define SFP_EEPROM_SERIAL_ADDR			0x44
+#define SFP_EEPROM_SERIAL_SIZE			16
+#define SFP_EEPROM_DATE_ADDR			0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE			6
+#define SFP_EEPROM_DIAG_TYPE_ADDR		0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE		1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ		(1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR		0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE		1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE		0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR		0x5f
+
+#define PWR_FLT_ERR_MSG_LEN			250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+		(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+		 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params)	(params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params)		(params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params)		(params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK		0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000
+#define FW_PARAM_MDIO_CTRL_OFFSET		16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+					   FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+					   FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+					    FW_PARAM_MDIO_CTRL_MASK) >> \
+					    FW_PARAM_MDIO_CTRL_OFFSET)
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
+
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD				170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD				250
+
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define BMAC_CONTROL_RX_ENABLE		2
+/***********************************************************/
+/*                         Structs                         */
+/***********************************************************/
+#define INT_PHY		0
+#define EXT_PHY1	1
+#define EXT_PHY2	2
+#define MAX_PHYS	3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+					 0 : (_phy_idx - 1))
+/***********************************************************/
+/*                      bnx2x_phy struct                     */
+/*  Defines the required arguments and function per phy    */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+			     struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+				  struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+			       struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+				    struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+	u8  devad;
+	u16 reg;
+	u16 val;
+};
+
+struct bnx2x_phy {
+	u32 type;
+
+	/* Loaded during init */
+	u8 addr;
+	u8 def_md_devad;
+	u16 flags;
+	/* No Over-Current detection */
+#define FLAGS_NOC			(1<<1)
+	/* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
+	/* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_WC_DUAL_MODE		(1<<4)
+#define FLAGS_4_PORT_MODE		(1<<5)
+#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
+#define FLAGS_SFP_NOT_APPROVED		(1<<7)
+#define FLAGS_MDC_MDIO_WA		(1<<8)
+#define FLAGS_DUMMY_READ		(1<<9)
+#define FLAGS_MDC_MDIO_WA_B0		(1<<10)
+#define FLAGS_TX_ERROR_CHECK		(1<<12)
+#define FLAGS_EEE			(1<<13)
+#define FLAGS_MDC_MDIO_WA_G		(1<<15)
+
+	/* preemphasis values for the rx side */
+	u16 rx_preemphasis[4];
+
+	/* preemphasis values for the tx side */
+	u16 tx_preemphasis[4];
+
+	/* EMAC base address used for MDIO access */
+	u32 mdio_ctrl;
+
+	u32 supported;
+
+	u32 media_type;
+#define	ETH_PHY_UNSPECIFIED	0x0
+#define	ETH_PHY_SFPP_10G_FIBER	0x1
+#define	ETH_PHY_XFP_FIBER		0x2
+#define	ETH_PHY_DA_TWINAX		0x3
+#define	ETH_PHY_BASE_T		0x4
+#define	ETH_PHY_SFP_1G_FIBER	0x5
+#define	ETH_PHY_KR		0xf0
+#define	ETH_PHY_CX4		0xf1
+#define	ETH_PHY_NOT_PRESENT	0xff
+
+	/* The address at which the version is located */
+	u32 ver_addr;
+
+	u16 req_flow_ctrl;
+
+	u16 req_line_speed;
+
+	u32 speed_cap_mask;
+
+	u16 req_duplex;
+	u16 rsrv;
+	/* Called per phy/port init; configures LASI, speed, autoneg,
+	 * duplex, flow control negotiation, etc.
+	 */
+	config_init_t config_init;
+
+	/* Called due to interrupt. It determines the link, speed */
+	read_status_t read_status;
+
+	/* Called when driver is unloading. Should reset the phy */
+	link_reset_t link_reset;
+
+	/* Set the loopback configuration for the phy */
+	config_loopback_t config_loopback;
+
+	/* Format the given raw number into str up to len */
+	format_fw_ver_t format_fw_ver;
+
+	/* Reset the phy (both ports) */
+	hw_reset_t hw_reset;
+
+	/* Set link led mode (on/off/oper)*/
+	set_link_led_t set_link_led;
+
+	/* PHY Specific tasks */
+	phy_specific_func_t phy_specific_func;
+#define DISABLE_TX	1
+#define ENABLE_TX	2
+#define PHY_INIT	3
+};
+
+/* Inputs parameters to the CLC */
+struct link_params {
+
+	u8 port;
+
+	/* Default / User Configuration */
+	u8 loopback_mode;
+#define LOOPBACK_NONE		0
+#define LOOPBACK_EMAC		1
+#define LOOPBACK_BMAC		2
+#define LOOPBACK_XGXS		3
+#define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5
+#define LOOPBACK_UMAC		6
+#define LOOPBACK_XMAC		7
+
+	/* Device parameters */
+	u8 mac_addr[6];
+
+	u16 req_duplex[LINK_CONFIG_SIZE];
+	u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+	u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+	/* shmem parameters */
+	u32 shmem_base;
+	u32 shmem2_base;
+	u32 speed_cap_mask[LINK_CONFIG_SIZE];
+	u32 switch_cfg;
+#define SWITCH_CFG_1G		PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G		PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT	PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+	u32 lane_config;
+
+	/* Phy register parameter */
+	u32 chip_id;
+
+	/* features */
+	u32 feature_config_flags;
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_AFEX			(1<<8)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED			(1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED		(1<<10)
+#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET		(1<<11)
+#define FEATURE_CONFIG_MT_SUPPORT			(1<<13)
+#define FEATURE_CONFIG_BOOT_FROM_SAN			(1<<14)
+
+	/* Will be populated during common init */
+	struct bnx2x_phy phy[MAX_PHYS];
+
+	/* Will be populated during common init */
+	u8 num_phys;
+
+	u8 rsrv;
+
+	/* Used to configure the EEE Tx LPI timer, has several modes of
+	 * operation, according to bits 29:28 -
+	 * 2'b00: Timer will be configured by nvram, output will be the value
+	 *        from nvram.
+	 * 2'b01: Timer will be configured by nvram, output will be in
+	 *        microseconds.
+	 * 2'b10: bits 1:0 contain an nvram value which will be used instead
+	 *        of the one located in the nvram. Output will be that value.
+	 * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+	 *        will be in microseconds.
+	 * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+	 */
+	u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME		(0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME		(0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME		(0x6000)
+#define EEE_MODE_NVRAM_MASK		(0x3)
+#define EEE_MODE_TIMER_MASK		(0xfffff)
+#define EEE_MODE_OUTPUT_TIME		(1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM		(1<<29)
+#define EEE_MODE_ENABLE_LPI		(1<<30)
+#define EEE_MODE_ADV_LPI			(1<<31)
+
+	u16 hw_led_mode; /* part of the hw_config read from the shmem */
+	u32 multi_phy_config;
+
+	/* Device pointer passed to all callback functions */
+	struct bnx2x *bp;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
+	u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED		(1<<0)
+#define PHY_INITIALIZED		(1<<1)
+	u32 lfa_base;
+
+	/* The same definitions as the shmem2 parameter */
+	u32 link_attr_sync;
+};
+
+/* Output parameters */
+struct link_vars {
+	u8 phy_flags;
+#define PHY_XGXS_FLAG			(1<<0)
+#define PHY_SGMII_FLAG			(1<<1)
+#define PHY_PHYSICAL_LINK_FLAG		(1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG		(1<<3)
+#define PHY_OVER_CURRENT_FLAG		(1<<4)
+#define PHY_SFP_TX_FAULT_FLAG		(1<<5)
+
+	u8 mac_type;
+#define MAC_TYPE_NONE		0
+#define MAC_TYPE_EMAC		1
+#define MAC_TYPE_BMAC		2
+#define MAC_TYPE_UMAC		3
+#define MAC_TYPE_XMAC		4
+
+	u8 phy_link_up; /* internal phy link indication */
+	u8 link_up;
+
+	u16 line_speed;
+	u16 duplex;
+
+	u16 flow_ctrl;
+	u16 ieee_fc;
+
+	/* The same definitions as the shmem parameter */
+	u32 link_status;
+	u32 eee_status;
+	u8 fault_detected;
+	u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT	5
+	u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT	0x0001
+
+	u32 aeu_int_mask;
+	u8 rx_tx_asic_rst;
+	u8 turn_to_run_wc_rt;
+	u16 rsrv2;
+};
+
+/***********************************************************/
+/*                         Functions                       */
+/***********************************************************/
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
+
+/* Reset the link. Should be called when the driver or interface goes down.
+ * Before calling a phy firmware upgrade, reset_ext_phy should be set
+ * to 0.
+ */
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy);
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
+/* bnx2x_link_update should be called upon link interrupt */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
+
+/* Use the following phy functions to read/write from the external phy.
+ * In order to use them to read/write internal phy registers, use
+ * DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
+ * the register.
+ */
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val);
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val);
+
+/* Reads the link_status from the shmem
+ * and updates the link vars accordingly */
+void bnx2x_link_status_update(struct link_params *input,
+			    struct link_vars *output);
+/* returns string representing the fw_version of the external phy */
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+				 u16 len);
+
+/* Set/Unset the LED
+ * Basically, the CLC takes care of the LED for the link, but if one needs
+ * to set/unset the LED manually, set "mode" to LED_MODE_OPER to
+ * blink the LED, and LED_MODE_OFF to turn the LED off. */
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed);
+#define LED_MODE_OFF			0
+#define LED_MODE_ON			1
+#define LED_MODE_OPER			2
+#define LED_MODE_FRONT_PANEL_OFF	3
+
+/* bnx2x_handle_module_detect_int should be called upon module detection
+   interrupt */
+void bnx2x_handle_module_detect_int(struct link_params *params);
+
+/* Get the actual link status. If it returns 0, the link is up;
+ * otherwise the link is down */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes);
+
+/* One-time initialization for external phy after power up */
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id);
+
+/* Reset the external PHY using GPIO */
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
+
+/* Reset the external SFX7101 PHY */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u8 dev_addr,
+				 u16 addr, u16 byte_cnt, u8 *o_buf);
+
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+int bnx2x_phy_probe(struct link_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+			     u32 shmem2_base, u8 port);
+
+/* Open / close the gate between the NIG and the BRB */
+void bnx2x_set_rx_filter(struct link_params *params, u8 en);
+
+/* DCBX structs */
+
+/* Maximum number of COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS		(2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
+#define DCBX_E3B0_MAX_NUM_COS		( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+			    DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS			( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+			    DCBX_E2E3_MAX_NUM_COS))
+
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+	/* NIG */
+	u32 pause_enable;
+	u32 llfc_out_en;
+	u32 llfc_enable;
+	u32 pkt_priority_to_cos;
+	u8 num_of_rx_cos_priority_mask;
+	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
+	u32 llfc_high_priority_classes;
+	u32 llfc_low_priority_classes;
+};
+
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+	u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+	/* Valid values are 0 - 5. 0 is the highest strict priority.
+	 * There can't be two COSes with the same priority.
+	 */
+	u8 pri;
+};
+
+enum bnx2x_cos_state {
+	bnx2x_cos_state_strict = 0,
+	bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+	enum bnx2x_cos_state state;
+	union {
+		struct bnx2x_ets_bw_params bw_params;
+		struct bnx2x_ets_sp_params sp_params;
+	} params;
+};
+
+struct bnx2x_ets_params {
+	u8 num_of_cos; /* Number of valid COS entries */
+	struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+int bnx2x_update_pfc(struct link_params *params,
+		      struct link_vars *vars,
+		      struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to configure ETS to the disabled state */
+int bnx2x_ets_disabled(struct link_params *params,
+		       struct link_vars *vars);
+
+/* Used to configure the ETS to BW limited */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+			const u32 cos1_bw);
+
+/* Used to configure the ETS to strict */
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+
+/* Configure the COS to ETS according to BW and SP settings. */
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 struct bnx2x_ets_params *ets_params);
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
+#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
new file mode 100644
index 0000000..1c81238
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -0,0 +1,15396 @@
+/* bnx2x_main.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>  /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/vxlan.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION					\
+	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
+	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT		(5*HZ)
+
+static char version[] =
+	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
+	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("QLogic "
+		   "BCM57710/57711/57711E/"
+		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+		   "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
+MODULE_PARM_DESC(num_queues,
+		 " Set number of queues (default is as a number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, S_IRUGO);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+static int int_mode;
+module_param(int_mode, int, S_IRUGO);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+				"(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, S_IRUGO);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int mrrs = -1;
+module_param(mrrs, int, S_IRUGO);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
+
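+/* MAC-block register addresses and their saved values, collected so they can
+ * be restored later.
+ */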
+struct bnx2x_mac_vals {
+	u32 xmac_addr;
+	u32 xmac_val;
+	u32 emac_addr;
+	u32 emac_val;
+	u32 umac_addr[2];
+	u32 umac_val[2];
+	u32 bmac_addr;
+	u32 bmac_val[2];
+};
+
+enum bnx2x_board_type {
+	BCM57710 = 0,
+	BCM57711,
+	BCM57711E,
+	BCM57712,
+	BCM57712_MF,
+	BCM57712_VF,
+	BCM57800,
+	BCM57800_MF,
+	BCM57800_VF,
+	BCM57810,
+	BCM57810_MF,
+	BCM57810_VF,
+	BCM57840_4_10,
+	BCM57840_2_20,
+	BCM57840_MF,
+	BCM57840_VF,
+	BCM57811,
+	BCM57811_MF,
+	BCM57840_O,
+	BCM57840_MFO,
+	BCM57811_VF
+};
+
+/* indexed by board_type, above */
+static struct {
+	char *name;
+} board_info[] = {
+	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
+	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
+	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
+	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
+	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
+	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
+	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
+	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
+	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
+#endif
+
+static const struct pci_device_id bnx2x_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/* Global resources for unloading a previously loaded device */
+#define BNX2X_PREV_WAIT_NEEDED 1
+static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declaration */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+
+static void __storm_memset_dma_mapping(struct bnx2x *bp,
+				       u32 addr, dma_addr_t mapping)
+{
+	REG_WR(bp,  addr, U64_LO(mapping));
+	REG_WR(bp,  addr + 4, U64_HI(mapping));
+}
+
+static void storm_memset_spq_addr(struct bnx2x *bp,
+				  dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = XSEM_REG_FAST_MEMORY +
+			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+				  u16 pf_id)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+				 u8 enable)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+}
+
+static void storm_memset_eq_data(struct bnx2x *bp,
+				 struct event_ring_data *eq_data,
+				u16 pfid)
+{
+	size_t size = sizeof(struct event_ring_data);
+
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+				 u16 pfid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+	REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+	u32 val;
+
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+
+	return val;
+}
+
+#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE	"dst_addr [none]"
+
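+/* Dump a DMAE command (source/destination/completion fields and raw words)
+ * at the given debug message level.
+ */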
+static void bnx2x_dp_dmae(struct bnx2x *bp,
+			  struct dmae_command *dmae, int msglvl)
+{
+	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+	int i;
+
+	switch (dmae->opcode & DMAE_COMMAND_DST) {
+	case DMAE_CMD_DST_PCI:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	case DMAE_CMD_DST_GRC:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	default:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
+			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
+			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	}
+
+	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
+		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
+		   i, *(((u32 *)dmae) + i));
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+	u32 cmd_offset;
+	int i;
+
+	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+	}
+	REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+			   DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+	return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+			     bool with_comp, u8 comp_type)
+{
+	u32 opcode = 0;
+
+	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+		   (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+	if (with_comp)
+		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+	return opcode;
+}
+
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+				      struct dmae_command *dmae,
+				      u8 src_type, u8 dst_type)
+{
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	/* set the opcode */
+	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+					 true, DMAE_COMP_PCI);
+
+	/* fill in the completion parameters */
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+			       u32 *comp)
+{
+	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+	int rc = 0;
+
+	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
+
+	/* Lock the dmae channel. Disable BHs to prevent a deadlock,
+	 * since this code is called both from syscall context and
+	 * from the ndo_set_rx_mode() flow, which may be called from BH.
+	 */
+
+	spin_lock_bh(&bp->dmae_lock);
+
+	/* reset completion */
+	*comp = 0;
+
+	/* post the command on the channel used for initializations */
+	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+	/* wait for completion */
+	udelay(5);
+	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+
+		if (!cnt ||
+		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+			BNX2X_ERR("DMAE timeout!\n");
+			rc = DMAE_TIMEOUT;
+			goto unlock;
+		}
+		cnt--;
+		udelay(50);
+	}
+	if (*comp & DMAE_PCI_ERR_FLAG) {
+		BNX2X_ERR("DMAE PCI error!\n");
+		rc = DMAE_PCI_ERROR;
+	}
+
+unlock:
+
+	spin_unlock_bh(&bp->dmae_lock);
+
+	return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+		      u32 len32)
+{
+	int rc;
+	struct dmae_command dmae;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+		if (CHIP_IS_E1(bp))
+			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+		else
+			bnx2x_init_str_wr(bp, dst_addr, data, len32);
+		return;
+	}
+
+	/* set opcode and fixed command fields */
+	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+	/* fill in addresses and len */
+	dmae.src_addr_lo = U64_LO(dma_addr);
+	dmae.src_addr_hi = U64_HI(dma_addr);
+	dmae.dst_addr_lo = dst_addr >> 2;
+	dmae.dst_addr_hi = 0;
+	dmae.len = len32;
+
+	/* issue the command and wait for completion */
+	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+	if (rc) {
+		BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	}
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+	int rc;
+	struct dmae_command dmae;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+		int i;
+
+		if (CHIP_IS_E1(bp))
+			for (i = 0; i < len32; i++)
+				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+		else
+			for (i = 0; i < len32; i++)
+				data[i] = REG_RD(bp, src_addr + i*4);
+
+		return;
+	}
+
+	/* set opcode and fixed command fields */
+	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+	/* fill in addresses and len */
+	dmae.src_addr_lo = src_addr >> 2;
+	dmae.src_addr_hi = 0;
+	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+	dmae.len = len32;
+
+	/* issue the command and wait for completion */
+	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+	if (rc) {
+		BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	}
+}
+
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+				      u32 addr, u32 len)
+{
+	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+	int offset = 0;
+
+	while (len > dmae_wr_max) {
+		bnx2x_write_dmae(bp, phys_addr + offset,
+				 addr + offset, dmae_wr_max);
+		offset += dmae_wr_max * 4;
+		len -= dmae_wr_max;
+	}
+
+	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+enum storms {
+	   XSTORM,
+	   TSTORM,
+	   CSTORM,
+	   USTORM,
+	   MAX_STORMS
+};
+
+#define STORMS_NUM 4
+#define REGS_IN_ENTRY 4
+
+static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
+					      enum storms storm,
+					      int entry)
+{
+	switch (storm) {
+	case XSTORM:
+		return XSTORM_ASSERT_LIST_OFFSET(entry);
+	case TSTORM:
+		return TSTORM_ASSERT_LIST_OFFSET(entry);
+	case CSTORM:
+		return CSTORM_ASSERT_LIST_OFFSET(entry);
+	case USTORM:
+		return USTORM_ASSERT_LIST_OFFSET(entry);
+	case MAX_STORMS:
+	default:
+		BNX2X_ERR("unknown storm\n");
+	}
+	return -EINVAL;
+}
+
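+/* Scan the storm assert lists and print any valid assert entries; returns
+ * the number of asserts found.
+ */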
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+	char last_idx;
+	int i, j, rc = 0;
+	enum storms storm;
+	u32 regs[REGS_IN_ENTRY];
+	u32 bar_storm_intmem[STORMS_NUM] = {
+		BAR_XSTRORM_INTMEM,
+		BAR_TSTRORM_INTMEM,
+		BAR_CSTRORM_INTMEM,
+		BAR_USTRORM_INTMEM
+	};
+	u32 storm_assert_list_index[STORMS_NUM] = {
+		XSTORM_ASSERT_LIST_INDEX_OFFSET,
+		TSTORM_ASSERT_LIST_INDEX_OFFSET,
+		CSTORM_ASSERT_LIST_INDEX_OFFSET,
+		USTORM_ASSERT_LIST_INDEX_OFFSET
+	};
+	char *storms_string[STORMS_NUM] = {
+		"XSTORM",
+		"TSTORM",
+		"CSTORM",
+		"USTORM"
+	};
+
+	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
+		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
+				   storm_assert_list_index[storm]);
+		if (last_idx)
+			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
+				  storms_string[storm], last_idx);
+
+		/* print the asserts */
+		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+			/* read a single assert entry */
+			for (j = 0; j < REGS_IN_ENTRY; j++)
+				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
+					  bnx2x_get_assert_list_entry(bp,
+								      storm,
+								      i) +
+					  sizeof(u32) * j);
+
+			/* log entry if it contains a valid assert */
+			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					  storms_string[storm], i, regs[3],
+					  regs[2], regs[1], regs[0]);
+				rc++;
+			} else {
+				break;
+			}
+		}
+	}
+
+	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
+		  CHIP_IS_E1(bp) ? "everest1" :
+		  CHIP_IS_E1H(bp) ? "everest1h" :
+		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
+		  BCM_5710_FW_MAJOR_VERSION,
+		  BCM_5710_FW_MINOR_VERSION,
+		  BCM_5710_FW_REVISION_VERSION);
+
+	return rc;
+}
+
+#define MCPR_TRACE_BUFFER_SIZE	(0x800)
+#define SCRATCH_BUFFER_SIZE(bp)	\
+	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
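+/* Dump the MCP trace buffer from scratchpad memory at the given printk
+ * level.
+ */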
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+	u32 addr, val;
+	u32 mark, offset;
+	__be32 data[9];
+	int word;
+	u32 trace_shmem_base;
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+		(bp->common.bc_ver & 0xff0000) >> 16,
+		(bp->common.bc_ver & 0xff00) >> 8,
+		(bp->common.bc_ver & 0xff));
+
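+	/* read the MCP program counter twice and report it only if the value
+	 * is stable across both reads
+	 */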
+	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
+
+	if (BP_PATH(bp) == 0)
+		trace_shmem_base = bp->common.shmem_base;
+	else
+		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
+
+	/* sanity */
+	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+				SCRATCH_BUFFER_SIZE(bp)) {
+		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+			  trace_shmem_base);
+		return;
+	}
+
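+	/* the MFW trace buffer sits immediately below the shmem region */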
+	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
+
+	/* validate TRCB signature */
+	mark = REG_RD(bp, addr);
+	if (mark != MFW_TRACE_SIGNATURE) {
+		BNX2X_ERR("Trace buffer signature is missing.\n");
+		return;
+	}
+
+	/* read cyclic buffer pointer */
+	addr += 4;
+	mark = REG_RD(bp, addr);
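+	/* mark appears to be an offset in the MCP view of the scratchpad
+	 * (based at 0x08000000); dword-align it and rebase it onto the
+	 * host-visible scratch base
+	 */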
+	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+	if (mark >= trace_shmem_base || mark < addr + 4) {
+		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+		return;
+	}
+	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+	printk("%s", lvl);
+
+	/* dump buffer after the mark */
+	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+
+	/* dump buffer before the mark */
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+	printk("%s" "end of fw dump\n", lvl);
+}
+
+static void bnx2x_fw_dump(struct bnx2x *bp)
+{
+	bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+
+	/* in E1 we must use only PCI configuration space to disable
+	 * MSI/MSIX capability
+	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+	 */
+	if (CHIP_IS_E1(bp)) {
+		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+		 * use the mask register to prevent the HC from sending
+		 * interrupts after we exit this function
+		 */
+		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+	DP(NETIF_MSG_IFDOWN,
+	   "write %x to HC %d (addr 0x%x)\n",
+	   val, port, addr);
+
+	/* flush all outstanding writes */
+	mmiowb();
+
+	REG_WR(bp, addr, val);
+	if (REG_RD(bp, addr) != val)
+		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+		 IGU_PF_CONF_INT_LINE_EN |
+		 IGU_PF_CONF_ATTN_BIT_EN);
+
+	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+	/* flush all outstanding writes */
+	mmiowb();
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_int_disable(bp);
+	else
+		bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
+{
+	int i;
+	u16 j;
+	struct hc_sp_status_block_data sp_sb_data;
+	int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+	u16 start = 0, end = 0;
+	u8 cos;
+#endif
+	if (IS_PF(bp) && disable_int)
+		bnx2x_int_disable(bp);
+
+	bp->stats_state = STATS_STATE_DISABLED;
+	bp->eth_stats.unrecoverable_error++;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	BNX2X_ERR("begin crash dump -----------------\n");
+
+	/* Indices */
+	/* Common */
+	if (IS_PF(bp)) {
+		struct host_sp_status_block *def_sb = bp->def_status_blk;
+		int data_size, cstorm_offset;
+
+		BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+			  bp->def_idx, bp->def_att_idx, bp->attn_state,
+			  bp->spq_prod_idx, bp->stats_counter);
+		BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
+			  def_sb->atten_status_block.attn_bits,
+			  def_sb->atten_status_block.attn_bits_ack,
+			  def_sb->atten_status_block.status_block_id,
+			  def_sb->atten_status_block.attn_bits_index);
+		BNX2X_ERR("     def (");
+		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+			pr_cont("0x%x%s",
+				def_sb->sp_sb.index_values[i],
+				(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
+
+		data_size = sizeof(struct hc_sp_status_block_data) /
+			    sizeof(u32);
+		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+		for (i = 0; i < data_size; i++)
+			*((u32 *)&sp_sb_data + i) =
+				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+					   i * sizeof(u32));
+
+		pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
+			sp_sb_data.igu_sb_id,
+			sp_sb_data.igu_seg_id,
+			sp_sb_data.p_func.pf_id,
+			sp_sb_data.p_func.vnic_id,
+			sp_sb_data.p_func.vf_id,
+			sp_sb_data.p_func.vf_valid,
+			sp_sb_data.state);
+	}
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		int loop;
+		struct hc_status_block_data_e2 sb_data_e2;
+		struct hc_status_block_data_e1x sb_data_e1x;
+		struct hc_status_block_sm  *hc_sm_p =
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.common.state_machine :
+			sb_data_e2.common.state_machine;
+		struct hc_index_data *hc_index_p =
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.index_data :
+			sb_data_e2.index_data;
+		u8 data_size, cos;
+		u32 *sb_data_p;
+		struct bnx2x_fp_txdata txdata;
+
+		if (!bp->fp)
+			break;
+
+		if (!fp->rx_cons_sb)
+			continue;
+
+		/* Rx */
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
+			  i, fp->rx_bd_prod, fp->rx_bd_cons,
+			  fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge,
+			  le16_to_cpu(fp->fp_hc_idx));
+
+		/* Tx */
+		for_each_cos_in_tx_queue(fp, cos) {
+			if (!fp->txdata_ptr[cos])
+				break;
+
+			txdata = *fp->txdata_ptr[cos];
+
+			if (!txdata.tx_cons_sb)
+				continue;
+
+			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
+				  i, txdata.tx_pkt_prod,
+				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
+				  txdata.tx_bd_cons,
+				  le16_to_cpu(*txdata.tx_cons_sb));
+		}
+
+		loop = CHIP_IS_E1x(bp) ?
+			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+		/* host sb data */
+
+		if (IS_FCOE_FP(fp))
+			continue;
+
+		BNX2X_ERR("     run indexes (");
+		for (j = 0; j < HC_SB_MAX_SM; j++)
+			pr_cont("0x%x%s",
+			       fp->sb_running_index[j],
+			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+		BNX2X_ERR("     indexes (");
+		for (j = 0; j < loop; j++)
+			pr_cont("0x%x%s",
+			       fp->sb_index_values[j],
+			       (j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access the FW reflection of the status block */
+		if (IS_VF(bp))
+			continue;
+
+		/* fw sb data */
+		data_size = CHIP_IS_E1x(bp) ?
+			sizeof(struct hc_status_block_data_e1x) :
+			sizeof(struct hc_status_block_data_e2);
+		data_size /= sizeof(u32);
+		sb_data_p = CHIP_IS_E1x(bp) ?
+			(u32 *)&sb_data_e1x :
+			(u32 *)&sb_data_e2;
+		/* copy sb data in here */
+		for (j = 0; j < data_size; j++)
+			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+				j * sizeof(u32));
+
+		if (!CHIP_IS_E1x(bp)) {
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
+				sb_data_e2.common.p_func.pf_id,
+				sb_data_e2.common.p_func.vf_id,
+				sb_data_e2.common.p_func.vf_valid,
+				sb_data_e2.common.p_func.vnic_id,
+				sb_data_e2.common.same_igu_sb_1b,
+				sb_data_e2.common.state);
+		} else {
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
+				sb_data_e1x.common.p_func.pf_id,
+				sb_data_e1x.common.p_func.vf_id,
+				sb_data_e1x.common.p_func.vf_valid,
+				sb_data_e1x.common.p_func.vnic_id,
+				sb_data_e1x.common.same_igu_sb_1b,
+				sb_data_e1x.common.state);
+		}
+
+		/* SB_SMs data */
+		for (j = 0; j < HC_SB_MAX_SM; j++) {
+			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
+				j, hc_sm_p[j].__flags,
+				hc_sm_p[j].igu_sb_id,
+				hc_sm_p[j].igu_seg_id,
+				hc_sm_p[j].time_to_expire,
+				hc_sm_p[j].timer_value);
+		}
+
+		/* Indices data */
+		for (j = 0; j < loop; j++) {
+			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
+			       hc_index_p[j].flags,
+			       hc_index_p[j].timeout);
+		}
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (IS_PF(bp)) {
+		/* event queue */
+		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+		for (i = 0; i < NUM_EQ_DESC; i++) {
+			u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+				  i, bp->eq_ring[i].message.opcode,
+				  bp->eq_ring[i].message.error);
+			BNX2X_ERR("data: %x %x %x\n",
+				  data[0], data[1], data[2]);
+		}
+	}
+
+	/* Rings */
+	/* Rx */
+	for_each_valid_rx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		if (!bp->fp)
+			break;
+
+		if (!fp->rx_cons_sb)
+			continue;
+
+		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+		for (j = start; j != end; j = RX_BD(j + 1)) {
+			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
+				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
+		}
+
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
+		for (j = start; j != end; j = RX_SGE(j + 1)) {
+			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
+				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
+		}
+
+		start = RCQ_BD(fp->rx_comp_cons - 10);
+		end = RCQ_BD(fp->rx_comp_cons + 503);
+		for (j = start; j != end; j = RCQ_BD(j + 1)) {
+			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+		}
+	}
+
+	/* Tx */
+	for_each_valid_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		if (!bp->fp)
+			break;
+
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+			if (!fp->txdata_ptr[cos])
+				break;
+
+			if (!txdata->tx_cons_sb)
+				continue;
+
+			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+			for (j = start; j != end; j = TX_BD(j + 1)) {
+				struct sw_tx_bd *sw_bd =
+					&txdata->tx_buf_ring[j];
+
+				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
+					  i, cos, j, sw_bd->skb,
+					  sw_bd->first_bd);
+			}
+
+			start = TX_BD(txdata->tx_bd_cons - 10);
+			end = TX_BD(txdata->tx_bd_cons + 254);
+			for (j = start; j != end; j = TX_BD(j + 1)) {
+				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
+					  i, cos, j, tx_bd[0], tx_bd[1],
+					  tx_bd[2], tx_bd[3]);
+			}
+		}
+	}
+#endif
+	if (IS_PF(bp)) {
+		bnx2x_fw_dump(bp);
+		bnx2x_mc_assert(bp);
+	}
+	BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
+#define FLR_WAIT_INTERVAL	50	/* usec */
+#define	FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+	int pN;
+	u32 init_crd;
+	u32 crd;
+	u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+	int pN;
+	u32 lines_occup;
+	u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+				     struct pbf_pN_buf_regs *regs,
+				     u32 poll_count)
+{
+	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+	u32 cur_cnt = poll_count;
+
+	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+	crd = crd_start = REG_RD(bp, regs->crd);
+	init_crd = REG_RD(bp, regs->init_crd);
+
+	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
+	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
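+	/* poll until either all credits are returned (crd == init_crd) or the
+	 * number of credits freed since the start covers what was outstanding
+	 */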
+	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+	       (init_crd - crd_start))) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERVAL);
+			crd = REG_RD(bp, regs->crd);
+			crd_freed = REG_RD(bp, regs->crd_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
+			   regs->pN, crd);
+			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+			   regs->pN, crd_freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+				     struct pbf_pN_cmd_regs *regs,
+				     u32 poll_count)
+{
+	u32 occup, to_free, freed, freed_start;
+	u32 cur_cnt = poll_count;
+
+	occup = to_free = REG_RD(bp, regs->lines_occup);
+	freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
+	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
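+	/* poll until the queue drains or the number of lines freed since the
+	 * start covers what was occupied when we began
+	 */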
+	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERVAL);
+			occup = REG_RD(bp, regs->lines_occup);
+			freed = REG_RD(bp, regs->lines_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
+			   regs->pN, occup);
+			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+			   regs->pN, freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
+}
+
+static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+				    u32 expected, u32 poll_count)
+{
+	u32 cur_cnt = poll_count;
+	u32 val;
+
+	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+		udelay(FLR_WAIT_INTERVAL);
+
+	return val;
+}
+
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+				    char *msg, u32 poll_cnt)
+{
+	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+	if (val != 0) {
+		BNX2X_ERR("%s usage count=%d\n", msg, val);
+		return 1;
+	}
+	return 0;
+}
+
+/* Common routines with VF FLR cleanup */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+	/* adjust polling timeout */
+	if (CHIP_REV_IS_EMUL(bp))
+		return FLR_POLL_CNT * 2000;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		return FLR_POLL_CNT * 120;
+
+	return FLR_POLL_CNT;
+}
+
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+	struct pbf_pN_cmd_regs cmd_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q0 :
+			PBF_REG_P0_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+			PBF_REG_P0_TQ_LINES_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q1 :
+			PBF_REG_P1_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+			PBF_REG_P1_TQ_LINES_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_LB_Q :
+			PBF_REG_P4_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+			PBF_REG_P4_TQ_LINES_FREED_CNT}
+	};
+
+	struct pbf_pN_buf_regs buf_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q0 :
+			PBF_REG_P0_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q0 :
+			PBF_REG_P0_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q1 :
+			PBF_REG_P1_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q1 :
+			PBF_REG_P1_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_LB_Q :
+			PBF_REG_P4_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_LB_Q :
+			PBF_REG_P4_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+	};
+
+	int i;
+
+	/* Verify the command queues are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+	/* Verify the transmission buffers are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
+{
+	u32 op_gen_command = 0;
+	u32 comp_addr = BAR_CSTRORM_INTMEM +
+			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+	int ret = 0;
+
+	if (REG_RD(bp, comp_addr)) {
+		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
+		return 1;
+	}
+
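+	/* build the aggregated-interrupt "final cleanup" command for this
+	 * function and kick it off via the XSDM operation generator
+	 */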
+	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
+	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
+
+	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+		BNX2X_ERR("FW final cleanup did not succeed\n");
+		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
+		   (REG_RD(bp, comp_addr)));
+		bnx2x_panic();
+		return 1;
+	}
+	/* Zero completion for next FLR */
+	REG_WR(bp, comp_addr, 0);
+
+	return ret;
+}
+
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+	u16 status;
+
+	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+	return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			CFC_REG_NUM_LCIDS_INSIDE_PF,
+			"CFC PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			DORQ_REG_PF_USAGE_CNT,
+			"DQ PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+			"QM PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+			"Timers VNIC usage counter timed out",
+			poll_cnt))
+		return 1;
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+			"Timers NUM_SCANS usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait DMAE PF usage counter to zero */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			dmae_reg_go_c[INIT_DMAE_C(bp)],
+			"DMAE command register timed out",
+			poll_cnt))
+		return 1;
+
+	return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, PBF_REG_DISABLE_PF);
+	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+	   val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+	/* Re-enable PF target read access */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	/* Poll HW usage counters */
+	DP(BNX2X_MSG_SP, "Polling usage counters\n");
+	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+		return -EBUSY;
+
+	/* Zero the igu 'trailing edge' and 'leading edge' */
+
+	/* Send the FW cleanup command */
+	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+		return -EBUSY;
+
+	/* ATC cleanup */
+
+	/* Verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+	/* Wait 100ms (not adjusted according to platform) */
+	msleep(100);
+
+	/* Verify no pending pci transactions */
+	if (bnx2x_is_pcie_pending(bp->pdev))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	/* Debug */
+	bnx2x_hw_enable_status(bp);
+
+	/*
+	 * Master enable - Due to WB DMAE writes performed before this
+	 * register is re-initialized as part of the regular function init
+	 */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	return 0;
+}
+
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
+
+	if (msix) {
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0);
+		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+		if (single_msix)
+			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+	} else if (msi) {
+		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else {
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
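+		/* For INTx on non-E1 chips, write the configuration once with
+		 * MSI/MSIX enabled, then clear that bit so the final write
+		 * below leaves only the INTx path enabled.
+		 */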
+		if (!CHIP_IS_E1(bp)) {
+			DP(NETIF_MSG_IFUP,
+			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);
+
+			REG_WR(bp, addr, val);
+
+			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+		}
+	}
+
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+	DP(NETIF_MSG_IFUP,
+	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
+	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+	REG_WR(bp, addr, val);
+	/*
+	 * Ensure that HC_CONFIG is written before leading/trailing edge config
+	 */
+	mmiowb();
+	barrier();
+
+	if (!CHIP_IS_E1(bp)) {
+		/* init leading/trailing edge */
+		if (IS_MF(bp)) {
+			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+			if (bp->port.pmf)
+				/* enable nig and gpio3 attention */
+				val |= 0x1100;
+		} else
+			val = 0xffff;
+
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+	}
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+	u32 val;
+	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
+
+	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+	if (msix) {
+		val &= ~(IGU_PF_CONF_INT_LINE_EN |
+			 IGU_PF_CONF_SINGLE_ISR_EN);
+		val |= (IGU_PF_CONF_MSI_MSIX_EN |
+			IGU_PF_CONF_ATTN_BIT_EN);
+
+		if (single_msix)
+			val |= IGU_PF_CONF_SINGLE_ISR_EN;
+	} else if (msi) {
+		val &= ~IGU_PF_CONF_INT_LINE_EN;
+		val |= (IGU_PF_CONF_MSI_MSIX_EN |
+			IGU_PF_CONF_ATTN_BIT_EN |
+			IGU_PF_CONF_SINGLE_ISR_EN);
+	} else {
+		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+		val |= (IGU_PF_CONF_INT_LINE_EN |
+			IGU_PF_CONF_ATTN_BIT_EN |
+			IGU_PF_CONF_SINGLE_ISR_EN);
+	}
+
+	/* Clean previous status - need to configure igu prior to ack */
+	if ((!msix) || single_msix) {
+		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+		bnx2x_ack_int(bp);
+	}
+
+	val |= IGU_PF_CONF_FUNC_EN;
+
+	DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
+	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+	if (val & IGU_PF_CONF_INT_LINE_EN)
+		pci_intx(bp->pdev, true);
+
+	barrier();
+
+	/* init leading/trailing edge */
+	if (IS_MF(bp)) {
+		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+		if (bp->port.pmf)
+			/* enable nig and gpio3 attention */
+			val |= 0x1100;
+	} else
+		val = 0xffff;
+
+	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_int_enable(bp);
+	else
+		bnx2x_igu_int_enable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int i, offset;
+
+	if (disable_hw)
+		/* prevent the HW from sending interrupts */
+		bnx2x_int_disable(bp);
+
+	/* make sure all ISRs are done */
+	if (msix) {
+		synchronize_irq(bp->msix_table[0].vector);
+		offset = 1;
+		if (CNIC_SUPPORT(bp))
+			offset++;
+		for_each_eth_queue(bp, i)
+			synchronize_irq(bp->msix_table[offset++].vector);
+	} else
+		synchronize_irq(bp->pdev->irq);
+
+	/* make sure sp_task is not running */
+	cancel_delayed_work(&bp->sp_task);
+	cancel_delayed_work(&bp->period_task);
+	flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+	   "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return false;
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+	   "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp:	driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+	if (BP_PATH(bp))
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+	else
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for current engine.
+ */
+static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+	/* Set the interrupt occurred bit for the sp-task to recognize it
+	 * must ack the interrupt and transition according to the IGU
+	 * state machine.
+	 */
+	atomic_set(&bp->interrupt_occurred, 1);
+
+	/* The sp_task must execute only after this bit
+	 * is set, otherwise we will get out of sync and miss all
+	 * further interrupts. Hence, the barrier.
+	 */
+	smp_wmb();
+
+	/* schedule sp_task to workqueue */
+	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+	DP(BNX2X_MSG_SP,
+	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
+	   fp->index, cid, command, bp->state,
+	   rr_cqe->ramrod_cqe.ramrod_type);
+
+	/* If cid is within VF range, replace the slowpath object with the
+	 * one corresponding to this VF
+	 */
+	if (cid >= BNX2X_FIRST_VF_CID  &&
+	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
+	switch (command) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_HALT):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_HALT;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TERMINATE):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_TERMINATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_EMPTY):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_EMPTY;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+			  command, fp->index);
+		return;
+	}
+
+	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
+		/* q_obj->complete_cmd() failure means that this was
+		 * an unexpected completion.
+		 *
+		 * In this case we don't want to increase bp->spq_left
+		 * because apparently we haven't sent this command in the
+		 * first place.
+		 */
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#else
+		return;
+#endif
+
+	smp_mb__before_atomic();
+	atomic_inc(&bp->cq_spq_left);
+	/* push the change in bp->spq_left and towards the memory */
+	smp_mb__after_atomic();
+
+	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+		/* if Q update ramrod is completed for last Q in AFEX vif set
+		 * flow, then ACK MCP at the end
+		 *
+		 * mark pending ACK to MCP bit.
+		 * prevent case that both bits are cleared.
+		 * At the end of load/unload driver checks that
+		 * sp_state is cleared, and this order prevents
+		 * races
+		 */
+		smp_mb__before_atomic();
+		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+		wmb();
+		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_atomic();
+
+		/* schedule the sp task as mcp ack is required */
+		bnx2x_schedule_sp_task(bp);
+	}
+
+	return;
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+	struct bnx2x *bp = netdev_priv(dev_instance);
+	u16 status = bnx2x_ack_int(bp);
+	u16 mask;
+	int i;
+	u8 cos;
+
+	/* Return here if interrupt is shared and it's not for us */
+	if (unlikely(status == 0)) {
+		DP(NETIF_MSG_INTR, "not our interrupt!\n");
+		return IRQ_NONE;
+	}
+	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
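+		/* bit 0 of the ack'ed status is the default SB, the next bit
+		 * is the CNIC SB when CNIC is supported, and the fastpath SBs
+		 * follow
+		 */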
+		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
+		if (status & mask) {
+			/* Handle Rx or Tx according to SB id */
+			for_each_cos_in_tx_queue(fp, cos)
+				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
+			prefetch(&fp->sb_running_index[SM_RX_ID]);
+			napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
+			status &= ~mask;
+		}
+	}
+
+	if (CNIC_SUPPORT(bp)) {
+		mask = 0x2;
+		if (status & (mask | 0x1)) {
+			struct cnic_ops *c_ops = NULL;
+
+			rcu_read_lock();
+			c_ops = rcu_dereference(bp->cnic_ops);
+			if (c_ops && (bp->cnic_eth_dev.drv_state &
+				      CNIC_DRV_STATE_HANDLES_IRQ))
+				c_ops->cnic_handler(bp->cnic_data, NULL);
+			rcu_read_unlock();
+
+			status &= ~mask;
+		}
+	}
+
+	if (unlikely(status & 0x1)) {
+
+		/* schedule sp task to perform default status block work, ack
+		 * attentions and enable interrupts.
+		 */
+		bnx2x_schedule_sp_task(bp);
+
+		status &= ~0x1;
+		if (!status)
+			return IRQ_HANDLED;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
+
+	return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+	int cnt;
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is not already taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit) {
+		BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
+		   lock_status, resource_bit);
+		return -EEXIST;
+	}
+
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
+		/* Try to acquire the lock */
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
+		if (lock_status & resource_bit)
+			return 0;
+
+		usleep_range(5000, 10000);
+	}
+	BNX2X_ERR("Timeout\n");
+	return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is currently taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (!(lock_status & resource_bit)) {
+		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
+			  lock_status, resource_bit);
+		return -EFAULT;
+	}
+
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
+	return 0;
+}
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+	int value;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	/* read GPIO value */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+	/* get the requested pin value */
+	if ((gpio_reg & gpio_mask) == gpio_mask)
+		value = 1;
+	else
+		value = 0;
+
+	return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and mask except the float bits */
+	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK,
+		   "Set GPIO %d (shift %d) -> output low\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK,
+		   "Set GPIO %d (shift %d) -> output high\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK,
+		   "Set GPIO %d (shift %d) -> input\n",
+		   gpio_num, gpio_shift);
+		/* set FLOAT */
+		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+	u32 gpio_reg = 0;
+	int rc = 0;
+
+	/* Any port swapping should be handled by caller. */
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and clear FLOAT/CLR/SET for the requested pins */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+		/* set CLR */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+		/* set SET */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+		/* set FLOAT */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc == 0)
+		REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO int */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+		DP(NETIF_MSG_LINK,
+		   "Clear GPIO INT %d (shift %d) -> output low\n",
+		   gpio_num, gpio_shift);
+		/* clear SET and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+		DP(NETIF_MSG_LINK,
+		   "Set GPIO INT %d (shift %d) -> output high\n",
+		   gpio_num, gpio_shift);
+		/* clear CLR and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
+{
+	u32 spio_reg;
+
+	/* Only 2 SPIOs are configurable */
+	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
+		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	/* read SPIO and mask except the float bits */
+	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_SPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
+		/* clear FLOAT and set CLR */
+		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+		spio_reg |=  (spio << MISC_SPIO_CLR_POS);
+		break;
+
+	case MISC_SPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
+		/* clear FLOAT and set SET */
+		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+		spio_reg |=  (spio << MISC_SPIO_SET_POS);
+		break;
+
+	case MISC_SPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
+		/* set FLOAT */
+		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_SPIO, spio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+	return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+	bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+					   ADVERTISED_Pause);
+	switch (bp->link_vars.ieee_fc &
+		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+						  ADVERTISED_Pause);
+		break;
+
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void bnx2x_set_requested_fc(struct bnx2x *bp)
+{
+	/* Initialize link parameters structure variables
+	 * It is recommended to turn off RX FC for jumbo frames
+	 * for better performance
+	 */
+	if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+	else
+		bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+}
+
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+	u32 pause_enabled = 0;
+
+	if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			pause_enabled = 1;
+
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+			   USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+		       pause_enabled);
+	}
+
+	DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+	   pause_enabled ? "enabled" : "disabled");
+}
+
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+	int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
+	u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_set_requested_fc(bp);
+		bnx2x_acquire_phy_lock(bp);
+
+		if (load_mode == LOAD_DIAG) {
+			struct link_params *lp = &bp->link_params;
+			lp->loopback_mode = LOOPBACK_XGXS;
+			/* Prefer doing PHY loopback at highest speed */
+			if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
+				if (lp->speed_cap_mask[cfx_idx] &
+				    PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+					lp->req_line_speed[cfx_idx] =
+					SPEED_20000;
+				else if (lp->speed_cap_mask[cfx_idx] &
+					    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+						lp->req_line_speed[cfx_idx] =
+						SPEED_10000;
+				else
+					lp->req_line_speed[cfx_idx] =
+					SPEED_1000;
+			}
+		}
+
+		if (load_mode == LOAD_LOOPBACK_EXT) {
+			struct link_params *lp = &bp->link_params;
+			lp->loopback_mode = LOOPBACK_EXT;
+		}
+
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_init_dropless_fc(bp);
+
+		bnx2x_calc_fc_adv(bp);
+
+		if (bp->link_vars.link_up) {
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+			bnx2x_link_report(bp);
+		}
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+	return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_init_dropless_fc(bp);
+
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+	bnx2x_acquire_phy_lock(bp);
+	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+	bnx2x_release_phy_lock(bp);
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+	u8 rc = 0;
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+				     is_serdes);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+	return rc;
+}
+
+/* Calculates the sum of vn_min_rates.
+   It's needed for further normalizing of the min_rates.
+   Returns:
+     sum of vn_min_rates.
+       or
+     0 - if all the min_rates are 0.
+     In the latter case the fairness algorithm should be deactivated.
+     If not all min_rates are zero then those that are zero will be set to 1.
+ */
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+				      struct cmng_init_input *input)
+{
+	int all_zero = 1;
+	int vn;
+
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		u32 vn_cfg = bp->mf_config[vn];
+		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+		/* Skip hidden vns */
+		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+			vn_min_rate = 0;
+		/* If min rate is zero - set it to 1 */
+		else if (!vn_min_rate)
+			vn_min_rate = DEF_MIN_RATE;
+		else
+			all_zero = 0;
+
+		input->vnic_min_rate[vn] = vn_min_rate;
+	}
+
+	/* if ETS or all min rates are zeros - disable fairness */
+	if (BNX2X_IS_ETS_ENABLED(bp)) {
+		input->flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+	} else if (all_zero) {
+		input->flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP,
+		   "All MIN values are zeroes fairness will be disabled\n");
+	} else
+		input->flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+				    struct cmng_init_input *input)
+{
+	u16 vn_max_rate;
+	u32 vn_cfg = bp->mf_config[vn];
+
+	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+		vn_max_rate = 0;
+	else {
+		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+		if (IS_MF_PERCENT_BW(bp)) {
+			/* maxCfg is in percent of link speed */
+			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+		} else /* SD modes */
+			/* maxCfg is absolute in 100Mb units */
+			vn_max_rate = maxCfg * 100;
+	}
+
+	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
+
+	input->vnic_max_rate[vn] = vn_max_rate;
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+	if (CHIP_REV_IS_SLOW(bp))
+		return CMNG_FNS_NONE;
+	if (IS_MF(bp))
+		return CMNG_FNS_MINMAX;
+
+	return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+	if (BP_NOMCP(bp))
+		return; /* what should the default value be in this case? */
+
+	/* For 2 port configuration the absolute function number formula
+	 * is:
+	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
+	 *
+	 *      and there are 4 functions per port
+	 *
+	 * For 4 port configuration it is
+	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+	 *
+	 *      and there are 2 functions per port
+	 */
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+		if (func >= E1H_FUNC_MAX)
+			break;
+
+		bp->mf_config[vn] =
+			MF_CFG_RD(bp, func_mf_config[func].config);
+	}
+	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+		bp->flags |= MF_FUNC_DIS;
+	} else {
+		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+		bp->flags &= ~MF_FUNC_DIS;
+	}
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+	struct cmng_init_input input;
+	memset(&input, 0, sizeof(struct cmng_init_input));
+
+	input.port_rate = bp->link_vars.line_speed;
+
+	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
+		int vn;
+
+		/* read mf conf from shmem */
+		if (read_cfg)
+			bnx2x_read_mf_cfg(bp);
+
+		/* vn_weight_sum and enable fairness if not 0 */
+		bnx2x_calc_vn_min(bp, &input);
+
+		/* calculate and set min-max rate for each vn */
+		if (bp->port.pmf)
+			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
+				bnx2x_calc_vn_max(bp, vn, &input);
+
+		/* always enable rate shaping and fairness */
+		input.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+
+		bnx2x_init_cmng(&input, &bp->cmng);
+		return;
+	}
+
+	/* rate shaping and fairness are disabled */
+	DP(NETIF_MSG_IFUP,
+	   "rate shaping and fairness are disabled\n");
+}
+
+static void storm_memset_cmng(struct bnx2x *bp,
+			      struct cmng_init *cmng,
+			      u8 port)
+{
+	int vn;
+	size_t size = sizeof(struct cmng_struct_per_port);
+
+	u32 addr = BAR_XSTRORM_INTMEM +
+			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
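+	/* write the per-vnic rate-shaping and fairness parameters for every
+	 * function on this port
+	 */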
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		int func = func_by_vn(bp, vn);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct rate_shaping_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct fairness_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+	}
+}
+
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+	if (cmng_fns != CMNG_FNS_NONE) {
+		bnx2x_cmng_fns_init(bp, false, cmng_fns);
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+	} else {
+		/* rate shaping and fairness are disabled */
+		DP(NETIF_MSG_IFUP,
+		   "single function mode without fairness\n");
+	}
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+	/* Make sure that we are synced with the current statistics */
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+	bnx2x_init_dropless_fc(bp);
+
+	if (bp->link_vars.link_up) {
+
+		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+			struct host_port_stats *pstats;
+
+			pstats = bnx2x_sp(bp, port_stats);
+			/* reset old mac stats */
+			memset(&(pstats->mac_stx[0]), 0,
+			       sizeof(struct mac_stx));
+		}
+		if (bp->state == BNX2X_STATE_OPEN)
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	}
+
+	if (bp->link_vars.link_up && bp->link_vars.line_speed)
+		bnx2x_set_local_cmng(bp);
+
+	__bnx2x_link_report(bp);
+
+	if (IS_MF(bp))
+		bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+	if (bp->state != BNX2X_STATE_OPEN)
+		return;
+
+	/* read updated dcb configuration */
+	if (IS_PF(bp)) {
+		bnx2x_dcbx_pmf_update(bp);
+		bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+		if (bp->link_vars.link_up)
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+		else
+			bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+		/* indicate link status */
+		bnx2x_link_report(bp);
+
+	} else { /* VF */
+		bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+					  SUPPORTED_10baseT_Full |
+					  SUPPORTED_100baseT_Half |
+					  SUPPORTED_100baseT_Full |
+					  SUPPORTED_1000baseT_Full |
+					  SUPPORTED_2500baseX_Full |
+					  SUPPORTED_10000baseT_Full |
+					  SUPPORTED_TP |
+					  SUPPORTED_FIBRE |
+					  SUPPORTED_Autoneg |
+					  SUPPORTED_Pause |
+					  SUPPORTED_Asym_Pause);
+		bp->port.advertising[0] = bp->port.supported[0];
+
+		bp->link_params.bp = bp;
+		bp->link_params.port = BP_PORT(bp);
+		bp->link_params.req_duplex[0] = DUPLEX_FULL;
+		bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+		bp->link_params.req_line_speed[0] = SPEED_10000;
+		bp->link_params.speed_cap_mask[0] = 0x7f0000;
+		bp->link_params.switch_cfg = SWITCH_CFG_10G;
+		bp->link_vars.mac_type = MAC_TYPE_BMAC;
+		bp->link_vars.line_speed = SPEED_10000;
+		bp->link_vars.link_status =
+			(LINK_STATUS_LINK_UP |
+			 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+		bp->link_vars.link_up = 1;
+		bp->link_vars.duplex = DUPLEX_FULL;
+		bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		__bnx2x_link_report(bp);
+
+		bnx2x_sample_bulletin(bp);
+
+		/* if bulletin board did not have an update for link status
+		 * __bnx2x_link_report will report current status
+		 * but it will NOT duplicate report in case of already reported
+		 * during sampling bulletin board.
+		 */
+		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	}
+}
+
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+				  u16 vlan_val, u8 allowed_prio)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_afex_update_params *f_update_params =
+		&func_params.params.afex_update;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+	/* no need to wait for RAMROD completion, so don't
+	 * set RAMROD_COMP_WAIT flag
+	 */
+
+	f_update_params->vif_id = vifid;
+	f_update_params->afex_default_vlan = vlan_val;
+	f_update_params->allowed_priorities = allowed_prio;
+
+	/* if ramrod can not be sent, response to MCP immediately */
+	if (bnx2x_func_state_change(bp, &func_params) < 0)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+	return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+					  u16 vif_index, u8 func_bit_map)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_afex_viflists_params *update_params =
+		&func_params.params.afex_viflists;
+	int rc;
+	u32 drv_msg_code;
+
+	/* validate only LIST_SET and LIST_GET are received from switch */
+	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+			  cmd_type);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+	/* set parameters according to cmd_type */
+	update_params->afex_vif_list_command = cmd_type;
+	update_params->vif_list_index = vif_index;
+	update_params->func_bit_map =
+		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+	update_params->func_to_clear = 0;
+	drv_msg_code =
+		(cmd_type == VIF_LIST_RULE_GET) ?
+		DRV_MSG_CODE_AFEX_LISTGET_ACK :
+		DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+	/* if ramrod can not be sent, respond to MCP immediately for
+	 * SET and GET requests (other are not triggered from MCP)
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc < 0)
+		bnx2x_fw_command(bp, drv_msg_code, 0);
+
+	return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+	struct afex_stats afex_stats;
+	u32 func = BP_ABS_FUNC(bp);
+	u32 mf_config;
+	u16 vlan_val;
+	u32 vlan_prio;
+	u16 vif_id;
+	u8 allowed_prio;
+	u8 vlan_mode;
+	u32 addr_to_write, vifid, addrs, stats_type, i;
+
+	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+		   vifid, addrs);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+					       addrs);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+		addr_to_write = SHMEM2_RD(bp,
+			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+		stats_type = SHMEM2_RD(bp,
+			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+		   addr_to_write);
+
+		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+		/* write response to scratchpad, for MCP */
+		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+			REG_WR(bp, addr_to_write + i*sizeof(u32),
+			       *(((u32 *)(&afex_stats))+i));
+
+		/* send ack message to MCP */
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+		bp->mf_config[BP_VN(bp)] = mf_config;
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+		   mf_config);
+
+		/* if VIF_SET is "enabled" */
+		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+			/* set rate limit directly to internal RAM */
+			struct cmng_init_input cmng_input;
+			struct rate_shaping_vars_per_vn m_rs_vn;
+			size_t size = sizeof(struct rate_shaping_vars_per_vn);
+			u32 addr = BAR_XSTRORM_INTMEM +
+			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+			bp->mf_config[BP_VN(bp)] = mf_config;
+
+			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+			m_rs_vn.vn_counter.rate =
+				cmng_input.vnic_max_rate[BP_VN(bp)];
+			m_rs_vn.vn_counter.quota =
+				(m_rs_vn.vn_counter.rate *
+				 RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+			/* read relevant values from mf_cfg struct in shmem */
+			vif_id =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+			vlan_val =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+			vlan_prio = (mf_config &
+				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+			vlan_mode =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+			allowed_prio =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+			/* send ramrod to FW, return in case of failure */
+			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+						   allowed_prio))
+				return;
+
+			bp->afex_def_vlan_tag = vlan_val;
+			bp->afex_vlan_mode = vlan_mode;
+		} else {
+			/* notify link down because BP->flags is disabled */
+			bnx2x_link_report(bp);
+
+			/* send INVALID VIF ramrod to FW */
+			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+			/* Reset the default afex VLAN */
+			bp->afex_def_vlan_tag = -1;
+		}
+	}
+}
+
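+/* Handle an MCP request to update the S-tag: re-learn the tag from shmem,
+ * program it into the NIG LLH, notify the FW via a switch-update ramrod and
+ * report success or failure back to the MCP.
+ */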
+static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
+{
+	struct bnx2x_func_switch_update_params *switch_update_params;
+	struct bnx2x_func_state_params func_params;
+
+	memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
+	switch_update_params = &func_params.params.switch_update;
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
+		int func = BP_ABS_FUNC(bp);
+		u32 val;
+
+		/* Re-learn the S-tag from shmem */
+		val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				FUNC_MF_CFG_E1HOV_TAG_MASK;
+		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+			bp->mf_ov = val;
+		} else {
+			BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
+			goto fail;
+		}
+
+		/* Configure new S-tag in LLH */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
+		       bp->mf_ov);
+
+		/* Send a ramrod to notify the FW of the change */
+		__set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+			  &switch_update_params->changes);
+		switch_update_params->vlan = bp->mf_ov;
+
+		if (bnx2x_func_state_change(bp, &func_params) < 0) {
+			BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
+				  bp->mf_ov);
+			goto fail;
+		} else {
+			DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+			   bp->mf_ov);
+		}
+	} else {
+		goto fail;
+	}
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+	return;
+fail:
+	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
+}
+
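+/* Take over the Port Management Function role: mark this function as PMF,
+ * restart the periodic task, update DCBX, re-enable NIG attentions for this
+ * vnic and kick the statistics state machine.
+ */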
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	bp->port.pmf = 1;
+	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
+
+	/*
+	 * We need the mb() to ensure the ordering between the writing to
+	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+	 */
+	smp_mb();
+
+	/* queue a periodic task */
+	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+	bnx2x_dcbx_pmf_update(bp);
+
+	/* enable nig attention */
+	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+	} else if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+	}
+
+	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+	int mb_idx = BP_FW_MB_IDX(bp);
+	u32 seq;
+	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+	mutex_lock(&bp->fw_mb_mutex);
+	seq = ++bp->fw_seq;
+	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+			(command | seq), param);
+
+	do {
+		/* let the FW do its magic ... */
+		msleep(delay);
+
+		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+		/* Give the FW up to 5 seconds (500*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
+
+	/* is this a reply to our command? */
+	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+		rc &= FW_MSG_CODE_MASK;
+	else {
+		/* FW BUG! */
+		BNX2X_ERR("FW failed to respond!\n");
+		bnx2x_fw_dump(bp);
+		rc = 0;
+	}
+	mutex_unlock(&bp->fw_mb_mutex);
+
+	return rc;
+}
+
+static void storm_memset_func_cfg(struct bnx2x *bp,
+				 struct tstorm_eth_function_common_config *tcfg,
+				 u16 abs_fid)
+{
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+	if (CHIP_IS_E1x(bp)) {
+		struct tstorm_eth_function_common_config tcfg = {0};
+
+		storm_memset_func_cfg(bp, &tcfg, p->func_id);
+	}
+
+	/* Enable the function in the FW */
+	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+	storm_memset_func_en(bp, p->func_id, 1);
+
+	/* spq */
+	if (p->spq_active) {
+		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY +
+		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+	}
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp:		device handle
+ * @fp:		queue handle
+ * @zero_stats:	TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common for the Tx-only and not normal connections.
+ */
+static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+					    struct bnx2x_fastpath *fp,
+					    bool zero_stats)
+{
+	unsigned long flags = 0;
+
+	/* PF driver will always initialize the Queue to an ACTIVE state */
+	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+	/* tx only connections collect statistics (on the same index as the
+	 * parent connection). The statistics are zeroed when the parent
+	 * connection is initialized.
+	 */
+
+	__set_bit(BNX2X_Q_FLG_STATS, &flags);
+	if (zero_stats)
+		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
+	if (bp->flags & TX_SWITCHING)
+		__set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
+	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
+	return flags;
+}
+
+static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp,
+				       bool leading)
+{
+	unsigned long flags = 0;
+
+	/* calculate other queue flags */
+	if (IS_MF_SD(bp))
+		__set_bit(BNX2X_Q_FLG_OV, &flags);
+
+	if (IS_FCOE_FP(fp)) {
+		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
+		/* For FCoE - force usage of default priority (for afex) */
+		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+	}
+
+	if (fp->mode != TPA_MODE_DISABLED) {
+		__set_bit(BNX2X_Q_FLG_TPA, &flags);
+		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+		if (fp->mode == TPA_MODE_GRO)
+			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
+	}
+
+	if (leading) {
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
+	}
+
+	/* Always set HW VLAN stripping */
+	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+	/* configure silent vlan removal */
+	if (IS_MF_AFEX(bp))
+		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
+	return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+	u8 cos)
+{
+	gen_init->stat_id = bnx2x_stats_id(fp);
+	gen_init->spcl_id = fp->cl_id;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		gen_init->mtu = bp->dev->mtu;
+
+	gen_init->cos = cos;
+
+	gen_init->fp_hsi = ETH_FP_HSI_VERSION;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+	struct bnx2x_rxq_setup_params *rxq_init)
+{
+	u8 max_sge = 0;
+	u16 sge_sz = 0;
+	u16 tpa_agg_size = 0;
+
+	if (fp->mode != TPA_MODE_DISABLED) {
+		pause->sge_th_lo = SGE_TH_LO(bp);
+		pause->sge_th_hi = SGE_TH_HI(bp);
+
+		/* validate SGE ring has enough to cross high threshold */
+		WARN_ON(bp->dropless_fc &&
+				pause->sge_th_hi + FW_PREFETCH_CNT >
+				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
+		tpa_agg_size = TPA_AGG_SIZE;
+		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+			SGE_PAGE_SHIFT;
+		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
+	}
+
+	/* pause - not for e1 */
+	if (!CHIP_IS_E1(bp)) {
+		pause->bd_th_lo = BD_TH_LO(bp);
+		pause->bd_th_hi = BD_TH_HI(bp);
+
+		pause->rcq_th_lo = RCQ_TH_LO(bp);
+		pause->rcq_th_hi = RCQ_TH_HI(bp);
+		/*
+		 * validate that rings have enough entries to cross
+		 * high thresholds
+		 */
+		WARN_ON(bp->dropless_fc &&
+				pause->bd_th_hi + FW_PREFETCH_CNT >
+				bp->rx_ring_size);
+		WARN_ON(bp->dropless_fc &&
+				pause->rcq_th_hi + FW_PREFETCH_CNT >
+				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
+
+		pause->pri_map = 1;
+	}
+
+	/* rxq setup */
+	rxq_init->dscr_map = fp->rx_desc_mapping;
+	rxq_init->sge_map = fp->rx_sge_mapping;
+	rxq_init->rcq_map = fp->rx_comp_mapping;
+	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+	/* This should be the maximum number of data bytes that may be
+	 * placed on the BD (not including padding).
+	 */
+	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
+
+	rxq_init->cl_qzone_id = fp->cl_qzone_id;
+	rxq_init->tpa_agg_sz = tpa_agg_size;
+	rxq_init->sge_buf_sz = sge_sz;
+	rxq_init->max_sges_pkt = max_sge;
+	rxq_init->rss_engine_id = BP_FUNC(bp);
+	rxq_init->mcast_engine_id = BP_FUNC(bp);
+
+	/* Maximum number of simultaneous TPA aggregations for this Queue.
+	 *
+	 * For PF Clients it should be the maximum available number.
+	 * VF driver(s) may want to define it to a smaller value.
+	 */
+	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
+
+	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+	rxq_init->fw_sb_id = fp->fw_sb_id;
+
+	if (IS_FCOE_FP(fp))
+		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+	else
+		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+	/* configure silent vlan removal
+	 * if multi function mode is afex, then mask default vlan
+	 */
+	if (IS_MF_AFEX(bp)) {
+		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+		rxq_init->silent_removal_mask = VLAN_VID_MASK;
+	}
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+	u8 cos)
+{
+	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
+	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+	txq_init->fw_sb_id = fp->fw_sb_id;
+
+	/*
+	 * set the tss leading client id for TX classification ==
+	 * leading RSS client id
+	 */
+	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+	if (IS_FCOE_FP(fp)) {
+		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+	}
+}
+
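+/* PF-side FW initialization: clear the IGU statistics counters (non-E1x),
+ * enable the function and its slow-path queue in the storms, set initial
+ * congestion management values (10G is assumed until a link comes up) and
+ * initialize the event queue data in the storm RAM.
+ */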
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+	struct bnx2x_func_init_params func_init = {0};
+	struct event_ring_data eq_data = { {0} };
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* reset IGU PF statistics: MSIX + ATTN */
+		/* PF */
+		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+			   (CHIP_MODE_IS_4_PORT(bp) ?
+				BP_FUNC(bp) : BP_VN(bp))*4, 0);
+		/* ATTN */
+		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+			   (CHIP_MODE_IS_4_PORT(bp) ?
+				BP_FUNC(bp) : BP_VN(bp))*4, 0);
+	}
+
+	func_init.spq_active = true;
+	func_init.pf_id = BP_FUNC(bp);
+	func_init.func_id = BP_FUNC(bp);
+	func_init.spq_map = bp->spq_mapping;
+	func_init.spq_prod = bp->spq_prod_idx;
+
+	bnx2x_func_init(bp, &func_init);
+
+	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+	/*
+	 * Congestion management values depend on the link rate.
+	 * There is no active link yet, so the initial link rate is set to
+	 * 10 Gbps. When the link comes up, the congestion management values
+	 * are re-calculated according to the actual link rate.
+	 */
+	bp->link_vars.line_speed = SPEED_10000;
+	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+	/* Only the PMF sets the HW */
+	if (bp->port.pmf)
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+	/* init Event Queue - PCI bus guarantees correct endianness */
+	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+	eq_data.producer = bp->eq_prod;
+	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+	eq_data.sb_id = DEF_SB_ID;
+	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	bnx2x_tx_disable(bp);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+
+	/* Tx queue should be only re-enabled */
+	netif_tx_wake_all_queues(bp->dev);
+
+	/*
+	 * Do not call netif_carrier_on here; it will be called during the
+	 * link state check if the link is up.
+	 */
+}
+
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+	struct eth_stats_info *ether_stat =
+		&bp->slowpath->drv_info_to_mcp.ether_stat;
+	struct bnx2x_vlan_mac_obj *mac_obj =
+		&bp->sp_objs->mac_obj;
+	int i;
+
+	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+		ETH_STAT_INFO_VERSION_LEN);
+
+	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+	 * mac_local field of the ether_stat struct. The base address is offset
+	 * by 2 bytes to account for the field being 8 bytes while a mac
+	 * address is only 6 bytes. Likewise, the stride for get_n_elements is
+	 * 2 bytes to compensate for the difference between the 6 bytes of a
+	 * mac and the 8 bytes allocated by the ether_stat struct, so the macs
+	 * land in their proper positions.
+	 */
+	for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+		memset(ether_stat->mac_local + i, 0,
+		       sizeof(ether_stat->mac_local[0]));
+	mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+				DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+				ether_stat->mac_local + MAC_PAD, MAC_PAD,
+				ETH_ALEN);
+	ether_stat->mtu_size = bp->dev->mtu;
+	if (bp->dev->features & NETIF_F_RXCSUM)
+		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+	if (bp->dev->features & NETIF_F_TSO)
+		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+	ether_stat->feature_flags |= bp->common.boot_mode;
+
+	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+	ether_stat->txq_size = bp->tx_ring_size;
+	ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+	struct fcoe_stats_info *fcoe_stat =
+		&bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+	if (!CNIC_LOADED(bp))
+		return;
+
+	memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
+
+	fcoe_stat->qos_priority =
+		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+	/* insert FCoE stats from ramrod response */
+	if (!NO_FCOE(bp)) {
+		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+			tstorm_queue_statistics;
+
+		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+			xstorm_queue_statistics;
+
+		struct fcoe_statistics_params *fw_fcoe_stat =
+			&bp->fw_stats_data->fcoe;
+
+		ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+			  fcoe_stat->rx_bytes_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+		ADD_64_LE(fcoe_stat->rx_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+			  fcoe_stat->rx_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+		ADD_64_LE(fcoe_stat->rx_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+			  fcoe_stat->rx_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+		ADD_64_LE(fcoe_stat->rx_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+			  fcoe_stat->rx_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+			  fcoe_stat->rx_frames_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+			  fcoe_stat->rx_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+			  fcoe_stat->rx_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+		ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+			  fcoe_stat->rx_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+		ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+			  fcoe_stat->tx_bytes_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+		ADD_64_LE(fcoe_stat->tx_bytes_hi,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+			  fcoe_stat->tx_bytes_lo,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+		ADD_64_LE(fcoe_stat->tx_bytes_hi,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+			  fcoe_stat->tx_bytes_lo,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+		ADD_64_LE(fcoe_stat->tx_bytes_hi,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+			  fcoe_stat->tx_bytes_lo,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+			  fcoe_stat->tx_frames_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+			  fcoe_stat->tx_frames_lo,
+			  fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+			  fcoe_stat->tx_frames_lo,
+			  fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+		ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+			  fcoe_stat->tx_frames_lo,
+			  fcoe_q_xstorm_stats->mcast_pkts_sent);
+	}
+
+	/* ask L5 driver to add data to the struct */
+	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+	struct iscsi_stats_info *iscsi_stat =
+		&bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+	if (!CNIC_LOADED(bp))
+		return;
+
+	memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+	       ETH_ALEN);
+
+	iscsi_stat->qos_priority =
+		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+	/* ask L5 driver to add data to the struct */
+	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+}
+
+/* called due to MCP event (on pmf):
+ *	reread new bandwidth configuration
+ *	configure FW
+ *	notify other functions about the change
+ */
+static void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+	if (bp->link_vars.link_up) {
+		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+		bnx2x_link_sync_notify(bp);
+	}
+	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+	bnx2x_config_mf_bw(bp);
+	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+	bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH	(20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT		(25)
+
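+/* Serve an MCP driver-info request: fill the requested stats structure in
+ * the slowpath buffer (under drv_info_mutex), publish its DMA address via
+ * shmem2, ACK the MCP and, when supported, wait for management to indicate
+ * it has finished reading the buffer.
+ */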
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+	enum drv_info_opcode op_code;
+	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+	bool release = false;
+	int wait;
+
+	/* if drv_info version supported by MFW doesn't match - send NACK */
+	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+		return;
+	}
+
+	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+		  DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+	/* Must prevent other flows from accessing drv_info_to_mcp */
+	mutex_lock(&bp->drv_info_mutex);
+
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+
+	switch (op_code) {
+	case ETH_STATS_OPCODE:
+		bnx2x_drv_info_ether_stat(bp);
+		break;
+	case FCOE_STATS_OPCODE:
+		bnx2x_drv_info_fcoe_stat(bp);
+		break;
+	case ISCSI_STATS_OPCODE:
+		bnx2x_drv_info_iscsi_stat(bp);
+		break;
+	default:
+		/* if op code isn't supported - send NACK */
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+		goto out;
+	}
+
+	/* if we got drv_info attn from MFW then these fields are defined in
+	 * shmem2 for sure
+	 */
+	SHMEM2_WR(bp, drv_info_host_addr_lo,
+		U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+	SHMEM2_WR(bp, drv_info_host_addr_hi,
+		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+	/* Since management possibly wants both this and get_driver_version,
+	 * we need to wait until management notifies us that it has finished
+	 * utilizing the buffer.
+	 */
+	if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+		DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+	} else if (!bp->drv_info_mng_owner) {
+		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+			u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+			/* Management is done; need to clear indication */
+			if (indication & bit) {
+				SHMEM2_WR(bp, mfw_drv_indication,
+					  indication & ~bit);
+				release = true;
+				break;
+			}
+
+			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+		}
+	}
+	if (!release) {
+		DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+		bp->drv_info_mng_owner = true;
+	}
+
+out:
+	mutex_unlock(&bp->drv_info_mutex);
+}
+
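+/* Pack a dotted driver version string into a 32-bit value, one byte per
+ * component. bnx2x-format strings are expected to carry a leading "1."
+ * before the version digits; missing components are treated as zero.
+ */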
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+	u8 vals[4];
+	int i = 0;
+
+	if (bnx2x_format) {
+		i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+		if (i > 0)
+			vals[0] -= '0';
+	} else {
+		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+			   &vals[0], &vals[1], &vals[2], &vals[3]);
+	}
+
+	while (i < 4)
+		vals[i++] = 0;
+
+	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+	u32 iscsiver = DRV_VER_NOT_LOADED;
+	u32 fcoever = DRV_VER_NOT_LOADED;
+	u32 ethver = DRV_VER_NOT_LOADED;
+	int idx = BP_FW_MB_IDX(bp);
+	u8 *version;
+
+	if (!SHMEM2_HAS(bp, func_os_drv_ver))
+		return;
+
+	mutex_lock(&bp->drv_info_mutex);
+	/* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+	if (bp->drv_info_mng_owner)
+		goto out;
+
+	if (bp->state != BNX2X_STATE_OPEN)
+		goto out;
+
+	/* Parse ethernet driver version */
+	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	if (!CNIC_LOADED(bp))
+		goto out;
+
+	/* Try getting storage driver version via cnic */
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_iscsi_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+	iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+	memset(&bp->slowpath->drv_info_to_mcp, 0,
+	       sizeof(union drv_info_to_mcp));
+	bnx2x_drv_info_fcoe_stat(bp);
+	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+	fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+	mutex_unlock(&bp->drv_info_mutex);
+
+	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+	   ethver, iscsiver, fcoever);
+}
+
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+	u32 drv_ver;
+	u32 valid_dump;
+
+	if (!SHMEM2_HAS(bp, drv_info))
+		return;
+
+	/* Update Driver load time, possibly broken in y2038 */
+	SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
+
+	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+	SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+	/* Check & notify On-Chip dump. */
+	valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+	if (valid_dump & FIRST_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+	if (valid_dump & SECOND_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
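+/* Handle DCC/OEM events from the MCP: PF disable/enable and bandwidth
+ * re-allocation. Any event bits left unhandled make the driver report the
+ * failure code back to the MCP; otherwise the OK code is sent.
+ */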
+static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
+{
+	u32 cmd_ok, cmd_fail;
+
+	/* sanity */
+	if (event & DRV_STATUS_DCC_EVENT_MASK &&
+	    event & DRV_STATUS_OEM_EVENT_MASK) {
+		BNX2X_ERR("Received simultaneous events %08x\n", event);
+		return;
+	}
+
+	if (event & DRV_STATUS_DCC_EVENT_MASK) {
+		cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
+		cmd_ok = DRV_MSG_CODE_DCC_OK;
+	} else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
+		cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
+		cmd_ok = DRV_MSG_CODE_OEM_OK;
+	}
+
+	DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
+
+	if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+		     DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
+		/* This is the only place besides the function initialization
+		 * where the bp->flags can change so it is done without any
+		 * locks
+		 */
+		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+			DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
+			bp->flags |= MF_FUNC_DIS;
+
+			bnx2x_e1h_disable(bp);
+		} else {
+			DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
+			bp->flags &= ~MF_FUNC_DIS;
+
+			bnx2x_e1h_enable(bp);
+		}
+		event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+			   DRV_STATUS_OEM_DISABLE_ENABLE_PF);
+	}
+
+	if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+		     DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
+		bnx2x_config_mf_bw(bp);
+		event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+			   DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
+	}
+
+	/* Report results to MCP */
+	if (event)
+		bnx2x_fw_command(bp, cmd_fail, 0);
+	else
+		bnx2x_fw_command(bp, cmd_ok, 0);
+}
+
+/* must be called under the spq lock */
+static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+	struct eth_spe *next_spe = bp->spq_prod_bd;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(BNX2X_MSG_SP, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+	return next_spe;
+}
+
+/* must be called under the spq lock */
+static void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	/*
+	 * Make sure that BD data is updated before writing the producer:
+	 * BD data is written to the memory, the producer is read from the
+	 * memory, thus we need a full memory barrier to ensure the ordering.
+	 */
+	mb();
+
+	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+		 bp->spq_prod_idx);
+	mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd:	command to check
+ * @cmd_type:	command type
+ */
+static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+	if ((cmd_type == NONE_CONNECTION_TYPE) ||
+	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp:		driver handle
+ * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid:	SW CID the command is related to
+ * @data_hi:	command private data address (high 32 bits)
+ * @data_lo:	command private data address (low 32 bits)
+ * @cmd_type:	command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int cmd_type)
+{
+	struct eth_spe *spe;
+	u16 type;
+	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		BNX2X_ERR("Can't post SP when there is panic\n");
+		return -EIO;
+	}
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	if (common) {
+		if (!atomic_read(&bp->eq_spq_left)) {
+			BNX2X_ERR("BUG! EQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+		}
+	} else if (!atomic_read(&bp->cq_spq_left)) {
+			BNX2X_ERR("BUG! SPQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+	}
+
+	spe = bnx2x_sp_get_next(bp);
+
+	/* CID needs the port number to be encoded in it */
+	spe->hdr.conn_and_cmd_data =
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
+
+	/* In some cases, type may already contain the func-id,
+	 * mainly in SRIOV-related use cases, so we add it here only
+	 * if it's not already set.
+	 */
+	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+			SPE_HDR_CONN_TYPE;
+		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+			 SPE_HDR_FUNCTION_ID);
+	} else {
+		type = cmd_type;
+	}
+
+	spe->hdr.type = cpu_to_le16(type);
+
+	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+	/*
+	 * It's ok if the actual decrement is issued towards the memory
+	 * somewhere between the spin_lock and spin_unlock. Thus no
+	 * more explicit memory barrier is needed.
+	 */
+	if (common)
+		atomic_dec(&bp->eq_spq_left);
+	else
+		atomic_dec(&bp->cq_spq_left);
+
+	DP(BNX2X_MSG_SP,
+	   "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+	   HW_CID(bp, cid), data_hi, data_lo, type,
+	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+	return 0;
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+	u32 j, val;
+	int rc = 0;
+
+	might_sleep();
+	for (j = 0; j < 1000; j++) {
+		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
+		val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
+		if (val & MCPR_ACCESS_LOCK_LOCK)
+			break;
+
+		usleep_range(5000, 10000);
+	}
+	if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
+		rc = -EBUSY;
+	}
+
+	return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+	REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX	0x0001
+#define BNX2X_DEF_SB_IDX	0x0002
+
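+/* Re-read the default status block indices written by the chip and return a
+ * bitmask telling which of them (attention bits / slow-path) have advanced
+ * since the last invocation.
+ */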
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+	struct host_sp_status_block *def_sb = bp->def_status_blk;
+	u16 rc = 0;
+
+	barrier(); /* status block is written to by the chip */
+	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+		rc |= BNX2X_DEF_SB_ATT_IDX;
+	}
+
+	if (bp->def_idx != def_sb->sp_sb.running_index) {
+		bp->def_idx = def_sb->sp_sb.running_index;
+		rc |= BNX2X_DEF_SB_IDX;
+	}
+
+	/* Do not reorder: indices reading should complete before handling */
+	barrier();
+	return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
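+/* First stage of attention handling: mask the newly asserted AEU lines,
+ * record them in attn_state, service the hard-wired attentions (NIG/link,
+ * GPIOs, general attentions), acknowledge them towards the HC/IGU and
+ * finally restore the NIG interrupt mask.
+ */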
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+	int port = BP_PORT(bp);
+	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
+	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
+	u32 nig_mask = 0;
+	u32 reg_addr;
+
+	if (bp->attn_state & asserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+	if (asserted & ATTN_HARD_WIRED_MASK) {
+		if (asserted & ATTN_NIG_FOR_FUNC) {
+
+			bnx2x_acquire_phy_lock(bp);
+
+			/* save nig interrupt mask */
+			nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+			/* If nig_mask is not set, no need to call the update
+			 * function.
+			 */
+			if (nig_mask) {
+				REG_WR(bp, nig_int_mask_addr, 0);
+
+				bnx2x_link_attn(bp);
+			}
+
+			/* handle unicore attn? */
+		}
+		if (asserted & ATTN_SW_TIMER_4_FUNC)
+			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+		if (asserted & GPIO_2_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+		if (asserted & GPIO_3_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+		if (asserted & GPIO_4_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+		if (port == 0) {
+			if (asserted & ATTN_GENERAL_ATTN_1) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_2) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_3) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+			}
+		} else {
+			if (asserted & ATTN_GENERAL_ATTN_4) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_5) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_6) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+			}
+		}
+
+	} /* if hardwired */
+
+	if (bp->common.int_block == INT_BLOCK_HC)
+		reg_addr = (HC_REG_COMMAND_REG + port*32 +
+			    COMMAND_REG_ATTN_BITS_SET);
+	else
+		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+	REG_WR(bp, reg_addr, asserted);
+
+	/* now set back the mask */
+	if (asserted & ATTN_NIG_FOR_FUNC) {
+		/* Verify that IGU ack through BAR was written before restoring
+		 * NIG mask. This loop should exit after 2-3 iterations max.
+		 */
+		if (bp->common.int_block != INT_BLOCK_HC) {
+			u32 cnt = 0, igu_acked;
+			do {
+				igu_acked = REG_RD(bp,
+						   IGU_REG_ATTENTION_ACK_BITS);
+			} while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
+				 (++cnt < MAX_IGU_ATTN_ACK_TO));
+			if (!igu_acked)
+				DP(NETIF_MSG_HW,
+				   "Failed to verify IGU ack on time\n");
+			barrier();
+		}
+		REG_WR(bp, nig_int_mask_addr, nig_mask);
+		bnx2x_release_phy_lock(bp);
+	}
+}
+
+static void bnx2x_fan_failure(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 ext_phy_config;
+	/* mark the failure */
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+
+	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+		 ext_phy_config);
+
+	/* log the failure */
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+			    "Please contact OEM Support for assistance\n");
+
+	/* Schedule device reset (unload).
+	 * This is because some boards consume enough power, when the driver
+	 * is up, to overheat if the fan fails.
+	 */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
+}
+
+static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+	int port = BP_PORT(bp);
+	int reg_offset;
+	u32 val;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("SPIO5 hw attention\n");
+
+		/* Fan failure attention */
+		bnx2x_hw_reset_phy(&bp->link_params);
+		bnx2x_fan_failure(bp);
+	}
+
+	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_handle_module_detect_int(&bp->link_params);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+		bnx2x_panic();
+	}
+}
+
+static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+		BNX2X_ERR("DB hw attention 0x%x\n", val);
+		/* DORQ discard attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from DORQ\n");
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+		bnx2x_panic();
+	}
+}
+
+static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+		BNX2X_ERR("CFC hw attention 0x%x\n", val);
+		/* CFC error attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from CFC\n");
+	}
+
+	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+		/* RQ_USDMDP_FIFO_OVERFLOW */
+		if (val & 0x18000)
+			BNX2X_ERR("FATAL error from PXP\n");
+
+		if (!CHIP_IS_E1x(bp)) {
+			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+		}
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+		bnx2x_panic();
+	}
+}
+
+static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+		if (attn & BNX2X_PMF_LINK_ASSERT) {
+			int func = BP_FUNC(bp);
+
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bnx2x_read_mf_cfg(bp);
+			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+					func_mf_config[BP_ABS_FUNC(bp)].config);
+			val = SHMEM_RD(bp,
+				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
+
+			if (val & (DRV_STATUS_DCC_EVENT_MASK |
+				   DRV_STATUS_OEM_EVENT_MASK))
+				bnx2x_oem_event(bp,
+					(val & (DRV_STATUS_DCC_EVENT_MASK |
+						DRV_STATUS_OEM_EVENT_MASK)));
+
+			if (val & DRV_STATUS_SET_MF_BW)
+				bnx2x_set_mf_bw(bp);
+
+			if (val & DRV_STATUS_DRV_INFO_REQ)
+				bnx2x_handle_drv_info_req(bp);
+
+			if (val & DRV_STATUS_VF_DISABLED)
+				bnx2x_schedule_iov_task(bp,
+							BNX2X_IOV_HANDLE_FLR);
+
+			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+				bnx2x_pmf_update(bp);
+
+			if (bp->port.pmf &&
+			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+				bp->dcbx_enabled > 0)
+				/* start dcbx state machine */
+				bnx2x_dcbx_set_params(bp,
+					BNX2X_DCBX_STATE_NEG_RECEIVED);
+			if (val & DRV_STATUS_AFEX_EVENT_MASK)
+				bnx2x_handle_afex_cmd(bp,
+					val & DRV_STATUS_AFEX_EVENT_MASK);
+			if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+				bnx2x_handle_eee_event(bp);
+
+			if (val & DRV_STATUS_OEM_UPDATE_SVID)
+				bnx2x_handle_update_svid_cmd(bp);
+
+			if (bp->link_vars.periodic_flags &
+			    PERIODIC_FLAGS_LINK_EVENT) {
+				/*  sync with link */
+				bnx2x_acquire_phy_lock(bp);
+				bp->link_vars.periodic_flags &=
+					~PERIODIC_FLAGS_LINK_EVENT;
+				bnx2x_release_phy_lock(bp);
+				if (IS_MF(bp))
+					bnx2x_link_sync_notify(bp);
+				bnx2x_link_report(bp);
+			}
+			/* Always call it here: bnx2x_link_report() will
+			 * prevent the link indication duplication.
+			 */
+			bnx2x__link_status_update(bp);
+		} else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+			BNX2X_ERR("MC assert!\n");
+			bnx2x_mc_assert(bp);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+			bnx2x_panic();
+
+		} else if (attn & BNX2X_MCP_ASSERT) {
+
+			BNX2X_ERR("MCP assert!\n");
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+			bnx2x_fw_dump(bp);
+
+		} else
+			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+	}
+
+	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+		if (attn & BNX2X_GRC_TIMEOUT) {
+			val = CHIP_IS_E1(bp) ? 0 :
+					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+			BNX2X_ERR("GRC time-out 0x%08x\n", val);
+		}
+		if (attn & BNX2X_GRC_RSV) {
+			val = CHIP_IS_E1(bp) ? 0 :
+					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+			BNX2X_ERR("GRC reserved 0x%08x\n", val);
+		}
+		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+	}
+}
+
+/*
+ * Bits map:
+ * 0-7   - Engine0 load counter.
+ * 8-15  - Engine1 load counter.
+ * 16    - Engine0 RESET_IN_PROGRESS bit.
+ * 17    - Engine1 RESET_IN_PROGRESS bit.
+ * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
+ *         on the engine
+ * 19    - Engine1 ONE_IS_LOADED.
+ * 20    - Chip reset flow bit. When set, a non-leader must wait for the
+ *         leaders of both engines to complete (check both RESET_IN_PROGRESS
+ *         bits, not just the one belonging to its engine).
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
+#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
+#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
+#define BNX2X_GLOBAL_RESET_BIT		0x00040000
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+	u32 val;
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+	u32 val;
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val;
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	/* Clear the bit */
+	val &= ~bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val;
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	/* Set the bit */
+	val |= bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = engine ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* return false if bit is set */
+	return (val & bit) ? false : true;
+}
+
+/*
+ * set pf load for the current pf.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_pf_load(struct bnx2x *bp)
+{
+	u32 val1, val;
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* set bit of that PF */
+	val1 |= (1 << bp->pf_num);
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/**
+ * bnx2x_clear_pf_load - clear pf load mark
+ *
+ * @bp:		driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * whether other functions are still loaded
+ */
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
+{
+	u32 val1, val;
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* clear bit of that PF */
+	val1 &= ~(1 << bp->pf_num);
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+	return val1 != 0;
+}
+
+/*
+ * Read the load status for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
+{
+	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK);
+	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT);
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
+
+	val = (val & mask) >> shift;
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
+	   engine, val);
+
+	return val != 0;
+}
+
+static void _print_parity(struct bnx2x *bp, u32 reg)
+{
+	pr_cont(" [0x%08x] ", REG_RD(bp, reg));
+}
+
+static void _print_next_block(int idx, const char *blk)
+{
+	pr_cont("%s%s", idx ? ", " : "", blk);
+}
+
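+/* Walk the group-0 parity signature bit by bit, optionally printing the name
+ * and parity status register of every block that reported an error. Returns
+ * true if any bit in the signature was set.
+ */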
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+					    int *par_num, bool print)
+{
+	u32 cur_bit;
+	bool res;
+	int i;
+
+	res = false;
+
+	for (i = 0; sig; i++) {
+		cur_bit = (0x1UL << i);
+		if (sig & cur_bit) {
+			res |= true; /* Each bit is real error! */
+
+			if (print) {
+				switch (cur_bit) {
+				case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+					_print_next_block((*par_num)++, "BRB");
+					_print_parity(bp,
+						      BRB1_REG_BRB1_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "PARSER");
+					_print_parity(bp, PRS_REG_PRS_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+					_print_next_block((*par_num)++, "TSDM");
+					_print_parity(bp,
+						      TSDM_REG_TSDM_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "SEARCHER");
+					_print_parity(bp, SRC_REG_SRC_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+					_print_next_block((*par_num)++, "TCM");
+					_print_parity(bp, TCM_REG_TCM_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "TSEMI");
+					_print_parity(bp,
+						      TSEM_REG_TSEM_PRTY_STS_0);
+					_print_parity(bp,
+						      TSEM_REG_TSEM_PRTY_STS_1);
+					break;
+				case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+					_print_next_block((*par_num)++, "XPB");
+					_print_parity(bp, GRCBASE_XPB +
+							  PB_REG_PB_PRTY_STS);
+					break;
+				}
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return res;
+}
+
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+					    int *par_num, bool *global,
+					    bool print)
+{
+	u32 cur_bit;
+	bool res;
+	int i;
+
+	res = false;
+
+	for (i = 0; sig; i++) {
+		cur_bit = (0x1UL << i);
+		if (sig & cur_bit) {
+			res |= true; /* Each bit is real error! */
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "PBF");
+					_print_parity(bp, PBF_REG_PBF_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "QM");
+					_print_parity(bp, QM_REG_QM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "TM");
+					_print_parity(bp, TM_REG_TM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "XSDM");
+					_print_parity(bp,
+						      XSDM_REG_XSDM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "XCM");
+					_print_parity(bp, XCM_REG_XCM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++,
+							  "XSEMI");
+					_print_parity(bp,
+						      XSEM_REG_XSEM_PRTY_STS_0);
+					_print_parity(bp,
+						      XSEM_REG_XSEM_PRTY_STS_1);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++,
+							  "DOORBELLQ");
+					_print_parity(bp,
+						      DORQ_REG_DORQ_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "NIG");
+					if (CHIP_IS_E1x(bp)) {
+						_print_parity(bp,
+							NIG_REG_NIG_PRTY_STS);
+					} else {
+						_print_parity(bp,
+							NIG_REG_NIG_PRTY_STS_0);
+						_print_parity(bp,
+							NIG_REG_NIG_PRTY_STS_1);
+					}
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				if (print)
+					_print_next_block((*par_num)++,
+							  "VAUX PCI CORE");
+				*global = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++,
+							  "DEBUG");
+					_print_parity(bp, DBG_REG_DBG_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "USDM");
+					_print_parity(bp,
+						      USDM_REG_USDM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "UCM");
+					_print_parity(bp, UCM_REG_UCM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++,
+							  "USEMI");
+					_print_parity(bp,
+						      USEM_REG_USEM_PRTY_STS_0);
+					_print_parity(bp,
+						      USEM_REG_USEM_PRTY_STS_1);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "UPB");
+					_print_parity(bp, GRCBASE_UPB +
+							  PB_REG_PB_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "CSDM");
+					_print_parity(bp,
+						      CSDM_REG_CSDM_PRTY_STS);
+				}
+				break;
+			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+				if (print) {
+					_print_next_block((*par_num)++, "CCM");
+					_print_parity(bp, CCM_REG_CCM_PRTY_STS);
+				}
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return res;
+}
+
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+					    int *par_num, bool print)
+{
+	u32 cur_bit;
+	bool res;
+	int i;
+
+	res = false;
+
+	for (i = 0; sig; i++) {
+		cur_bit = (0x1UL << i);
+		if (sig & cur_bit) {
+			res = true; /* Each bit is real error! */
+			if (print) {
+				switch (cur_bit) {
+				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "CSEMI");
+					_print_parity(bp,
+						      CSEM_REG_CSEM_PRTY_STS_0);
+					_print_parity(bp,
+						      CSEM_REG_CSEM_PRTY_STS_1);
+					break;
+				case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+					_print_next_block((*par_num)++, "PXP");
+					_print_parity(bp, PXP_REG_PXP_PRTY_STS);
+					_print_parity(bp,
+						      PXP2_REG_PXP2_PRTY_STS_0);
+					_print_parity(bp,
+						      PXP2_REG_PXP2_PRTY_STS_1);
+					break;
+				case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "PXPPCICLOCKCLIENT");
+					break;
+				case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+					_print_next_block((*par_num)++, "CFC");
+					_print_parity(bp,
+						      CFC_REG_CFC_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+					_print_next_block((*par_num)++, "CDU");
+					_print_parity(bp, CDU_REG_CDU_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+					_print_next_block((*par_num)++, "DMAE");
+					_print_parity(bp,
+						      DMAE_REG_DMAE_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+					_print_next_block((*par_num)++, "IGU");
+					if (CHIP_IS_E1x(bp))
+						_print_parity(bp,
+							HC_REG_HC_PRTY_STS);
+					else
+						_print_parity(bp,
+							IGU_REG_IGU_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+					_print_next_block((*par_num)++, "MISC");
+					_print_parity(bp,
+						      MISC_REG_MISC_PRTY_STS);
+					break;
+				}
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return res;
+}
+
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+					    int *par_num, bool *global,
+					    bool print)
+{
+	bool res = false;
+	u32 cur_bit;
+	int i;
+
+	for (i = 0; sig; i++) {
+		cur_bit = (0x1UL << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				if (print)
+					_print_next_block((*par_num)++,
+							  "MCP ROM");
+				*global = true;
+				res = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				if (print)
+					_print_next_block((*par_num)++,
+							  "MCP UMP RX");
+				*global = true;
+				res = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				if (print)
+					_print_next_block((*par_num)++,
+							  "MCP UMP TX");
+				*global = true;
+				res = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				(*par_num)++;
+				/* clear latched SCPAD PARITY from MCP */
+				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+				       1UL << 10);
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return res;
+}
+
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+					    int *par_num, bool print)
+{
+	u32 cur_bit;
+	bool res;
+	int i;
+
+	res = false;
+
+	for (i = 0; sig; i++) {
+		cur_bit = (0x1UL << i);
+		if (sig & cur_bit) {
+			res = true; /* Each bit is real error! */
+			if (print) {
+				switch (cur_bit) {
+				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+					_print_next_block((*par_num)++,
+							  "PGLUE_B");
+					_print_parity(bp,
+						      PGLUE_B_REG_PGLUE_B_PRTY_STS);
+					break;
+				case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+					_print_next_block((*par_num)++, "ATC");
+					_print_parity(bp,
+						      ATC_REG_ATC_PRTY_STS);
+					break;
+				}
+			}
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return res;
+}
+
+static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+			      u32 *sig)
+{
+	bool res = false;
+
+	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+		int par_num = 0;
+
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
+			  sig[0] & HW_PRTY_ASSERT_SET_0,
+			  sig[1] & HW_PRTY_ASSERT_SET_1,
+			  sig[2] & HW_PRTY_ASSERT_SET_2,
+			  sig[3] & HW_PRTY_ASSERT_SET_3,
+			  sig[4] & HW_PRTY_ASSERT_SET_4);
+		if (print) {
+			if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+			     (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+			     (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+			     (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
+			     (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
+				netdev_err(bp->dev,
+					   "Parity errors detected in blocks: ");
+			} else {
+				print = false;
+			}
+		}
+		res |= bnx2x_check_blocks_with_parity0(bp,
+			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+		res |= bnx2x_check_blocks_with_parity1(bp,
+			sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+		res |= bnx2x_check_blocks_with_parity2(bp,
+			sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+		res |= bnx2x_check_blocks_with_parity3(bp,
+			sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+		res |= bnx2x_check_blocks_with_parity4(bp,
+			sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
+
+		if (print)
+			pr_cont("\n");
+	}
+
+	return res;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp:		driver handle
+ * @global:	true if there was a global attention
+ * @print:	show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+	struct attn_route attn = { {0} };
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+			     port*4);
+	attn.sig[1] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+			     port*4);
+	attn.sig[2] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+			     port*4);
+	attn.sig[3] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+			     port*4);
+	/* Since MCP attentions can't be disabled inside the block, we need to
+	 * read AEU registers to see whether they're currently disabled
+	 */
+	attn.sig[3] &= ((REG_RD(bp,
+				!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+				      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+			 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+			~MISC_AEU_ENABLE_MCP_PRTY_BITS);
+
+	if (!CHIP_IS_E1x(bp))
+		attn.sig[4] = REG_RD(bp,
+			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+				     port*4);
+
+	return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
+		if (val &
+		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
+		if (val &
+		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
+	}
+	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+		BNX2X_ERR("ATC hw attention 0x%x\n", val);
+		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
+	}
+
+	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+	}
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
+	int port = BP_PORT(bp);
+	int index;
+	u32 reg_addr;
+	u32 val;
+	u32 aeu_mask;
+	bool global = false;
+
+	/* need to take HW lock because MCP or other port might also
+	   try to handle this event */
+	bnx2x_acquire_alr(bp);
+
+	if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * the other function would also "see" the parity errors.
+		 */
+#else
+		bnx2x_panic();
+#endif
+		bnx2x_release_alr(bp);
+		return;
+	}
+
+	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+	if (!CHIP_IS_E1x(bp))
+		attn.sig[4] =
+		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+	else
+		attn.sig[4] = 0;
+
+	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		if (deasserted & (1 << index)) {
+			group_mask = &bp->attn_group[index];
+
+			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
+			   index,
+			   group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3],
+			   group_mask->sig[4]);
+
+			bnx2x_attn_int_deasserted4(bp,
+					attn.sig[4] & group_mask->sig[4]);
+			bnx2x_attn_int_deasserted3(bp,
+					attn.sig[3] & group_mask->sig[3]);
+			bnx2x_attn_int_deasserted1(bp,
+					attn.sig[1] & group_mask->sig[1]);
+			bnx2x_attn_int_deasserted2(bp,
+					attn.sig[2] & group_mask->sig[2]);
+			bnx2x_attn_int_deasserted0(bp,
+					attn.sig[0] & group_mask->sig[0]);
+		}
+	}
+
+	bnx2x_release_alr(bp);
+
+	if (bp->common.int_block == INT_BLOCK_HC)
+		reg_addr = (HC_REG_COMMAND_REG + port*32 +
+			    COMMAND_REG_ATTN_BITS_CLR);
+	else
+		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+	val = ~deasserted;
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+	REG_WR(bp, reg_addr, val);
+
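+	/* a bit may only be deasserted if it was previously recorded as
+	 * asserted in attn_state
+	 */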
+	if (~bp->attn_state & deasserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state &= ~deasserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+	/* read local copy of bits */
+	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits);
+	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits_ack);
+	u32 attn_state = bp->attn_state;
+
+	/* look for changed bits */
+	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
+	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
+
+	DP(NETIF_MSG_HW,
+	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
+	   attn_bits, attn_ack, asserted, deasserted);
+
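+	/* sanity check: a bit with no pending transition (attn_bits and
+	 * attn_ack agree) must also match our recorded attn_state
+	 */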
+	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+		BNX2X_ERR("BAD attention state\n");
+
+	/* handle bits that were raised */
+	if (asserted)
+		bnx2x_attn_int_asserted(bp, asserted);
+
+	if (deasserted)
+		bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update)
+{
+	u32 igu_addr = bp->igu_base_addr;
+	igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+			     igu_addr);
+}
+
+static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+	/* No memory barriers */
+	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+	mmiowb(); /* keep prod updates ordered */
+}
+
+static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+				      union event_ring_elem *elem)
+{
+	u8 err = elem->message.error;
+
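+	/* CIDs below the CNIC range (other than the iSCSI L2 CID) belong to
+	 * regular ETH queues - let the caller handle those
+	 */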
+	if (!bp->cnic_eth_dev.starting_cid  ||
+	    (cid < bp->cnic_eth_dev.starting_cid &&
+	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
+		return 1;
+
+	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+	if (unlikely(err)) {
+
+		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+			  cid);
+		bnx2x_panic_dump(bp, false);
+	}
+	bnx2x_cnic_cfc_comp(bp, cid, err);
+	return 0;
+}
+
+static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+	struct bnx2x_mcast_ramrod_params rparam;
+	int rc;
+
+	memset(&rparam, 0, sizeof(rparam));
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	netif_addr_lock_bh(bp->dev);
+
+	/* Clear pending state for the last command */
+	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+	/* If there are pending mcast commands - send them */
+	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+		if (rc < 0)
+			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+				  rc);
+	}
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+					    union event_ring_elem *elem)
+{
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+	u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* Always push next commands out, don't wait here */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+
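+	/* the bits above BNX2X_SWCID_SHIFT of the echo field encode which
+	 * type of classification rule has completed
+	 */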
+	switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+			    >> BNX2X_SWCID_SHIFT) {
+	case BNX2X_FILTER_MAC_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
+		if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
+			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+		else
+			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
+
+		break;
+	case BNX2X_FILTER_VLAN_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+		break;
+	case BNX2X_FILTER_MCAST_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
+		/* This is only relevant for 57710 where multicast MACs are
+		 * configured as unicast MACs using the same ramrod.
+		 */
+		bnx2x_handle_mcast_eqe(bp);
+		return;
+	default:
+		BNX2X_ERR("Unsupported classification command: %d\n",
+			  elem->message.data.eth_event.echo);
+		return;
+	}
+
+	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+	else if (rc > 0)
+		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+}
+
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+	netif_addr_lock_bh(bp->dev);
+
+	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	/* Send rx_mode command again if was requested */
+	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+		bnx2x_set_storm_rx_mode(bp);
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+					      union event_ring_elem *elem)
+{
+	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+		DP(BNX2X_MSG_SP,
+		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+		   elem->message.data.vif_list_event.func_bit_map);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+			elem->message.data.vif_list_event.func_bit_map);
+	} else if (elem->message.data.vif_list_event.echo ==
+		   VIF_LIST_RULE_SET) {
+		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+	}
+}
+
+/* called with rtnl_lock */
+static void bnx2x_after_function_update(struct bnx2x *bp)
+{
+	int q, rc;
+	struct bnx2x_fastpath *fp;
+	struct bnx2x_queue_state_params queue_params = {NULL};
+	struct bnx2x_queue_update_params *q_update_params =
+		&queue_params.params.update;
+
+	/* Send Q update command with afex vlan removal values for all Qs */
+	queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+	/* set silent vlan removal values according to vlan mode */
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &q_update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+		  &q_update_params->update_flags);
+	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+	/* in access mode the mask and value are 0 in order to strip all vlans */
+	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+		q_update_params->silent_removal_value = 0;
+		q_update_params->silent_removal_mask = 0;
+	} else {
+		q_update_params->silent_removal_value =
+			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
+		q_update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
+
+	for_each_eth_queue(bp, q) {
+		/* Set the appropriate Queue object */
+		fp = &bp->fp[q];
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* send the ramrod */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	}
+
+	if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
+		fp = &bp->fp[FCOE_IDX(bp)];
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* clear pending completion bit */
+		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+		/* mark latest Q bit */
+		smp_mb__before_atomic();
+		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_atomic();
+
+		/* send Q update ramrod for FCoE Q */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	} else {
+		/* If no FCoE ring - ACK MCP now */
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
+}
+
+static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+	struct bnx2x *bp, u32 cid)
+{
+	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+
+	if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
+		return &bnx2x_fcoe_sp_obj(bp, q_obj);
+	else
+		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+	u16 hw_cons, sw_cons, sw_prod;
+	union event_ring_elem *elem;
+	u8 echo;
+	u32 cid;
+	u8 opcode;
+	int rc, spqe_cnt = 0;
+	struct bnx2x_queue_sp_obj *q_obj;
+	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+	 * When we get the next-page we need to adjust so the loop
+	 * condition below will be met. The next element is the size of a
+	 * regular element and hence incrementing by 1.
+	 */
+	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+		hw_cons++;
+
+	/* This function may never run in parallel with itself for a
+	 * specific bp, thus there is no need in "paired" read memory
+	 * barrier here.
+	 */
+	sw_cons = bp->eq_cons;
+	sw_prod = bp->eq_prod;
+
+	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
+			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+	for (; sw_cons != hw_cons;
+	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+		elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+		rc = bnx2x_iov_eq_sp_event(bp, elem);
+		if (!rc) {
+			DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+			   rc);
+			goto next_spqe;
+		}
+
+		/* elem CID originates from FW; actually LE */
+		cid = SW_CID((__force __le32)
+			     elem->message.data.cfc_del_event.cid);
+		opcode = elem->message.opcode;
+
+		/* handle eq element */
+		switch (opcode) {
+		case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+			bnx2x_vf_mbx_schedule(bp,
+					      &elem->message.data.vf_pf_event);
+			continue;
+
+		case EVENT_RING_OPCODE_STAT_QUERY:
+			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+			       "got statistics comp event %d\n",
+			       bp->stats_comp++);
+			/* nothing to do with stats comp */
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_CFC_DEL:
+			/* handle according to cid range */
+			/*
+			 * we may want to verify here that the bp state is
+			 * HALTING
+			 */
+			DP(BNX2X_MSG_SP,
+			   "got delete ramrod for MULTI[%d]\n", cid);
+
+			if (CNIC_LOADED(bp) &&
+			    !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+				goto next_spqe;
+
+			q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_STOP_TRAFFIC:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
+			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_TX_STOP))
+				break;
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_START_TRAFFIC:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
+			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_TX_START))
+				break;
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+			echo = elem->message.data.function_update_event.echo;
+			if (echo == SWITCH_UPDATE) {
+				DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+				   "got FUNC_SWITCH_UPDATE ramrod\n");
+				if (f_obj->complete_cmd(
+					bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+					break;
+
+			} else {
+				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
+				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+				   "AFEX: ramrod completed FUNCTION_UPDATE\n");
+				f_obj->complete_cmd(bp, f_obj,
+						    BNX2X_F_CMD_AFEX_UPDATE);
+
+				/* We will perform the Queues update from
+				 * sp_rtnl task as all Queue SP operations
+				 * should run under rtnl_lock.
+				 */
+				bnx2x_schedule_sp_rtnl(bp, cmd, 0);
+			}
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+			f_obj->complete_cmd(bp, f_obj,
+					    BNX2X_F_CMD_AFEX_VIFLISTS);
+			bnx2x_after_afex_vif_lists(bp, elem);
+			goto next_spqe;
+		case EVENT_RING_OPCODE_FUNCTION_START:
+			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+			   "got FUNC_START ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_FUNCTION_STOP:
+			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+			   "got FUNC_STOP ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_SET_TIMESYNC:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
+			   "got set_timesync ramrod completion\n");
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_SET_TIMESYNC))
+				break;
+			goto next_spqe;
+		}
+
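+		/* the remaining opcodes are only expected in specific driver
+		 * states, so dispatch on the (opcode, state) pair
+		 */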
+		switch (opcode | bp->state) {
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_OPENING_WAIT4_PORT):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			cid = elem->message.data.eth_event.echo &
+				BNX2X_SWCID_MASK;
+			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+			   cid);
+			rss_raw->clear_pending(rss_raw);
+			break;
+
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_SET_MAC |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
+			bnx2x_handle_classification_eqe(bp, elem);
+			break;
+
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+			bnx2x_handle_mcast_eqe(bp);
+			break;
+
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+			bnx2x_handle_rx_mode_eqe(bp);
+			break;
+		default:
+			/* unknown event - log an error and continue */
+			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+				  elem->message.opcode, bp->state);
+		}
+next_spqe:
+		spqe_cnt++;
+	} /* for */
+
+	smp_mb__before_atomic();
+	atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+	bp->eq_cons = sw_cons;
+	bp->eq_prod = sw_prod;
+	/* Make sure that above mem writes were issued towards the memory */
+	smp_wmb();
+
+	/* update producer */
+	bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+
+	DP(BNX2X_MSG_SP, "sp task invoked\n");
+
+	/* make sure the atomic interrupt_occurred has been written */
+	smp_rmb();
+	if (atomic_read(&bp->interrupt_occurred)) {
+
+		/* what work needs to be performed? */
+		u16 status = bnx2x_update_dsb_idx(bp);
+
+		DP(BNX2X_MSG_SP, "status %x\n", status);
+		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+		atomic_set(&bp->interrupt_occurred, 0);
+
+		/* HW attentions */
+		if (status & BNX2X_DEF_SB_ATT_IDX) {
+			bnx2x_attn_int(bp);
+			status &= ~BNX2X_DEF_SB_ATT_IDX;
+		}
+
+		/* SP events: STAT_QUERY and others */
+		if (status & BNX2X_DEF_SB_IDX) {
+			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+			if (FCOE_INIT(bp) &&
+			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				/* Prevent local bottom-halves from running as
+				 * we are going to change the local NAPI list.
+				 */
+				local_bh_disable();
+				napi_schedule(&bnx2x_fcoe(bp, napi));
+				local_bh_enable();
+			}
+
+			/* Handle EQ completions */
+			bnx2x_eq_int(bp);
+			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+			status &= ~BNX2X_DEF_SB_IDX;
+		}
+
+		/* if status is non zero then perhaps something went wrong */
+		if (unlikely(status))
+			DP(BNX2X_MSG_SP,
+			   "got an unknown interrupt! (status 0x%x)\n", status);
+
+		/* ack status block only if something was actually handled */
+		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+	}
+
+	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
+	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+			       &bp->sp_state)) {
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+		     IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	if (CNIC_LOADED(bp)) {
+		struct cnic_ops *c_ops;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+	}
+
+	/* schedule sp task to perform default status block work, ack
+	 * attentions and enable interrupts.
+	 */
+	bnx2x_schedule_sp_task(bp);
+
+	return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+		 bp->fw_drv_pulse_wr_seq);
+}
+
+static void bnx2x_timer(unsigned long data)
+{
+	struct bnx2x *bp = (struct bnx2x *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (IS_PF(bp) &&
+	    !BP_NOMCP(bp)) {
+		int mb_idx = BP_FW_MB_IDX(bp);
+		u16 drv_pulse;
+		u16 mcp_pulse;
+
+		++bp->fw_drv_pulse_wr_seq;
+		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+		drv_pulse = bp->fw_drv_pulse_wr_seq;
+		bnx2x_drv_pulse(bp);
+
+		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+			     MCP_PULSE_SEQ_MASK);
+		/* The delta between driver pulse and mcp response
+		 * should not get too big. If the MFW is more than 5 pulses
+		 * behind, we should worry about it enough to generate an error
+		 * log.
+		 */
+		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+			BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+				  drv_pulse, mcp_pulse);
+	}
+
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+	/* sample pf vf bulletin board for new posts from pf */
+	if (IS_VF(bp))
+		bnx2x_timer_sriov(bp);
+
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
+static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+	u32 i;
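+
+	/* use dword writes when both the address and the length are dword
+	 * aligned, otherwise fall back to byte-wide writes
+	 */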
+	if (!(len%4) && !(addr%4))
+		for (i = 0; i < len; i += 4)
+			REG_WR(bp, addr + i, fill);
+	else
+		for (i = 0; i < len; i++)
+			REG_WR8(bp, addr + i, fill);
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+				int fw_sb_id,
+				u32 *sb_data_p,
+				u32 data_size)
+{
+	int index;
+	for (index = 0; index < data_size; index++)
+		REG_WR(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+			sizeof(u32)*index,
+			*(sb_data_p + index));
+}
+
+static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+	u32 *sb_data_p;
+	u32 data_size = 0;
+	struct hc_status_block_data_e2 sb_data_e2;
+	struct hc_status_block_data_e1x sb_data_e1x;
+
+	/* disable the function first */
+	if (!CHIP_IS_E1x(bp)) {
+		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+		sb_data_e2.common.state = SB_DISABLED;
+		sb_data_e2.common.p_func.vf_valid = false;
+		sb_data_p = (u32 *)&sb_data_e2;
+		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+	} else {
+		memset(&sb_data_e1x, 0,
+		       sizeof(struct hc_status_block_data_e1x));
+		sb_data_e1x.common.state = SB_DISABLED;
+		sb_data_e1x.common.p_func.vf_valid = false;
+		sb_data_p = (u32 *)&sb_data_e1x;
+		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+	}
+	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+			CSTORM_STATUS_BLOCK_SIZE);
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+			CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper:  writes SP SB data to FW */
+static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+		struct hc_sp_status_block_data *sp_sb_data)
+{
+	int func = BP_FUNC(bp);
+	int i;
+	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+		REG_WR(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+			i*sizeof(u32),
+			*((u32 *)sp_sb_data + i));
+}
+
+static void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	struct hc_sp_status_block_data sp_sb_data;
+	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+	sp_sb_data.state = SB_DISABLED;
+	sp_sb_data.p_func.vf_valid = false;
+
+	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+			CSTORM_SP_STATUS_BLOCK_SIZE);
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+			CSTORM_SP_SYNC_BLOCK_SIZE);
+}
+
+static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+					   int igu_sb_id, int igu_seg_id)
+{
+	hc_sm->igu_sb_id = igu_sb_id;
+	hc_sm->igu_seg_id = igu_seg_id;
+	hc_sm->timer_value = 0xFF;
+	hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+/* allocates state machine ids. */
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+	/* zero out state machine indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* map indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+	int igu_seg_id;
+
+	struct hc_status_block_data_e2 sb_data_e2;
+	struct hc_status_block_data_e1x sb_data_e1x;
+	struct hc_status_block_sm  *hc_sm_p;
+	int data_size;
+	u32 *sb_data_p;
+
+	if (CHIP_INT_MODE_IS_BC(bp))
+		igu_seg_id = HC_SEG_ACCESS_NORM;
+	else
+		igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+	bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+	if (!CHIP_IS_E1x(bp)) {
+		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+		sb_data_e2.common.state = SB_ENABLED;
+		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+		sb_data_e2.common.p_func.vf_id = vfid;
+		sb_data_e2.common.p_func.vf_valid = vf_valid;
+		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+		sb_data_e2.common.same_igu_sb_1b = true;
+		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+		hc_sm_p = sb_data_e2.common.state_machine;
+		sb_data_p = (u32 *)&sb_data_e2;
+		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+	} else {
+		memset(&sb_data_e1x, 0,
+		       sizeof(struct hc_status_block_data_e1x));
+		sb_data_e1x.common.state = SB_ENABLED;
+		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+		sb_data_e1x.common.p_func.vf_id = 0xff;
+		sb_data_e1x.common.p_func.vf_valid = false;
+		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+		sb_data_e1x.common.same_igu_sb_1b = true;
+		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+		hc_sm_p = sb_data_e1x.common.state_machine;
+		sb_data_p = (u32 *)&sb_data_e1x;
+		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+	}
+
+	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+				       igu_sb_id, igu_seg_id);
+	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+				       igu_sb_id, igu_seg_id);
+
+	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
+
+	/* write indices to HW - PCI guarantees endianity of regpairs */
+	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+				     u16 tx_usec, u16 rx_usec)
+{
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+				    false, rx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+				       tx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+				       tx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+				       tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+	struct host_sp_status_block *def_sb = bp->def_status_blk;
+	dma_addr_t mapping = bp->def_status_blk_mapping;
+	int igu_sp_sb_index;
+	int igu_seg_id;
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int reg_offset, reg_offset_en5;
+	u64 section;
+	int index;
+	struct hc_sp_status_block_data sp_sb_data;
+	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		igu_sp_sb_index = DEF_SB_IGU_ID;
+		igu_seg_id = HC_SEG_ACCESS_DEF;
+	} else {
+		igu_sp_sb_index = bp->igu_dsb_id;
+		igu_seg_id = IGU_SEG_ACCESS_DEF;
+	}
+
+	/* ATTN */
+	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+					    atten_status_block);
+	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+	bp->attn_state = 0;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		int sindex;
+		/* take care of sig[0]..sig[4] */
+		for (sindex = 0; sindex < 4; sindex++)
+			bp->attn_group[index].sig[sindex] =
+			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+		if (!CHIP_IS_E1x(bp))
+			/*
+			 * enable5 is separate from the rest of the registers,
+			 * and therefore the address skip is 4
+			 * and not 16 between the different groups
+			 */
+			bp->attn_group[index].sig[4] = REG_RD(bp,
+					reg_offset_en5 + 0x4*index);
+		else
+			bp->attn_group[index].sig[4] = 0;
+	}
+
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+				     HC_REG_ATTN_MSG0_ADDR_L);
+
+		REG_WR(bp, reg_offset, U64_LO(section));
+		REG_WR(bp, reg_offset + 4, U64_HI(section));
+	} else if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+	}
+
+	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+					    sp_sb);
+
+	bnx2x_zero_sp_sb(bp);
+
+	/* PCI guarantees endianity of regpairs */
+	sp_sb_data.state		= SB_ENABLED;
+	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
+	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
+	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
+	sp_sb_data.igu_seg_id		= igu_seg_id;
+	sp_sb_data.p_func.pf_id		= func;
+	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
+	sp_sb_data.p_func.vf_id		= 0xff;
+
+	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i)
+		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+					 bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+	spin_lock_init(&bp->spq_lock);
+	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+	bp->spq_prod_idx = 0;
+	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+	bp->spq_prod_bd = bp->spq;
+	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+	int i;
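+
+	/* chain the EQ pages: the last element of each page points to the
+	 * start of the next page, wrapping back to the first page
+	 */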
+	for (i = 1; i <= NUM_EQ_PAGES; i++) {
+		union event_ring_elem *elem =
+			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+		elem->next_page.addr.hi =
+			cpu_to_le32(U64_HI(bp->eq_mapping +
+				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+		elem->next_page.addr.lo =
+			cpu_to_le32(U64_LO(bp->eq_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+	}
+	bp->eq_cons = 0;
+	bp->eq_prod = NUM_EQ_DESC;
+	bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before the EQ actually overflows */
+	atomic_set(&bp->eq_spq_left,
+		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+/* called with netif_addr_lock_bh() */
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			       unsigned long rx_mode_flags,
+			       unsigned long rx_accept_flags,
+			       unsigned long tx_accept_flags,
+			       unsigned long ramrod_flags)
+{
+	struct bnx2x_rx_mode_ramrod_params ramrod_param;
+	int rc;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Prepare ramrod parameters */
+	ramrod_param.cid = 0;
+	ramrod_param.cl_id = cl_id;
+	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+	ramrod_param.func_id = BP_FUNC(bp);
+
+	ramrod_param.pstate = &bp->sp_state;
+	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.rx_mode_flags = rx_mode_flags;
+
+	ramrod_param.rx_accept_flags = rx_accept_flags;
+	ramrod_param.tx_accept_flags = tx_accept_flags;
+
+	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+	if (rc < 0) {
+		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+		return rc;
+	}
+
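+	/* re-enable (unmask) the attention lines that have just been
+	 * deasserted
+	 */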
+	return 0;
+}
+
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+				   unsigned long *rx_accept_flags,
+				   unsigned long *tx_accept_flags)
+{
+	/* Clear the flags first */
+	*rx_accept_flags = 0;
+	*tx_accept_flags = 0;
+
+	switch (rx_mode) {
+	case BNX2X_RX_MODE_NONE:
+		/*
+		 * 'drop all' supersedes any accept flags that may have been
+		 * passed to the function.
+		 */
+		break;
+	case BNX2X_RX_MODE_NORMAL:
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
+		break;
+	case BNX2X_RX_MODE_ALLMULTI:
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
+		break;
+	case BNX2X_RX_MODE_PROMISC:
+		/* According to definition of SI mode, iface in promisc mode
+		 * should receive matched and unmatched (in resolution of port)
+		 * unicast packets.
+		 */
+		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+		if (IS_MF_SI(bp))
+			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
+		else
+			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
+		break;
+	default:
+		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+	int rc;
+
+	if (!NO_FCOE(bp))
+		/* Configure rx_mode of FCoE Queue */
+		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+	rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+				     &tx_accept_flags);
+	if (rc)
+		return rc;
+
+	__set_bit(RAMROD_RX, &ramrod_flags);
+	__set_bit(RAMROD_TX, &ramrod_flags);
+
+	return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+				   rx_accept_flags, tx_accept_flags,
+				   ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+	int i;
+
+	/* Zero this manually as its initialization is
+	   currently missing in the initTool */
+	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+			CHIP_INT_MODE_IS_BC(bp) ?
+			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+	}
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		bnx2x_init_internal_common(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		/* nothing to do */
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		/* internal memory per function is
+		   initialized inside bnx2x_pf_init */
+		break;
+
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		break;
+	}
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
+}
+
+static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return BP_L_ID(fp->bp) + fp->index;
+	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
+		return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+	u8 cos;
+	unsigned long q_type = 0;
+	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+	fp->rx_queue = fp_idx;
+	fp->cid = fp_idx;
+	fp->cl_id = bnx2x_fp_cl_id(fp);
+	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+	/* qZone id equals to FW (per path) client id */
+	fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
+
+	/* init shortcut */
+	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Setup SB indices */
+	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+	/* init tx data */
+	for_each_cos_in_tx_queue(fp, cos) {
+		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+				  FP_COS_TO_TXQ(fp, cos, bp),
+				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
+		cids[cos] = fp->txdata_ptr[cos]->cid;
+	}
+
+	/* nothing more for vf to do here */
+	if (IS_VF(bp))
+		return;
+
+	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+		      fp->fw_sb_id, fp->igu_sb_id);
+	bnx2x_update_fpsb_idx(fp);
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	/* Configure classification DBs: Always enable Tx switching */
+	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+	DP(NETIF_MSG_IFUP,
+	   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
+	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
+}
+
+static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+	int i;
+
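+	/* chain the TX BD pages: the next_bd at the end of each page points
+	 * to the start of the following page, wrapping to the first page
+	 */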
+	for (i = 1; i <= NUM_TX_RINGS; i++) {
+		struct eth_tx_next_bd *tx_next_bd =
+			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+		tx_next_bd->addr_hi =
+			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+		tx_next_bd->addr_lo =
+			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+	}
+
+	*txdata->tx_cons_sb = cpu_to_le16(0);
+
+	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+	txdata->tx_db.data.zero_fill1 = 0;
+	txdata->tx_db.data.prod = 0;
+
+	txdata->tx_pkt_prod = 0;
+	txdata->tx_pkt_cons = 0;
+	txdata->tx_bd_prod = 0;
+	txdata->tx_bd_cons = 0;
+	txdata->tx_pkt = 0;
+}
+
+static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue_cnic(bp, i)
+		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+	int i;
+	u8 cos;
+
+	for_each_eth_queue(bp, i)
+		for_each_cos_in_tx_queue(&bp->fp[i], cos)
+			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
+}
+
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+			  fp);
+
+	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+	/* qZone id equals to FW (per path) client id */
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+	/* init shortcut */
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	/* No multi-CoS for FCoE L2 client */
+	BUG_ON(fp->max_cos != 1);
+
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	DP(NETIF_MSG_IFUP,
+	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
+}
+
+void bnx2x_nic_init_cnic(struct bnx2x *bp)
+{
+	if (!NO_FCOE(bp))
+		bnx2x_init_fcoe_fp(bp);
+
+	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+		      BNX2X_VF_ID_INVALID, false,
+		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+	/* ensure status block indices were read */
+	rmb();
+	bnx2x_init_rx_rings_cnic(bp);
+	bnx2x_init_tx_rings_cnic(bp);
+
+	/* flush all */
+	mb();
+	mmiowb();
+}
+
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
+{
+	int i;
+
+	/* Setup NIC internals and enable interrupts */
+	for_each_eth_queue(bp, i)
+		bnx2x_init_eth_fp(bp, i);
+
+	/* ensure status block indices were read */
+	rmb();
+	bnx2x_init_rx_rings(bp);
+	bnx2x_init_tx_rings(bp);
+
+	if (IS_PF(bp)) {
+		/* Initialize MOD_ABS interrupts */
+		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+				       bp->common.shmem_base,
+				       bp->common.shmem2_base, BP_PORT(bp));
+
+		/* initialize the default status block and sp ring */
+		bnx2x_init_def_sb(bp);
+		bnx2x_update_dsb_idx(bp);
+		bnx2x_init_sp_ring(bp);
+	} else {
+		bnx2x_memset_stats(bp);
+	}
+}
+
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+{
+	bnx2x_init_eq_ring(bp);
+	bnx2x_init_internal(bp, load_code);
+	bnx2x_pf_init(bp);
+	bnx2x_stats_init(bp);
+
+	/* flush all before enabling interrupts */
+	mb();
+	mmiowb();
+
+	bnx2x_int_enable(bp);
+
+	/* Check for SPIO5 */
+	bnx2x_attn_int_deasserted0(bp,
+		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+				   AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* gzip service functions */
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
+	if (bp->gunzip_buf  == NULL)
+		goto gunzip_nomem1;
+
+	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+	if (bp->strm  == NULL)
+		goto gunzip_nomem2;
+
+	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+	if (bp->strm->workspace == NULL)
+		goto gunzip_nomem3;
+
+	return 0;
+
+gunzip_nomem3:
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+gunzip_nomem2:
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
+	bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
+	return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+	if (bp->strm) {
+		vfree(bp->strm->workspace);
+		kfree(bp->strm);
+		bp->strm = NULL;
+	}
+
+	if (bp->gunzip_buf) {
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
+		bp->gunzip_buf = NULL;
+	}
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+	int n, rc;
+
+	/* check gzip header */
+	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+		BNX2X_ERR("Bad gzip header\n");
+		return -EINVAL;
+	}
+
+	n = 10;
+
+#define FNAME				0x8
+
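+	/* the fixed gzip header is 10 bytes; if the FNAME flag is set it is
+	 * followed by a NUL-terminated original file name that must also be
+	 * skipped before the deflate stream begins
+	 */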
+	if (zbuf[3] & FNAME)
+		while ((zbuf[n++] != 0) && (n < len));
+
+	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+	bp->strm->avail_in = len - n;
+	bp->strm->next_out = bp->gunzip_buf;
+	bp->strm->avail_out = FW_BUF_SIZE;
+
+	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+	if (rc != Z_OK)
+		return rc;
+
+	rc = zlib_inflate(bp->strm, Z_FINISH);
+	if ((rc != Z_OK) && (rc != Z_STREAM_END))
+		netdev_err(bp->dev, "Firmware decompression error: %s\n",
+			   bp->strm->msg);
+
+	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+	if (bp->gunzip_outlen & 0x3)
+		netdev_err(bp->dev,
+			   "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
+				bp->gunzip_outlen);
+	bp->gunzip_outlen >>= 2;
+
+	zlib_inflateEnd(bp->strm);
+
+	if (rc == Z_STREAM_END)
+		return 0;
+
+	return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+	u32 wb_write[3];
+
+	/* Ethernet source and destination addresses */
+	wb_write[0] = 0x55555555;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x20;		/* SOP */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+	/* NON-IP protocol */
+	wb_write[0] = 0x09000000;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the
+ * driver. To test them we send debug packets.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+	int factor;
+	int count, i;
+	u32 val = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		factor = 120;
+	else if (CHIP_REV_IS_EMUL(bp))
+		factor = 200;
+	else
+		factor = 1;
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/*  Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send Ethernet packet */
+	bnx2x_lb_pckt(bp);
+
+	/* TODO: do we need to reset the NIG statistics? */
+	/* Wait until NIG register shows 1 packet of size 0x10 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0x10)
+			break;
+
+		usleep_range(10000, 20000);
+		count--;
+	}
+	if (val != 0x10) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -1;
+	}
+
+	/* Wait until PRS register shows 1 packet */
+	count = 1000 * factor;
+	while (count) {
+		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+		if (val == 1)
+			break;
+
+		usleep_range(10000, 20000);
+		count--;
+	}
+	if (val != 0x1) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+		return -2;
+	}
+
+	/* Reset and init BRB, PRS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+	DP(NETIF_MSG_HW, "part2\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send 10 Ethernet packets */
+	for (i = 0; i < 10; i++)
+		bnx2x_lb_pckt(bp);
+
+	/* Wait until NIG register shows 10 + 1
+	   packets of size 11*0x10 = 0xb0 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0xb0)
+			break;
+
+		usleep_range(10000, 20000);
+		count--;
+	}
+	if (val != 0xb0) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -3;
+	}
+
+	/* Wait until PRS register shows 2 packets */
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 2)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* Write 1 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 3)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* clear NIG EOP FIFO */
+	for (i = 0; i < 11; i++)
+		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+	if (val != 1) {
+		BNX2X_ERR("clear of NIG failed\n");
+		return -4;
+	}
+
+	/* Reset and init BRB, PRS, NIG */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+	if (!CNIC_SUPPORT(bp))
+		/* set NIC mode */
+		REG_WR(bp, PRS_REG_NIC_MODE, 1);
+
+	/* Enable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+	DP(NETIF_MSG_HW, "done\n");
+
+	return 0; /* OK */
+}
+
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+	u32 val;
+
+	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+	else
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+	/*
+	 * mask read length error interrupts in brb for parser
+	 * (parsing unit and 'checksum and crc' unit);
+	 * these errors are legal (PU reads fixed length and CAC can cause
+	 * read length error on truncated packets)
+	 */
+	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+	val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
+		PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
+		PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
+	if (!CHIP_IS_E1x(bp))
+		val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
+			PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
+	REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
+
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+	if (!CHIP_IS_E1x(bp))
+		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+	u32 val = 0x1400;
+
+	/* reset_common */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0xd3ffff7f);
+
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+	bp->dmae_ready = 0;
+	spin_lock_init(&bp->dmae_lock);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+	u16 devctl;
+	int r_order, w_order;
+
+	pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
+	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
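+	/* derive write order from Max_Payload_Size and read order from
+	 * Max_Read_Request_Size (unless overridden via bp->mrrs)
+	 */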
+	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+	if (bp->mrrs == -1)
+		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+	else {
+		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+		r_order = bp->mrrs;
+	}
+
+	bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+	int is_required;
+	u32 val;
+	int port;
+
+	if (BP_NOMCP(bp))
+		return;
+
+	is_required = 0;
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+	      SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+		is_required = 1;
+
+	/*
+	 * The fan failure mechanism is usually related to the PHY type since
+	 * the power consumption of the board is affected by the PHY. Currently,
+	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+	 */
+	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+		for (port = PORT_0; port < PORT_MAX; port++) {
+			is_required |=
+				bnx2x_fan_failure_det_req(
+					bp,
+					bp->common.shmem_base,
+					bp->common.shmem2_base,
+					port);
+		}
+
+	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+	if (is_required == 0)
+		return;
+
+	/* Fan failure is indicated by SPIO 5 */
+	bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
+
+	/* set to active low mode */
+	val = REG_RD(bp, MISC_REG_SPIO_INT);
+	val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
+	REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+	/* enable interrupt to signal the IGU */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	val |= MISC_SPIO_SPIO5;
+	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+	val &= ~IGU_PF_CONF_FUNC_EN;
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+	u32 shmem_base[2], shmem2_base[2];
+	/* Avoid common init in case MFW supports LFA */
+	if (SHMEM2_RD(bp, size) >
+	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+		return;
+	shmem_base[0] =  bp->common.shmem_base;
+	shmem2_base[0] = bp->common.shmem2_base;
+	if (!CHIP_IS_E1x(bp)) {
+		shmem_base[1] =
+			SHMEM2_RD(bp, other_shmem_base_addr);
+		shmem2_base[1] =
+			SHMEM2_RD(bp, other_shmem2_base_addr);
+	}
+	bnx2x_acquire_phy_lock(bp);
+	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+			      bp->common.chip_id);
+	bnx2x_release_phy_lock(bp);
+}
+
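+/* Program the PXP2 swap/endianness registers; val is 1 on big-endian hosts
+ * and 0 otherwise (see bnx2x_set_endianity() below).
+ */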
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+	/* make sure this value is 0 */
+	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+	bnx2x_config_endianity(bp, 1);
+#else
+	bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+	bnx2x_config_endianity(bp, 0);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+	u32 val;
+
+	DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
+
+	/*
+	 * take the RESET lock to protect undi_unload flow from accessing
+	 * registers while we're resetting the chip
+	 */
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+	bnx2x_reset_common(bp);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+	val = 0xfffc;
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		u8 abs_func_id;
+
+		/* In 4-port or 2-port mode we need to turn off master-enable
+		 * for everyone; after that, turn it back on for self. So we
+		 * disregard whether this is multi-function or not and always
+		 * disable it for all functions on the given path, i.e. 0,2,4,6
+		 * for path 0 and 1,3,5,7 for path 1.
+		 */
+		for (abs_func_id = BP_PATH(bp);
+		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+			if (abs_func_id == BP_ABS_FUNC(bp)) {
+				REG_WR(bp,
+				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+				    1);
+				continue;
+			}
+
+			bnx2x_pretend_func(bp, abs_func_id);
+			/* clear pf enable */
+			bnx2x_pf_disable(bp);
+			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+	if (CHIP_IS_E1(bp)) {
+		/* enable HW interrupt from PXP on USDM overflow
+		   bit 16 on INT_MASK_0 */
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+	bnx2x_init_pxp(bp);
+	bnx2x_set_endianity(bp);
+	bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+	/* let the HW do its magic ... */
+	msleep(100);
+	/* finish PXP init */
+	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 CFG failed\n");
+		return -EBUSY;
+	}
+	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 RD_INIT failed\n");
+		return -EBUSY;
+	}
+
+	/* Timers bug workaround E2 only. We need to set the entire ILT to
+	 * have entries with value "0" and valid bit on.
+	 * This needs to be done by the first PF that is loaded in a path
+	 * (i.e. common phase)
+	 */
+	if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ *	1.  First PF driver which loads on a path will:
+ *		a.  After taking the chip out of reset, by using pretend,
+ *		    it will write "0" to the following registers of
+ *		    the other vnics.
+ *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ *		    And for itself it will write '1' to
+ *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ *		    dmae-operations (writing to pram for example.)
+ *		    note: can be done for only function 6,7 but cleaner this
+ *			  way.
+ *		b.  Write zero+valid to the entire ILT.
+ *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ *		    VNIC3 (of that port). The range allocated will be the
+ *		    entire ILT. This is needed to prevent ILT range error.
+ *	2.  Any PF driver load flow:
+ *		a.  ILT update with the physical addresses of the allocated
+ *		    logical pages.
+ *		b.  Wait 20msec. - note that this timeout is needed to make
+ *		    sure there are no requests in one of the PXP internal
+ *		    queues with "old" ILT addresses.
+ *		c.  PF enable in the PGLC.
+ *		d.  Clear the was_error of the PF in the PGLC. (could have
+ *		    occurred while driver was down)
+ *		e.  PF enable in the CFC (WEAK + STRONG)
+ *		f.  Timers scan enable
+ *	3.  PF driver unload flow:
+ *		a.  Clear the Timers scan_en.
+ *		b.  Polling for scan_on=0 for that PF.
+ *		c.  Clear the PF enable bit in the PXP.
+ *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
+ *		e.  Write zero+valid to all ILT entries (The valid bit must
+ *		    stay set)
+ *		f.  If this is VNIC 3 of a port then also init
+ *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
+ *		    to the last entry in the ILT.
+ *
+ *	Notes:
+ *	Currently the PF error in the PGLC is non-recoverable.
+ *	In the future there will be a recovery routine for this error.
+ *	Currently attention is masked.
+ *	Having an MCP lock on the load/unload process does not guarantee that
+ *	there is no Timer disable during Func6/7 enable. This is because the
+ *	Timers scan is currently being cleared by the MCP on FLR.
+ *	Step 2.d can be done only for PF6/7 and the driver can also check if
+ *	there is error before clearing it. But the flow above is simpler and
+ *	more general.
+ *	All ILT entries are written by zero+valid and not just PF6/7
+ *	ILT entries since in the future the ILT entries allocation for
+ *	PF-s might be dynamic.
+ */
+		struct ilt_client_info ilt_cli;
+		struct bnx2x_ilt ilt;
+		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+		/* initialize dummy TM client */
+		ilt_cli.start = 0;
+		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+		ilt_cli.client_num = ILT_CLIENT_TM;
+
+		/* Step 1: set zeroes to all ilt page entries with valid bit on
+		 * Step 2: set the timers first/last ilt entry to point
+		 * to the entire range to prevent ILT range error for 3rd/4th
+		 * vnic	(this code assumes existence of the vnic)
+		 *
+		 * both steps performed by call to bnx2x_ilt_client_init_op()
+		 * with dummy TM client
+		 *
+		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+		 * and its counterpart are split registers
+		 */
+		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+	}
+
+	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+	if (!CHIP_IS_E1x(bp)) {
+		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+		/* let the HW do its magic ... */
+		do {
+			msleep(200);
+			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+		} while (factor-- && (val != 1));
+
+		if (val != 1) {
+			BNX2X_ERR("ATC_INIT failed\n");
+			return -EBUSY;
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+	bnx2x_iov_init_dmae(bp);
+
+	/* clean the DMAE memory */
+	bp->dmae_ready = 1;
+	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+	/* QM queues pointers table */
+	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+	/* soft reset pulse */
+	REG_WR(bp, QM_REG_SOFT_RESET, 1);
+	REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+	if (CNIC_SUPPORT(bp))
+		bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+
+	if (!CHIP_REV_IS_SLOW(bp))
+		/* enable hw interrupt from doorbell Q */
+		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * received in afex mode
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+		} else {
+			/* Bit-map indicating which L2 hdrs may appear
+			 * after the basic Ethernet header
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* reset VFC memories */
+		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+
+		msleep(20);
+	}
+
+	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+	/* sync semi rtc */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0x80000000);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+	       0x80000000);
+
+	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * sent in afex mode
+			 */
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+		} else {
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
+
+	REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+	if (CNIC_SUPPORT(bp)) {
+		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+	}
+	REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+	if (sizeof(union cdu_context) != 1024)
+		/* we currently assume that a context is 1024 bytes */
+		dev_alert(&bp->pdev->dev,
+			  "please adjust the size of cdu_context(%ld)\n",
+			  (long)sizeof(union cdu_context));
+
+	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+	val = (4 << 24) + (0 << 12) + 1024;
+	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+	/* enable context validation interrupt from CFC */
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+	/* set the thresholds to prevent CFC/CDU race */
+	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2814, 0xffffffff);
+	REG_WR(bp, 0x3820, 0xffffffff);
+
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+	}
+
+	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+	if (!CHIP_IS_E1(bp)) {
+		/* in E3 this is done in the per-port section */
+		if (!CHIP_IS_E3(bp))
+			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (CHIP_IS_E1H(bp))
+		/* not applicable for E2 (and above ...) */
+		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(200);
+
+	/* finish CFC init */
+	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC LL_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC AC_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC CAM_INIT failed\n");
+		return -EBUSY;
+	}
+	REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+	if (CHIP_IS_E1(bp)) {
+		/* read NIG statistic
+		   to see if this is our first bring-up since power-up */
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+
+		/* do internal memory self test */
+		if ((val == 0) && bnx2x_int_mem_test(bp)) {
+			BNX2X_ERR("internal mem self test failed\n");
+			return -EBUSY;
+		}
+	}
+
+	bnx2x_setup_fan_failure_detection(bp);
+
+	/* clear PXP2 attentions */
+	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+	bnx2x_enable_blocks_attention(bp);
+	bnx2x_enable_blocks_parity(bp);
+
+	if (!BP_NOMCP(bp)) {
+		if (CHIP_IS_E1x(bp))
+			bnx2x__common_init_phy(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+	if (SHMEM2_HAS(bp, netproc_fw_ver))
+		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+	return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+	int rc = bnx2x_init_hw_common(bp);
+
+	if (rc)
+		return rc;
+
+	/* In E2 2-PORT mode, same ext phy is used for the two paths */
+	if (!BP_NOMCP(bp))
+		bnx2x__common_init_phy(bp);
+
+	return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+	u32 low, high;
+	u32 val, reg;
+
+	DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	/* Timers bug workaround: the common phase disables the pf_master bit
+	 * in PGLUE, so we need to enable it here before any DMAE accesses are
+	 * attempted. Therefore we manually add the enable-master to the port
+	 * phase (it also happens in the function phase).
+	 */
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+	/* QM cid (connection) count */
+	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+	if (CNIC_SUPPORT(bp)) {
+		bnx2x_init_block(bp, BLOCK_TM, init_phase);
+		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+	}
+
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+
+		if (IS_MF(bp))
+			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+		else if (bp->dev->mtu > 4096) {
+			if (bp->flags & ONE_PORT_FLAG)
+				low = 160;
+			else {
+				val = bp->dev->mtu;
+				/* (24*1024 + val*4)/256 */
+				low = 96 + (val/64) +
+						((val % 64) ? 1 : 0);
+			}
+		} else
+			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+		high = low + 56;	/* 14*1024/256 */
+		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+	}
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		REG_WR(bp, (BP_PORT(bp) ?
+			    BRB1_REG_MAC_GUARANTIED_1 :
+			    BRB1_REG_MAC_GUARANTIED_0), 40);
+
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	if (CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure headers for AFEX mode */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+		} else {
+			/* Ovlan exists only if we are in multi-function +
+			 * switch-dependent mode; in switch-independent mode
+			 * there are no ovlan headers
+			 */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+			       (bp->path_has_ovlan ? 7 : 6));
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+	if (CHIP_IS_E1x(bp)) {
+		/* configure PBF to work without PAUSE mtu 9000 */
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+		/* update init credit */
+		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+
+		/* probe changes */
+		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+		udelay(50);
+		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+	}
+
+	if (CNIC_SUPPORT(bp))
+		bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+	if (CHIP_IS_E1(bp)) {
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	}
+	bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+	/* init aeu_mask_attn_func_0/1:
+	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
+	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
+	 *             bits 4-7 are used for "per vn group attention" */
+	val = IS_MF(bp) ? 0xF7 : 0x7;
+	/* Enable DCBX attention for all but E1 */
+	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+	/* SCPAD_PARITY should NOT trigger close the gates */
+	reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+	REG_WR(bp, reg,
+	       REG_RD(bp, reg) &
+	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+	reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+	REG_WR(bp, reg,
+	       REG_RD(bp, reg) &
+	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* Bit-map indicating which L2 hdrs may appear after the
+		 * basic Ethernet header
+		 */
+		if (IS_MF_AFEX(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+		else
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC,
+			       IS_MF_SD(bp) ? 7 : 6);
+
+		if (CHIP_IS_E3(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+				   NIG_REG_LLH1_MF_MODE :
+				   NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+	if (!CHIP_IS_E1(bp)) {
+		/* 0x2 disable mf_ov, 0x1 enable */
+		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+		       (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+		if (!CHIP_IS_E1x(bp)) {
+			val = 0;
+			switch (bp->mf_mode) {
+			case MULTI_FUNCTION_SD:
+				val = 1;
+				break;
+			case MULTI_FUNCTION_SI:
+			case MULTI_FUNCTION_AFEX:
+				val = 2;
+				break;
+			}
+
+			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+						  NIG_REG_LLH0_CLS_TYPE), val);
+		}
+		{
+			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+		}
+	}
+
+	/* If SPIO5 is set to generate interrupts, enable it for this port */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	if (val & MISC_SPIO_SPIO5) {
+		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+		val = REG_RD(bp, reg_addr);
+		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_addr, val);
+	}
+
+	return 0;
+}
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+	int reg;
+	u32 wb_write[2];
+
+	if (CHIP_IS_E1(bp))
+		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+	else
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
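+	/* split the DMA address into the two dwords expected by the
+	 * on-chip address table
+	 */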
+	wb_write[0] = ONCHIP_ADDR1(addr);
+	wb_write[1] = ONCHIP_ADDR2(addr);
+	REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
+{
+	u32 data, ctl, cnt = 100;
+	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
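+	/* each cleanup-ack register covers 32 SBs: pick this SB's word and bit */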
+	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+	u32 sb_bit =  1 << (idu_sb_id%32);
+	u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+	/* Not supported in BC mode */
+	if (CHIP_INT_MODE_IS_BC(bp))
+		return;
+
+	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
+		IGU_REGULAR_CLEANUP_SET				|
+		IGU_REGULAR_BCLEANUP;
+
+	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
+	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
+	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			 data, igu_addr_data);
+	REG_WR(bp, igu_addr_data, data);
+	mmiowb();
+	barrier();
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			  ctl, igu_addr_ctl);
+	REG_WR(bp, igu_addr_ctl, ctl);
+	mmiowb();
+	barrier();
+
+	/* wait for clean up to finish */
+	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+		msleep(20);
+
+	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+		DP(NETIF_MSG_HW,
+		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
+			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+	}
+}
+
+static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+	u32 i, base = FUNC_ILT_BASE(func);
+	for (i = base; i < base + ILT_PER_FUNC; i++)
+		bnx2x_ilt_wr(bp, i, 0);
+}
+
+static void bnx2x_init_searcher(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+	/* T1 hash bits value determines the T1 number of entries */
+	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+}
+
+static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+{
+	int rc;
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_switch_update_params *switch_update_params =
+		&func_params.params.switch_update;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	/* Function parameters */
+	__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+		  &switch_update_params->changes);
+	if (suspend)
+		__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+			  &switch_update_params->changes);
+
+	rc = bnx2x_func_state_change(bp, &func_params);
+
+	return rc;
+}
+
+static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+{
+	int rc, i, port = BP_PORT(bp);
+	int vlan_en = 0, mac_en[NUM_MACS];
+
+	/* Close input from network */
+	if (bp->mf_mode == SINGLE_FUNCTION) {
+		bnx2x_set_rx_filter(&bp->link_params, 0);
+	} else {
+		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+				   NIG_REG_LLH0_FUNC_EN);
+		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+			  NIG_REG_LLH0_FUNC_EN, 0);
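+		/* save the per-MAC enable bits so they can be restored below */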
+		for (i = 0; i < NUM_MACS; i++) {
+			mac_en[i] = REG_RD(bp, port ?
+					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+					      4 * i));
+			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+		}
+	}
+
+	/* Close BMC to host */
+	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+
+	/* Suspend Tx switching to the PF. Completion of this ramrod
+	 * further guarantees that all the packets of that PF / child
+	 * VFs in BRB were processed by the Parser, so it is safe to
+	 * change the NIC_MODE register.
+	 */
+	rc = bnx2x_func_switch_update(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Can't suspend tx-switching!\n");
+		return rc;
+	}
+
+	/* Change NIC_MODE register */
+	REG_WR(bp, PRS_REG_NIC_MODE, 0);
+
+	/* Open input from network */
+	if (bp->mf_mode == SINGLE_FUNCTION) {
+		bnx2x_set_rx_filter(&bp->link_params, 1);
+	} else {
+		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+			  NIG_REG_LLH0_FUNC_EN, vlan_en);
+		for (i = 0; i < NUM_MACS; i++) {
+			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+					      4 * i) :
+				  (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+				  mac_en[i]);
+		}
+	}
+
+	/* Enable BMC to host */
+	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+
+	/* Resume Tx switching to the PF */
+	rc = bnx2x_func_switch_update(bp, 0);
+	if (rc) {
+		BNX2X_ERR("Can't resume tx-switching!\n");
+		return rc;
+	}
+
+	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+	return 0;
+}
+
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+{
+	int rc;
+
+	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+
+	if (CONFIGURE_NIC_MODE(bp)) {
+		/* Configure searcher as part of function hw init */
+		bnx2x_init_searcher(bp);
+
+		/* Reset NIC mode */
+		rc = bnx2x_reset_nic_mode(bp);
+		if (rc)
+			BNX2X_ERR("Can't change NIC mode!\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/* A previous driver DMAE transaction may have occurred when the pre-boot stage
+ * ended and boot began, or when a kdump kernel was loaded. Either case would
+ * invalidate the addresses of the transaction, resulting in the was-error bit
+ * being set in the PCI block and causing all HW-to-host PCIe transactions to
+ * time out. If this happened we want to clear the interrupt which detected
+ * this from the PGLUE_B block, as well as the was-done bit.
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+		       1 << BP_ABS_FUNC(bp));
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int init_phase = PHASE_PF0 + func;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	u16 cdu_ilt_start;
+	u32 addr, val;
+	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+	int i, main_mem_width, rc;
+
+	DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
+
+	/* FLR cleanup */
+	if (!CHIP_IS_E1x(bp)) {
+		rc = bnx2x_pf_flr_clnup(bp);
+		if (rc) {
+			bnx2x_fw_dump(bp);
+			return rc;
+		}
+	}
+
+	/* set MSI reconfigure capability */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+		val = REG_RD(bp, addr);
+		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+		REG_WR(bp, addr, val);
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	ilt = BP_ILT(bp);
+	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+	if (IS_SRIOV(bp))
+		cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+	cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
+	 * those of the VFs, so the start line should be reset
+	 */
+	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+	for (i = 0; i < L2_ILT_LINES(bp); i++) {
+		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
+		ilt->lines[cdu_ilt_start + i].page_mapping =
+			bp->context[i].cxt_mapping;
+		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
+	}
+
+	bnx2x_ilt_init_op(bp, INITOP_SET);
+
+	if (!CONFIGURE_NIC_MODE(bp)) {
+		bnx2x_init_searcher(bp);
+		REG_WR(bp, PRS_REG_NIC_MODE, 0);
+		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+	} else {
+		/* Set NIC mode */
+		REG_WR(bp, PRS_REG_NIC_MODE, 1);
+		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
+	}
+
+	if (!CHIP_IS_E1x(bp)) {
+		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+		/* Turn on a single ISR mode in IGU if driver is going to use
+		 * INT#x or MSI
+		 */
+		if (!(bp->flags & USING_MSIX_FLAG))
+			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+		/*
+		 * Timers workaround bug: function init part.
+		 * Need to wait 20msec after initializing ILT,
+		 * needed to make sure there are no requests in
+		 * one of the PXP internal queues with "old" ILT addresses
+		 */
+		msleep(20);
+		/*
+		 * Master enable - needed here because WB DMAE writes are
+		 * performed before this register is re-initialized as part
+		 * of the regular function init
+		 */
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+		/* Enable the function in IGU */
+		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+	}
+
+	bp->dmae_ready = 1;
+
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+	bnx2x_clean_pglue_errors(bp);
+
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, QM_REG_PF_EN, 1);
+
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+	}
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TM, init_phase);
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+	REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
+
+	bnx2x_iov_init_dq(bp);
+
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+	if (IS_MF(bp)) {
+		if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
+			REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+			REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
+			       bp->mf_ov);
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+	/* HC init per function */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+		}
+		bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+	} else {
+		int num_segs, sb_idx, prod_offset;
+
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+		if (!CHIP_IS_E1x(bp)) {
+			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+		}
+
+		bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+		if (!CHIP_IS_E1x(bp)) {
+			int dsb_idx = 0;
+			/**
+			 * Producer memory:
+			 * E2 mode: address 0-135 match to the mapping memory;
+			 * 136 - PF0 default prod; 137 - PF1 default prod;
+			 * 138 - PF2 default prod; 139 - PF3 default prod;
+			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
+			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
+			 * 144-147 reserved.
+			 *
+			 * E1.5 mode - in backward compatible mode:
+			 * for non-default SBs, each even line in the memory
+			 * holds the U producer and each odd line holds
+			 * the C producer. The first 128 producers are for
+			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+			 * producers are for the DSB for each PF.
+			 * Each PF has five segments: (the order inside each
+			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+			 * 144-147 attn prods;
+			 */
+			/* non-default-status-blocks */
+			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+				prod_offset = (bp->igu_base_sb + sb_idx) *
+					num_segs;
+
+				for (i = 0; i < num_segs; i++) {
+					addr = IGU_REG_PROD_CONS_MEMORY +
+							(prod_offset + i) * 4;
+					REG_WR(bp, addr, 0);
+				}
+				/* send consumer update with value 0 */
+				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_igu_clear_sb(bp,
+						   bp->igu_base_sb + sb_idx);
+			}
+
+			/* default-status-blocks */
+			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+			if (CHIP_MODE_IS_4_PORT(bp))
+				dsb_idx = BP_FUNC(bp);
+			else
+				dsb_idx = BP_VN(bp);
+
+			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+				       IGU_BC_BASE_DSB_PROD + dsb_idx :
+				       IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+			/*
+			 * igu prods come in chunks of E1HVN_MAX (4),
+			 * regardless of the current chip mode
+			 */
+			for (i = 0; i < (num_segs * E1HVN_MAX);
+			     i += E1HVN_MAX) {
+				addr = IGU_REG_PROD_CONS_MEMORY +
+							(prod_offset + i)*4;
+				REG_WR(bp, addr, 0);
+			}
+			/* send consumer update with 0 */
+			if (CHIP_INT_MODE_IS_BC(bp)) {
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     CSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     XSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     TSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
+			} else {
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
+			}
+			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+			/* !!! These should become driver const once
+			   rf-tool supports split-68 const */
+			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+		}
+	}
+
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2114, 0xffffffff);
+	REG_WR(bp, 0x2120, 0xffffffff);
+
+	if (CHIP_IS_E1x(bp)) {
+		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+		main_mem_base = HC_REG_MAIN_MEMORY +
+				BP_PORT(bp) * (main_mem_size * 4);
+		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+		main_mem_width = 8;
+
+		val = REG_RD(bp, main_mem_prty_clr);
+		if (val)
+			DP(NETIF_MSG_HW,
+			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
+			   val);
+
+		/* Clear "false" parity errors in MSI-X table */
+		for (i = main_mem_base;
+		     i < main_mem_base + main_mem_size * 4;
+		     i += main_mem_width) {
+			bnx2x_read_dmae(bp, i, main_mem_width / 4);
+			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+					 i, main_mem_width / 4);
+		}
+		/* Clear HC parity attention */
+		REG_RD(bp, main_mem_prty_clr);
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	/* Enable STORMs SP logging */
+	REG_WR8(bp, BAR_USTRORM_INTMEM +
+	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+	bnx2x_phy_probe(&bp->link_params);
+
+	return 0;
+}
+
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+	if (!CHIP_IS_E1x(bp))
+		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e2));
+	else
+		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e1x));
+
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+	int i;
+
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
+	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+		       sizeof(struct bnx2x_slowpath));
+
+	for (i = 0; i < L2_ILT_LINES(bp); i++)
+		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+			       bp->context[i].size);
+	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+	BNX2X_FREE(bp->ilt->lines);
+
+	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+
+	bnx2x_iov_free_mem(bp);
+}
+
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1x(bp)) {
+		/* size = the status block + ramrod buffers */
+		bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+						    sizeof(struct host_hc_status_block_e2));
+		if (!bp->cnic_sb.e2_sb)
+			goto alloc_mem_err;
+	} else {
+		bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+						     sizeof(struct host_hc_status_block_e1x));
+		if (!bp->cnic_sb.e1x_sb)
+			goto alloc_mem_err;
+	}
+
+	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
+		/* allocate searcher T2 table, as it wasn't allocated before */
+		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+		if (!bp->t2)
+			goto alloc_mem_err;
+	}
+
+	/* write address to which L5 should insert its values */
+	bp->cnic_eth_dev.addr_drv_info_to_mcp =
+		&bp->slowpath->drv_info_to_mcp;
+
+	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem_cnic(bp);
+	BNX2X_ERR("Can't allocate memory\n");
+	return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+	int i, allocated, context_size;
+
+	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
+		/* allocate searcher T2 table */
+		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+		if (!bp->t2)
+			goto alloc_mem_err;
+	}
+
+	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+					     sizeof(struct host_sp_status_block));
+	if (!bp->def_status_blk)
+		goto alloc_mem_err;
+
+	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+				       sizeof(struct bnx2x_slowpath));
+	if (!bp->slowpath)
+		goto alloc_mem_err;
+
+	/* Allocate memory for CDU context:
+	 * This memory is allocated separately and not in the generic ILT
+	 * functions because CDU differs in a few aspects:
+	 * 1. There are multiple entities allocating memory for context -
+	 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+	 * its own ILT lines.
+	 * 2. Since CDU page-size is not a single 4KB page (which is the case
+	 * for the other ILT clients), to be efficient we want to support
+	 * allocation of sub-page-size in the last entry.
+	 * 3. Context pointers are used by the driver to pass to FW / update
+	 * the context (for the other ILT clients the pointers are used just to
+	 * free the memory during unload).
+	 */
+	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+	for (i = 0, allocated = 0; allocated < context_size; i++) {
+		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+					  (context_size - allocated));
+		bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+						      bp->context[i].size);
+		if (!bp->context[i].vcxt)
+			goto alloc_mem_err;
+		allocated += bp->context[i].size;
+	}
+	bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+				 GFP_KERNEL);
+	if (!bp->ilt->lines)
+		goto alloc_mem_err;
+
+	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+		goto alloc_mem_err;
+
+	if (bnx2x_iov_alloc_mem(bp))
+		goto alloc_mem_err;
+
+	/* Slow path ring */
+	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+	if (!bp->spq)
+		goto alloc_mem_err;
+
+	/* EQ */
+	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+				      BCM_PAGE_SIZE * NUM_EQ_PAGES);
+	if (!bp->eq_ring)
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem(bp);
+	BNX2X_ERR("Can't allocate memory\n");
+	return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+	if (rc == -EEXIST) {
+		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+		/* do not treat adding same MAC as error */
+		rc = 0;
+	} else if (rc < 0)
+		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+
+	return rc;
+}
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		ramrod_param.user_req.u.vlan.vlan = vlan;
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+	if (rc == -EEXIST) {
+		/* Do not treat adding same vlan as error. */
+		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+		rc = 0;
+	} else if (rc < 0) {
+		BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+	}
+
+	return rc;
+}
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+	/* Wait for completion of the requested commands */
+	if (wait_for_comp)
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+	/* Set the mac type of addresses we want to clear */
+	__set_bit(mac_type, &vlan_mac_flags);
+
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+	return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+	if (IS_PF(bp)) {
+		unsigned long ramrod_flags = 0;
+
+		DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+					 &bp->sp_objs->mac_obj, set,
+					 BNX2X_ETH_MAC, &ramrod_flags);
+	} else { /* vf */
+		return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+					     bp->fp->index, set);
+	}
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+	if (IS_PF(bp))
+		return bnx2x_setup_queue(bp, &bp->fp[0], true);
+	else /* VF */
+		return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp:		driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+int bnx2x_set_int_mode(struct bnx2x *bp)
+{
+	int rc = 0;
+
+	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+		BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
+		return -EINVAL;
+	}
+
+	switch (int_mode) {
+	case BNX2X_INT_MODE_MSIX:
+		/* attempt to enable msix */
+		rc = bnx2x_enable_msix(bp);
+
+		/* msix attained */
+		if (!rc)
+			return 0;
+
+		/* vfs use only msix */
+		if (rc && IS_VF(bp))
+			return rc;
+
+		/* failed to enable multiple MSI-X */
+		BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+			       bp->num_queues,
+			       1 + bp->num_cnic_queues);
+
+		/* falling through... */
+	case BNX2X_INT_MODE_MSI:
+		bnx2x_enable_msi(bp);
+
+		/* falling through... */
+	case BNX2X_INT_MODE_INTX:
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+		BNX2X_DEV_INFO("set number of queues to 1\n");
+		break;
+	default:
+		BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+	if (IS_SRIOV(bp))
+		return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
+	return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+	struct ilt_client_info *ilt_client;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	u16 line = 0;
+
+	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
+	/* CDU */
+	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+	ilt_client->client_num = ILT_CLIENT_CDU;
+	ilt_client->page_size = CDU_ILT_PAGE_SZ;
+	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+	ilt_client->start = line;
+	line += bnx2x_cid_ilt_lines(bp);
+
+	if (CNIC_SUPPORT(bp))
+		line += CNIC_ILT_LINES;
+	ilt_client->end = line - 1;
+
+	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+	   ilt_client->start,
+	   ilt_client->end,
+	   ilt_client->page_size,
+	   ilt_client->flags,
+	   ilog2(ilt_client->page_size >> 12));
+
+	/* QM */
+	if (QM_INIT(bp->qm_cid_count)) {
+		ilt_client = &ilt->clients[ILT_CLIENT_QM];
+		ilt_client->client_num = ILT_CLIENT_QM;
+		ilt_client->page_size = QM_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+
+		/* 4 bytes for each cid */
+		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+							 QM_ILT_PAGE_SZ);
+
+		ilt_client->end = line - 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+	}
+
+	if (CNIC_SUPPORT(bp)) {
+		/* SRC */
+		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+		ilt_client->client_num = ILT_CLIENT_SRC;
+		ilt_client->page_size = SRC_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += SRC_ILT_LINES;
+		ilt_client->end = line - 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+
+		/* TM */
+		ilt_client = &ilt->clients[ILT_CLIENT_TM];
+		ilt_client->client_num = ILT_CLIENT_TM;
+		ilt_client->page_size = TM_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += TM_ILT_LINES;
+		ilt_client->end = line - 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+	}
+
+	BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp:			driver handle
+ * @fp:			pointer to fastpath
+ * @init_params:	pointer to parameters structure
+ *
+ * parameters configured:
+ *      - HC configuration
+ *      - Queue's CDU context
+ */
+static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+	u8 cos;
+	int cxt_index, cxt_offset;
+
+	/* FCoE Queue uses Default SB, thus has no HC capabilities */
+	if (!IS_FCOE_FP(fp)) {
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+		/* If HC is supported, enable host coalescing in the transition
+		 * to INIT state.
+		 */
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+		/* HC rate: ticks are in usec, so this gives interrupts/sec */
+		init_params->rx.hc_rate = bp->rx_ticks ?
+			(1000000 / bp->rx_ticks) : 0;
+		init_params->tx.hc_rate = bp->tx_ticks ?
+			(1000000 / bp->tx_ticks) : 0;
+
+		/* FW SB ID */
+		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+			fp->fw_sb_id;
+
+		/*
+		 * CQ index among the SB indices: the FCoE client uses the
+		 * default SB, therefore it's different.
+		 */
+		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+	}
+
+	/* set maximum number of COSs supported by this queue */
+	init_params->max_cos = fp->max_cos;
+
+	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
+	    fp->index, init_params->max_cos);
+
+	/* Set the context pointers in the queue object: each CID maps to an
+	 * ILT page (cid / ILT_PAGE_CIDS) and an offset within that page.
+	 */
+	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+				ILT_PAGE_CIDS);
+		init_params->cxts[cos] =
+			&bp->context[cxt_index].vcxt[cxt_offset].eth;
+	}
+}
+
+static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			struct bnx2x_queue_state_params *q_params,
+			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+			int tx_index, bool leading)
+{
+	memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+	/* Set the command */
+	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+	/* Set tx-only QUEUE flags: don't zero statistics */
+	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+	/* choose the index of the cid to send the slow path on */
+	tx_only_params->cid_index = tx_index;
+
+	/* Set general TX_ONLY_SETUP parameters */
+	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+	/* Set Tx TX_ONLY_SETUP parameters */
+	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+	DP(NETIF_MSG_IFUP,
+	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
+	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+	/* send the ramrod */
+	return bnx2x_queue_state_change(bp, q_params);
+}
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to fastpath
+ * @leading:	is leading
+ *
+ * This function performs 2 steps in the Queue state machine:
+ *      1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading)
+{
+	struct bnx2x_queue_state_params q_params = {NULL};
+	struct bnx2x_queue_setup_params *setup_params =
+						&q_params.params.setup;
+	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+						&q_params.params.tx_only;
+	int rc;
+	u8 tx_index;
+
+	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
+
+	/* reset IGU state; skip for the FCoE L2 queue */
+	if (!IS_FCOE_FP(fp))
+		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+			     IGU_INT_ENABLE, 0);
+
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+	/* Prepare the INIT parameters */
+	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_INIT;
+
+	/* Change the state to INIT */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+		return rc;
+	}
+
+	DP(NETIF_MSG_IFUP, "init complete\n");
+
+	/* Now move the Queue to the SETUP state... */
+	memset(setup_params, 0, sizeof(*setup_params));
+
+	/* Set QUEUE flags */
+	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+	/* Set general SETUP parameters */
+	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+				FIRST_TX_COS_INDEX);
+
+	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+			    &setup_params->rxq_params);
+
+	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+			   FIRST_TX_COS_INDEX);
+
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+	if (IS_FCOE_FP(fp))
+		bp->fcoe_init = true;
+
+	/* Change the state to SETUP */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+		return rc;
+	}
+
+	/* loop through the relevant tx-only indices */
+	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+	      tx_index < fp->max_cos;
+	      tx_index++) {
+
+		/* prepare and send tx-only ramrod*/
+		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+					  tx_only_params, tx_index, leading);
+		if (rc) {
+			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+				  fp->index, tx_index);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct bnx2x_fp_txdata *txdata;
+	struct bnx2x_queue_state_params q_params = {NULL};
+	int rc, tx_index;
+
+	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
+
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+	/* close tx-only connections */
+	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+	     tx_index < fp->max_cos;
+	     tx_index++){
+
+		/* ascertain this is a normal queue */
+		txdata = fp->txdata_ptr[tx_index];
+
+		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
+							txdata->txq_index);
+
+		/* send halt terminate on tx-only connection */
+		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+		memset(&q_params.params.terminate, 0,
+		       sizeof(q_params.params.terminate));
+		q_params.params.terminate.cid_index = tx_index;
+
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc)
+			return rc;
+
+		/* send cfc del ramrod on tx-only connection */
+		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+		memset(&q_params.params.cfc_del, 0,
+		       sizeof(q_params.params.cfc_del));
+		q_params.params.cfc_del.cid_index = tx_index;
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc)
+			return rc;
+	}
+	/* Stop the primary connection: */
+	/* ...halt the connection */
+	q_params.cmd = BNX2X_Q_CMD_HALT;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
+		return rc;
+
+	/* ...terminate the connection */
+	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+	memset(&q_params.params.terminate, 0,
+	       sizeof(q_params.params.terminate));
+	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
+		return rc;
+	/* ...delete cfc entry */
+	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+	memset(&q_params.params.cfc_del, 0,
+	       sizeof(q_params.params.cfc_del));
+	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+	return bnx2x_queue_state_change(bp, &q_params);
+}
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int i;
+
+	/* Disable the function in the FW */
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+	/* FP SBs */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			   CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+			   SB_DISABLED);
+	}
+
+	if (CNIC_LOADED(bp))
+		/* CNIC SB */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+			(bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
+	/* SP SB */
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+		SB_DISABLED);
+
+	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+		       0);
+
+	/* Configure IGU */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	} else {
+		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+	}
+
+	if (CNIC_LOADED(bp)) {
+		/* Disable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+		/*
+		 * Wait for at least 10ms and up to 2 seconds for the timers
+		 * scan to complete
+		 */
+		for (i = 0; i < 200; i++) {
+			usleep_range(10000, 20000);
+			if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+				break;
+		}
+	}
+	/* Clear ILT */
+	bnx2x_clear_func_ilt(bp, func);
+
+	/* Timers bug workaround for E2: if this is vnic-3,
+	 * we need to set the entire ILT range for these timers.
+	 */
+	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+		struct ilt_client_info ilt_cli;
+		/* use dummy TM client */
+		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+		ilt_cli.start = 0;
+		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+		ilt_cli.client_num = ILT_CLIENT_TM;
+
+		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+	}
+
+	/* this assumes that reset_port() called before reset_func()*/
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_pf_disable(bp);
+
+	bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	/* Reset physical Link */
+	bnx2x__link_reset(bp);
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	/* Do not rcv packets to BRB */
+	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+	/* Do not direct rcv packets that are not for MCP to the BRB */
+	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+	/* Configure AEU */
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+	msleep(100);
+	/* Check for BRB port occupancy */
+	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+	if (val)
+		DP(NETIF_MSG_IFDOWN,
+		   "BRB1 is not empty  %d blocks are occupied\n", val);
+
+	/* TODO: Close Doorbell port? */
+}
+
+static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_func_stop(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_STOP;
+
+	/*
+	 * Try to stop the function the 'good way'. If it fails (in case
+	 * of a parity error during bnx2x_chip_cleanup()) and we are
+	 * not in a debug mode, perform a state transaction in order to
+	 * enable further HW_RESET transaction.
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+		return rc;
+#else
+		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
+		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+		return bnx2x_func_state_change(bp, &func_params);
+#endif
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+	u32 reset_code = 0;
+	int port = BP_PORT(bp);
+
+	/* Select the UNLOAD request mode */
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+	else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		struct pci_dev *pdev = bp->pdev;
+		u32 val;
+		u16 pmc;
+
+		/* The mac address is written to entries 1-4 to
+		 * preserve entry 0 which is used by the PMF
+		 */
+		u8 entry = (BP_VN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		/* Enable the PME and clear the status */
+		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
+		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	/* Send the request to the MCP */
+	if (!BP_NOMCP(bp))
+		reset_code = bnx2x_fw_command(bp, reset_code, 0);
+	else {
+		int path = BP_PATH(bp);
+
+		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
+		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+		   bnx2x_load_count[path][2]);
+		bnx2x_load_count[path][0]--;
+		bnx2x_load_count[path][1 + port]--;
+		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
+		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+		   bnx2x_load_count[path][2]);
+		if (bnx2x_load_count[path][0] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+		else if (bnx2x_load_count[path][1 + port] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+		else
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+	}
+
+	return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ * @keep_link:		true iff link should be kept up
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
+{
+	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
+	/* Report UNLOAD_DONE to MCP */
+	if (!BP_NOMCP(bp))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
+}
+
+static int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+	int tout = 50;
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+	if (!bp->port.pmf)
+		return 0;
+
+	/*
+	 * (assumption: No Attention from MCP at this stage)
+	 * PMF is probably in the middle of a TX disable/enable transaction.
+	 * 1. Sync IRQ for default SB
+	 * 2. Sync SP queue - this guarantees us that attention handling started
+	 * 3. Wait until the TX disable/enable transaction completes
+	 *
+	 * 1+2 guarantee that if a DCBx attention was scheduled, it has already
+	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
+	 * if we already received completion for the transaction, the state is
+	 * TX_STOPPED.
+	 * State will return to STARTED after completion of TX_STOPPED-->STARTED
+	 * transaction.
+	 */
+
+	/* make sure default SB ISR is done */
+	if (msix)
+		synchronize_irq(bp->msix_table[0].vector);
+	else
+		synchronize_irq(bp->pdev->irq);
+
+	flush_workqueue(bnx2x_wq);
+	flush_workqueue(bnx2x_iov_wq);
+
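+	/* Poll for up to ~1 second (50 iterations of 20ms) for the function
+	 * to return to the STARTED state.
+	 */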
+	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+				BNX2X_F_STATE_STARTED && tout--)
+		msleep(20);
+
+	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+						BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+		BNX2X_ERR("Wrong function state\n");
+		return -EBUSY;
+#else
+		/*
+		 * Failed to complete the transaction in a "good way"
+		 * Force both transactions with CLR bit
+		 */
+		struct bnx2x_func_state_params func_params = {NULL};
+
+		DP(NETIF_MSG_IFDOWN,
+		   "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
+
+		func_params.f_obj = &bp->func_obj;
+		__set_bit(RAMROD_DRV_CLR_ONLY,
+					&func_params.ramrod_flags);
+
+		/* STARTED-->TX_STOPPED */
+		func_params.cmd = BNX2X_F_CMD_TX_STOP;
+		bnx2x_func_state_change(bp, &func_params);
+
+		/* TX_STOPPED-->STARTED */
+		func_params.cmd = BNX2X_F_CMD_TX_START;
+		return bnx2x_func_state_change(bp, &func_params);
+#endif
+	}
+
+	return 0;
+}
+
+static void bnx2x_disable_ptp(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	/* Disable sending PTP packets to host */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+	/* Reset PTP event detection rules */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable the PTP feature */
+	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+	       NIG_REG_P0_PTP_EN, 0x0);
+}
+
+/* Called during unload, to stop PTP-related stuff */
+static void bnx2x_stop_ptp(struct bnx2x *bp)
+{
+	/* Cancel PTP work queue. Should be done after the Tx queues are
+	 * drained to prevent additional scheduling.
+	 */
+	cancel_work_sync(&bp->ptp_task);
+
+	if (bp->ptp_tx_skb) {
+		dev_kfree_skb_any(bp->ptp_tx_skb);
+		bp->ptp_tx_skb = NULL;
+	}
+
+	/* Disable PTP in HW */
+	bnx2x_disable_ptp(bp);
+
+	DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
+{
+	int port = BP_PORT(bp);
+	int i, rc = 0;
+	u8 cos;
+	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+	u32 reset_code;
+
+	/* Wait until tx fastpath tasks complete */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		for_each_cos_in_tx_queue(fp, cos)
+			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+		if (rc)
+			return;
+#endif
+	}
+
+	/* Give HW time to discard old tx messages */
+	usleep_range(1000, 2000);
+
+	/* Clean all ETH MACs */
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+				false);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+	/* Clean up UC list  */
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
+				true);
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+			  rc);
+
+	/* Disable LLH */
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+	/* Set "drop all" (stop Rx).
+	 * We need to take a netif_addr_lock() here in order to prevent
+	 * a race between the completion code and this code.
+	 */
+	netif_addr_lock_bh(bp->dev);
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+	else
+		bnx2x_set_storm_rx_mode(bp);
+
+	/* Cleanup multicast configuration */
+	rparam.mcast_obj = &bp->mcast_obj;
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+	netif_addr_unlock_bh(bp->dev);
+
+	bnx2x_iov_chip_cleanup(bp);
+
+	/*
+	 * Send the UNLOAD_REQUEST to the MCP. This will return whether
+	 * this function should perform a FUNC, PORT or COMMON HW
+	 * reset.
+	 */
+	reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+	/*
+	 * (assumption: No Attention from MCP at this stage)
+	 * PMF probably in the middle of TX disable/enable transaction
+	 */
+	rc = bnx2x_func_wait_started(bp);
+	if (rc) {
+		BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#endif
+	}
+
+	/* Close multi and leading connections
+	 * Completions for ramrods are collected in a synchronous way
+	 */
+	for_each_eth_queue(bp, i)
+		if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+			return;
+#else
+			goto unload_error;
+#endif
+
+	if (CNIC_LOADED(bp)) {
+		for_each_cnic_queue(bp, i)
+			if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+				return;
+#else
+				goto unload_error;
+#endif
+	}
+
+	/* If SP settings didn't get completed so far - something
+	 * very wrong has happened.
+	 */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+	rc = bnx2x_func_stop(bp);
+	if (rc) {
+		BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#endif
+	}
+
+	/* stop_ptp should be after the Tx queues are drained to prevent
+	 * scheduling to the cancelled PTP work queue. It should also be after
+	 * function stop ramrod is sent, since as part of this ramrod the FW
+	 * accesses PTP registers.
+	 */
+	if (bp->flags & PTP_SUPPORTED)
+		bnx2x_stop_ptp(bp);
+
+	/* Disable HW interrupts, NAPI */
+	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+
+	/* Reset the chip */
+	rc = bnx2x_reset_hw(bp, reset_code);
+	if (rc)
+		BNX2X_ERR("HW_RESET failed\n");
+
+	/* Report UNLOAD_DONE to MCP */
+	bnx2x_send_unload_done(bp, keep_link);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+	u32 val;
+
+	DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
+
+	if (CHIP_IS_E1(bp)) {
+		int port = BP_PORT(bp);
+		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+		val = REG_RD(bp, addr);
+		val &= ~(0x300);
+		REG_WR(bp, addr, val);
+	} else {
+		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+	}
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+	u32 val;
+
+	/* Gates #2 and #4a are closed/opened for "not E1" only */
+	if (!CHIP_IS_E1(bp)) {
+		/* #4 */
+		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+		/* #2 */
+		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+	}
+
+	/* #3 */
+	if (CHIP_IS_E1x(bp)) {
+		/* Prevent interrupts from HC on both ports */
+		val = REG_RD(bp, HC_REG_CONFIG_1);
+		REG_WR(bp, HC_REG_CONFIG_1,
+		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+		val = REG_RD(bp, HC_REG_CONFIG_0);
+		REG_WR(bp, HC_REG_CONFIG_0,
+		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+	} else {
+		/* Prevent incoming interrupts in IGU */
+		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+		       (!close) ?
+		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+	}
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
+		close ? "closing" : "opening");
+	mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	/* Save the current `magic' bit value and set it, so that the MF
+	 * configuration is preserved across the MCP reset.
+	 */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	*magic_val = val & SHARED_MF_CLP_MAGIC;
+	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp:		driver handle
+ * @magic_val:	old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+	/* Restore the `magic' bit value... */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	MF_CFG_WR(bp, shared_mf_config.clp_mb,
+		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp:		driver handle
+ * @magic_val:	old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	u32 shmem;
+	u32 validity_offset;
+
+	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
+
+	/* Set `magic' bit in order to save MF config */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_prep(bp, magic_val);
+
+	/* Get shmem offset */
+	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	validity_offset =
+		offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
+
+	/* Clear validity map flags */
+	if (shmem > 0)
+		REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT  100    /* 100 ms */
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp:	driver handle
+ */
+static void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+	/* special handling for emulation and FPGA: wait 10 times longer */
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(MCP_ONE_TIMEOUT*10);
+	else
+		msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+	int cnt = 0;
+	u32 val = 0;
+
+	do {
+		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+		if (bp->common.shmem_base) {
+			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+			if (val & SHR_MEM_VALIDITY_MB)
+				return 0;
+		}
+
+		bnx2x_mcp_wait_one(bp);
+
+	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+	BNX2X_ERR("BAD MCP validity signature\n");
+
+	return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+	int rc = bnx2x_init_shmem(bp);
+
+	/* Restore the `magic' bit value */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_done(bp, magic_val);
+
+	return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1(bp)) {
+		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+		mmiowb();
+	}
+}
+
+/*
+ * Reset the whole chip except for:
+ *      - PCIE core
+ *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ *              one reset bit)
+ *      - IGU
+ *      - MISC (including AEU)
+ *      - GRC
+ *      - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+	u32 global_bits2, stay_reset2;
+
+	/*
+	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
+	 * (per chip) blocks.
+	 */
+	global_bits2 =
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+	/* Don't reset the following blocks.
+	 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
+	 *            reset, as in a 4-port device they might still be owned
+	 *            by the MCP (there is only one leader per path).
+	 */
+	not_reset_mask1 =
+		MISC_REGISTERS_RESET_REG_1_RST_HC |
+		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+		MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+	not_reset_mask2 =
+		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+		MISC_REGISTERS_RESET_REG_2_RST_ATC |
+		MISC_REGISTERS_RESET_REG_2_PGLC |
+		MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+		MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+		MISC_REGISTERS_RESET_REG_2_UMAC0 |
+		MISC_REGISTERS_RESET_REG_2_UMAC1;
+
+	/*
+	 * Keep the following blocks in reset:
+	 *  - all xxMACs are handled by the bnx2x_link code.
+	 */
+	stay_reset2 =
+		MISC_REGISTERS_RESET_REG_2_XMAC |
+		MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+	/* Full reset masks according to the chip */
+	reset_mask1 = 0xffffffff;
+
+	if (CHIP_IS_E1(bp))
+		reset_mask2 = 0xffff;
+	else if (CHIP_IS_E1H(bp))
+		reset_mask2 = 0x1ffff;
+	else if (CHIP_IS_E2(bp))
+		reset_mask2 = 0xfffff;
+	else /* CHIP_IS_E3 */
+		reset_mask2 = 0x3ffffff;
+
+	/* Don't reset global blocks unless we need to */
+	if (!global)
+		reset_mask2 &= ~global_bits2;
+
+	/*
+	 * In case of attention in the QM, we need to reset PXP
+	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+	 * because otherwise QM reset would release 'close the gates' shortly
+	 * before resetting the PXP, then the PSWRQ would send a write
+	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
+	 * read the payload data from PSWWR, but PSWWR would not
+	 * respond. The write queue in PGLUE would get stuck, DMAE commands
+	 * would not return. Therefore it's important to reset the second
+	 * reset register (containing the
+	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+	 * bit).
+	 */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       reset_mask2 & (~not_reset_mask2));
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       reset_mask1 & (~not_reset_mask1));
+
+	barrier();
+	mmiowb();
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       reset_mask2 & (~stay_reset2));
+
+	barrier();
+	mmiowb();
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+	mmiowb();
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
+ *
+ * @bp:	driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+	u32 cnt = 1000;
+	u32 pend_bits = 0;
+
+	do {
+		pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+		if (pend_bits == 0)
+			break;
+
+		usleep_range(1000, 2000);
+	} while (cnt-- > 0);
+
+	if (cnt <= 0) {
+		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+			  pend_bits);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+	int cnt = 1000;
+	u32 val = 0;
+	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+	u32 tags_63_32 = 0;
+
+	/* Empty the Tetris buffer, wait for 1s */
+	do {
+		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+		if (CHIP_IS_E3(bp))
+			tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
+
+		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+		    ((port_is_idle_0 & 0x1) == 0x1) &&
+		    ((port_is_idle_1 & 0x1) == 0x1) &&
+		    (pgl_exp_rom2 == 0xffffffff) &&
+		    (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
+			break;
+		usleep_range(1000, 2000);
+	} while (cnt-- > 0);
+
+	if (cnt <= 0) {
+		BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
+		BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+			  pgl_exp_rom2);
+		return -EAGAIN;
+	}
+
+	barrier();
+
+	/* Close gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, true);
+
+	/* Poll for IGU VQs for 57712 and newer chips */
+	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+		return -EAGAIN;
+
+	/* TBD: Indicate that "process kill" is in progress to MCP */
+
+	/* Clear "unprepared" bit */
+	REG_WR(bp, MISC_REG_UNPREPARED, 0);
+	barrier();
+
+	/* Make sure all is written to the chip before the reset */
+	mmiowb();
+
+	/* Wait for 1ms to empty GLUE and PCI-E core queues,
+	 * PSWHST, GRC and PSWRD Tetris buffer.
+	 */
+	usleep_range(1000, 2000);
+
+	/* Prepare to chip reset: */
+	/* MCP */
+	if (global)
+		bnx2x_reset_mcp_prep(bp, &val);
+
+	/* PXP */
+	bnx2x_pxp_prep(bp);
+	barrier();
+
+	/* reset the chip */
+	bnx2x_process_kill_chip_reset(bp, global);
+	barrier();
+
+	/* clear errors in PGB */
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
+
+	/* Recover after reset: */
+	/* MCP */
+	if (global && bnx2x_reset_mcp_comp(bp, val))
+		return -EAGAIN;
+
+	/* TBD: Add resetting the NO_MCP mode DB here */
+
+	/* Open the gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, false);
+
+	/* TBD: IGU/AEU preparation - bring back the AEU/IGU to a
+	 * reset state, re-enable attentions.
+	 */
+
+	return 0;
+}
+
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+	int rc = 0;
+	bool global = bnx2x_reset_is_global(bp);
+	u32 load_code;
+
+	/* if not going to reset MCP - load "fake" driver to reset HW while
+	 * driver is owner of the HW
+	 */
+	if (!global && !BP_NOMCP(bp)) {
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+					     DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EAGAIN;
+			goto exit_leader_reset;
+		}
+		if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+		    (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+			BNX2X_ERR("MCP unexpected resp, aborting\n");
+			rc = -EAGAIN;
+			goto exit_leader_reset2;
+		}
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EAGAIN;
+			goto exit_leader_reset2;
+		}
+	}
+
+	/* Try to recover after the failure */
+	if (bnx2x_process_kill(bp, global)) {
+		BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
+			  BP_PATH(bp));
+		rc = -EAGAIN;
+		goto exit_leader_reset2;
+	}
+
+	/*
+	 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
+	 * state.
+	 */
+	bnx2x_set_reset_done(bp);
+	if (global)
+		bnx2x_clear_reset_global(bp);
+
+exit_leader_reset2:
+	/* unload "fake driver" if it was loaded */
+	if (!global && !BP_NOMCP(bp)) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+	}
+exit_leader_reset:
+	bp->is_leader = 0;
+	bnx2x_release_leader_lock(bp);
+	smp_mb();
+	return rc;
+}
+
+static void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+	/* Disconnect this device */
+	netif_device_detach(bp->dev);
+
+	/*
+	 * Block ifup for all functions on this engine until "process kill"
+	 * or power cycle.
+	 */
+	bnx2x_set_reset_in_progress(bp);
+
+	/* Shut down the power */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+	smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+	bool global = false;
+	u32 error_recovered, error_unrecovered;
+	bool is_parity;
+
+	DP(NETIF_MSG_HW, "Handling parity\n");
+	while (1) {
+		switch (bp->recovery_state) {
+		case BNX2X_RECOVERY_INIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+			is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+			WARN_ON(!is_parity);
+
+			/* Try to get a LEADER_LOCK HW lock */
+			if (bnx2x_trylock_leader_lock(bp)) {
+				bnx2x_set_reset_in_progress(bp);
+				/*
+				 * Check if there is a global attention and if
+				 * there was a global attention, set the global
+				 * reset bit.
+				 */
+
+				if (global)
+					bnx2x_set_reset_global(bp);
+
+				bp->is_leader = 1;
+			}
+
+			/* Stop the driver */
+			/* If interface has been removed - break */
+			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
+				return;
+
+			bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+			/* Ensure "is_leader", MCP command sequence and
+			 * "recovery_state" update values are seen on other
+			 * CPUs.
+			 */
+			smp_mb();
+			break;
+
+		case BNX2X_RECOVERY_WAIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+			if (bp->is_leader) {
+				int other_engine = BP_PATH(bp) ? 0 : 1;
+				bool other_load_status =
+					bnx2x_get_load_status(bp, other_engine);
+				bool load_status =
+					bnx2x_get_load_status(bp, BP_PATH(bp));
+				global = bnx2x_reset_is_global(bp);
+
+				/*
+				 * In case of a parity in a global block, let
+				 * the first leader that performs a
+				 * leader_reset() reset the global blocks in
+				 * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+				 * engine.
+				 */
+				if (load_status ||
+				    (global && other_load_status)) {
+					/* Wait until all other functions get
+					 * down.
+					 */
+					schedule_delayed_work(&bp->sp_rtnl_task,
+								HZ/10);
+					return;
+				} else {
+					/* If all other functions got down -
+					 * try to bring the chip back to
+					 * normal. In any case it's an exit
+					 * point for a leader.
+					 */
+					if (bnx2x_leader_reset(bp)) {
+						bnx2x_recovery_failed(bp);
+						return;
+					}
+
+					/* If we are here, it means that the
+					 * leader has succeeded and doesn't
+					 * want to be a leader any more. Try
+					 * to continue as a non-leader.
+					 */
+					break;
+				}
+			} else { /* non-leader */
+				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+					/* Try to get a LEADER_LOCK HW lock as
+					 * long as a former leader may have
+					 * been unloaded by the user or
+					 * released leadership for another
+					 * reason.
+					 */
+					if (bnx2x_trylock_leader_lock(bp)) {
+						/* I'm a leader now! Restart a
+						 * switch case.
+						 */
+						bp->is_leader = 1;
+						break;
+					}
+
+					schedule_delayed_work(&bp->sp_rtnl_task,
+								HZ/10);
+					return;
+
+				} else {
+					/*
+					 * If there was a global attention, wait
+					 * for it to be cleared.
+					 */
+					if (bnx2x_reset_is_global(bp)) {
+						schedule_delayed_work(
+							&bp->sp_rtnl_task,
+							HZ/10);
+						return;
+					}
+
+					error_recovered =
+					  bp->eth_stats.recoverable_error;
+					error_unrecovered =
+					  bp->eth_stats.unrecoverable_error;
+					bp->recovery_state =
+						BNX2X_RECOVERY_NIC_LOADING;
+					if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+						error_unrecovered++;
+						netdev_err(bp->dev,
+							   "Recovery failed. Power cycle needed\n");
+						/* Disconnect this device */
+						netif_device_detach(bp->dev);
+						/* Shut down the power */
+						bnx2x_set_power_state(
+							bp, PCI_D3hot);
+						smp_mb();
+					} else {
+						bp->recovery_state =
+							BNX2X_RECOVERY_DONE;
+						error_recovered++;
+						smp_mb();
+					}
+					bp->eth_stats.recoverable_error =
+						error_recovered;
+					bp->eth_stats.unrecoverable_error =
+						error_unrecovered;
+
+					return;
+				}
+			}
+		default:
+			return;
+		}
+	}
+}
+
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+	struct bnx2x_func_switch_update_params *switch_update_params;
+	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;
+
+	switch_update_params = &func_params.params.switch_update;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	/* Function parameters */
+	__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+		  &switch_update_params->changes);
+	switch_update_params->vxlan_dst_port = port;
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc)
+		BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+			  port, rc);
+	return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
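+	/* Only a single VXLAN destination port is supported; repeated adds of
+	 * the same port just bump the reference count.
+	 */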
+	if (!netif_running(bp->dev))
+		return;
+
+	if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
+		bp->vxlan_dst_port_count++;
+		return;
+	}
+
+	if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+		return;
+	}
+
+	bp->vxlan_dst_port = port;
+	bp->vxlan_dst_port_count = 1;
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+	if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
+	    !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+		return;
+	}
+	bp->vxlan_dst_port_count--;
+	if (bp->vxlan_dst_port_count)
+		return;
+
+	if (netif_running(bp->dev)) {
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+	} else {
+		bp->vxlan_dst_port = 0;
+		netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+	}
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
+static int bnx2x_close(struct net_device *dev);
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+	u16 port;
+#endif
+
+	rtnl_lock();
+
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
+		return;
+	}
+
+	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+#ifdef BNX2X_STOP_ON_ERROR
+		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+			  "you will need to reboot when done\n");
+		goto sp_rtnl_not_reset;
+#endif
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_parity_recover(bp);
+
+		rtnl_unlock();
+		return;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+#ifdef BNX2X_STOP_ON_ERROR
+		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+			  "you will need to reboot when done\n");
+		goto sp_rtnl_not_reset;
+#endif
+
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+		rtnl_unlock();
+		return;
+	}
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+		bnx2x_after_function_update(bp);
+	/*
+	 * In case of fan failure we need to reset it if the "stop on error"
+	 * debug flag is set, since we are trying to prevent permanent
+	 * overheating damage.
+	 */
+	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+		DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
+		netif_device_detach(bp->dev);
+		bnx2x_close(bp->dev);
+		rtnl_unlock();
+		return;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+		DP(BNX2X_MSG_SP,
+		   "sending set mcast vf pf channel message from rtnl sp-task\n");
+		bnx2x_vfpf_set_mcast(bp->dev);
+	}
+	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+			       &bp->sp_rtnl_state)){
+		if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+			bnx2x_tx_disable(bp);
+			BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
+		}
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+		DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+		bnx2x_set_rx_mode_inner(bp);
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+			       &bp->sp_rtnl_state))
+		bnx2x_pf_set_vfs_vlan(bp);
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
+		bnx2x_dcbx_stop_hw_tx(bp);
+		bnx2x_dcbx_resume_hw_tx(bp);
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+			       &bp->sp_rtnl_state))
+		bnx2x_update_mng_version(bp);
+
+#ifdef CONFIG_BNX2X_VXLAN
+	port = bp->vxlan_dst_port;
+	if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, port))
+			netdev_info(bp->dev, "Added vxlan dest port %d", port);
+		else
+			bp->vxlan_dst_port = 0;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, 0)) {
+			netdev_info(bp->dev,
+				    "Deleted vxlan dest port %d", port);
+			bp->vxlan_dst_port = 0;
+			vxlan_get_rx_port(bp->dev);
+		}
+	}
+#endif
+
+	/* Work below needs the rtnl lock not taken (it takes the lock itself
+	 * and can be called from other contexts as well)
+	 */
+	rtnl_unlock();
+
+	/* enable SR-IOV if applicable */
+	if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+					       &bp->sp_rtnl_state)) {
+		bnx2x_disable_sriov(bp);
+		bnx2x_enable_sriov(bp);
+	}
+}
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+	if (!netif_running(bp->dev))
+		goto period_task_exit;
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		BNX2X_ERR("period task called on emulation, ignoring\n");
+		goto period_task_exit;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	/*
+	 * The barrier is needed to ensure the ordering between the writing to
+	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+	 * the reading here.
+	 */
+	smp_mb();
+	if (bp->port.pmf) {
+		bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+		/* Re-queue task in 1 sec */
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+	}
+
+	bnx2x_release_phy_lock(bp);
+period_task_exit:
+	return;
+}
+
+/*
+ * Init service functions
+ */
+
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
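+	/* Pretend registers are laid out at a fixed per-function stride, so
+	 * derive this function's register address from F0's base and the
+	 * F1 - F0 distance.
+	 */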
+	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
+	return base + (BP_ABS_FUNC(bp)) * stride;
+}
+
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+					 u8 port, u32 reset_reg,
+					 struct bnx2x_mac_vals *vals)
+{
+	u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+	u32 base_addr;
+
+	if (!(mask & reset_reg))
+		return false;
+
+	BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+	base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+	vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+	REG_WR(bp, vals->umac_addr[port], 0);
+
+	return true;
+}
+
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+					struct bnx2x_mac_vals *vals)
+{
+	u32 val, base_addr, offset, mask, reset_reg;
+	bool mac_stopped = false;
+	u8 port = BP_PORT(bp);
+
+	/* reset addresses as they also mark which values were changed */
+	memset(vals, 0, sizeof(*vals));
+
+	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
+
+	if (!CHIP_IS_E3(bp)) {
+		val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+		mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+		if ((mask & reset_reg) && val) {
+			u32 wb_data[2];
+			BNX2X_DEV_INFO("Disable bmac Rx\n");
+			base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
+						: NIG_REG_INGRESS_BMAC0_MEM;
+			offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
+						: BIGMAC_REGISTER_BMAC_CONTROL;
+
+			/*
+			 * use rd/wr since we cannot use dmae. This is safe
+			 * since MCP won't access the bus due to the request
+			 * to unload, and no function on the path can be
+			 * loaded at this time.
+			 */
+			wb_data[0] = REG_RD(bp, base_addr + offset);
+			wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+			vals->bmac_addr = base_addr + offset;
+			vals->bmac_val[0] = wb_data[0];
+			vals->bmac_val[1] = wb_data[1];
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR(bp, vals->bmac_addr, wb_data[0]);
+			REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
+		}
+		BNX2X_DEV_INFO("Disable emac Rx\n");
+		vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+		vals->emac_val = REG_RD(bp, vals->emac_addr);
+		REG_WR(bp, vals->emac_addr, 0);
+		mac_stopped = true;
+	} else {
+		if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+			BNX2X_DEV_INFO("Disable xmac Rx\n");
+			base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+			val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
+			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+			       val & ~(1 << 1));
+			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+			       val | (1 << 1));
+			vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+			vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+			REG_WR(bp, vals->xmac_addr, 0);
+			mac_stopped = true;
+		}
+
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+							    reset_reg, vals);
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+							    reset_reg, vals);
+	}
+
+	if (mac_stopped)
+		msleep(20);
+}
+
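+/* The UNDI producer register holds the BD producer in the upper 16 bits and
+ * the RCQ producer in the lower 16 bits.
+ */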
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+					0x1848 + ((f) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val)	((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))
+
+#define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
+#define BCM_5710_UNDI_FW_MF_VERS	(0x05)
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+	/* UNDI marks its presence in DORQ -
+	 * it initializes CID offset for normal bell to 0x7
+	 */
+	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+	    MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+		return false;
+
+	if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+		BNX2X_DEV_INFO("UNDI previously loaded\n");
+		return true;
+	}
+
+	return false;
+}
+
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
+{
+	u16 rcq, bd;
+	u32 addr, tmp_reg;
+
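+	/* Functions 0-1 use the per-port UNDI producer address; functions 2
+	 * and above use the extended per-function address.
+	 */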
+	if (BP_FUNC(bp) < 2)
+		addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+	else
+		addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
+
+	tmp_reg = REG_RD(bp, addr);
+	rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+	bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+	tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+	REG_WR(bp, addr, tmp_reg);
+
+	BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+		       BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
+}
+
+static int bnx2x_prev_mcp_done(struct bnx2x *bp)
+{
+	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+				  DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
+	if (!rc) {
+		BNX2X_ERR("MCP response failure, aborting\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static struct bnx2x_prev_path_list *
+		bnx2x_prev_path_get_entry(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+
+	list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
+		if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+		    bp->pdev->bus->number == tmp_list->bus &&
+		    BP_PATH(bp) == tmp_list->path)
+			return tmp_list;
+
+	return NULL;
+}
+
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+	int rc;
+
+	rc = down_interruptible(&bnx2x_prev_sem);
+	if (rc) {
+		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+		return rc;
+	}
+
+	tmp_list = bnx2x_prev_path_get_entry(bp);
+	if (tmp_list) {
+		tmp_list->aer = 1;
+		rc = 0;
+	} else {
+		BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+			  BP_PATH(bp));
+	}
+
+	up(&bnx2x_prev_sem);
+
+	return rc;
+}
+
+static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+	bool rc = false;
+
+	if (down_trylock(&bnx2x_prev_sem))
+		return false;
+
+	tmp_list = bnx2x_prev_path_get_entry(bp);
+	if (tmp_list) {
+		if (tmp_list->aer) {
+			DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+			   BP_PATH(bp));
+		} else {
+			rc = true;
+			BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
+				       BP_PATH(bp));
+		}
+	}
+
+	up(&bnx2x_prev_sem);
+
+	return rc;
+}
+
+bool bnx2x_port_after_undi(struct bnx2x *bp)
+{
+	struct bnx2x_prev_path_list *entry;
+	bool val;
+
+	down(&bnx2x_prev_sem);
+
+	entry = bnx2x_prev_path_get_entry(bp);
+	val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
+
+	up(&bnx2x_prev_sem);
+
+	return val;
+}
+
+static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
+{
+	struct bnx2x_prev_path_list *tmp_list;
+	int rc;
+
+	rc = down_interruptible(&bnx2x_prev_sem);
+	if (rc) {
+		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+		return rc;
+	}
+
+	/* Check whether the entry for this path already exists */
+	tmp_list = bnx2x_prev_path_get_entry(bp);
+	if (tmp_list) {
+		if (!tmp_list->aer) {
+			BNX2X_ERR("Re-Marking the path.\n");
+		} else {
+			DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+			   BP_PATH(bp));
+			tmp_list->aer = 0;
+		}
+		up(&bnx2x_prev_sem);
+		return 0;
+	}
+	up(&bnx2x_prev_sem);
+
+	/* Create an entry for this path and add it */
+	tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+	if (!tmp_list) {
+		BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
+		return -ENOMEM;
+	}
+
+	tmp_list->bus = bp->pdev->bus->number;
+	tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
+	tmp_list->path = BP_PATH(bp);
+	tmp_list->aer = 0;
+	tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
+
+	rc = down_interruptible(&bnx2x_prev_sem);
+	if (rc) {
+		BNX2X_ERR("Received %d when tried to take lock\n", rc);
+		kfree(tmp_list);
+	} else {
+		DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+		   BP_PATH(bp));
+		list_add(&tmp_list->list, &bnx2x_prev_list);
+		up(&bnx2x_prev_sem);
+	}
+
+	return rc;
+}
+
+static int bnx2x_do_flr(struct bnx2x *bp)
+{
+	struct pci_dev *dev = bp->pdev;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+		return -EINVAL;
+	}
+
+	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+			  bp->common.bc_ver);
+		return -EINVAL;
+	}
+
+	if (!pci_wait_for_pending_transaction(dev))
+		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+	BNX2X_DEV_INFO("Initiating FLR\n");
+	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+	return 0;
+}
+
+static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+{
+	int rc;
+
+	BNX2X_DEV_INFO("Uncommon unload Flow\n");
+
+	/* Test if previous unload process was already finished for this path */
+	if (bnx2x_prev_is_path_marked(bp))
+		return bnx2x_prev_mcp_done(bp);
+
+	BNX2X_DEV_INFO("Path is unmarked\n");
+
+	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+	if (bnx2x_prev_is_after_undi(bp))
+		goto out;
+
+	/* If function has FLR capabilities, and existing FW version matches
+	 * the one required, then FLR will be sufficient to clean any residue
+	 * left by previous driver
+	 */
+	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
+
+	if (!rc) {
+		/* fw version is good */
+		BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+		rc = bnx2x_do_flr(bp);
+	}
+
+	if (!rc) {
+		/* FLR was performed */
+		BNX2X_DEV_INFO("FLR successful\n");
+		return 0;
+	}
+
+	BNX2X_DEV_INFO("Could not FLR\n");
+
+out:
+	/* Close the MCP request, return failure */
+	rc = bnx2x_prev_mcp_done(bp);
+	if (!rc)
+		rc = BNX2X_PREV_WAIT_NEEDED;
+
+	return rc;
+}
+
+static int bnx2x_prev_unload_common(struct bnx2x *bp)
+{
+	u32 reset_reg, tmp_reg = 0, rc;
+	bool prev_undi = false;
+	struct bnx2x_mac_vals mac_vals;
+
+	/* It is possible that a previous function received the 'common' answer
+	 * but has not loaded yet, creating a scenario where multiple functions
+	 * receive 'common' on the same path.
+	 */
+	BNX2X_DEV_INFO("Common unload Flow\n");
+
+	memset(&mac_vals, 0, sizeof(mac_vals));
+
+	if (bnx2x_prev_is_path_marked(bp))
+		return bnx2x_prev_mcp_done(bp);
+
+	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+
+	/* Reset should be performed after BRB is emptied */
+	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+		u32 timer_count = 1000;
+
+		/* Close the MAC Rx to prevent BRB from filling up */
+		bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+		/* close LLH filters for both ports towards the BRB */
+		bnx2x_set_rx_filter(&bp->link_params, 0);
+		bp->link_params.port ^= 1;
+		bnx2x_set_rx_filter(&bp->link_params, 0);
+		bp->link_params.port ^= 1;
+
+		/* Check if the UNDI driver was previously loaded */
+		if (bnx2x_prev_is_after_undi(bp)) {
+			prev_undi = true;
+			/* clear the UNDI indication */
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+			/* clear possible idle check errors */
+			REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
+		}
+		if (!CHIP_IS_E1x(bp))
+			/* block FW from writing to host */
+			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
+		/* wait until BRB is empty */
+		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+		while (timer_count) {
+			u32 prev_brb = tmp_reg;
+
+			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+			if (!tmp_reg)
+				break;
+
+			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
+
+			/* reset timer as long as BRB actually gets emptied */
+			if (prev_brb > tmp_reg)
+				timer_count = 1000;
+			else
+				timer_count--;
+
+			/* If UNDI resides in memory, manually increment it */
+			if (prev_undi)
+				bnx2x_prev_unload_undi_inc(bp, 1);
+
+			udelay(10);
+		}
+
+		if (!timer_count)
+			BNX2X_ERR("Failed to empty BRB, hope for the best\n");
+	}
+
+	/* No packets are in the pipeline, path is ready for reset */
+	bnx2x_reset_common(bp);
+
+	if (mac_vals.xmac_addr)
+		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+	if (mac_vals.umac_addr[0])
+		REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+	if (mac_vals.umac_addr[1])
+		REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
+	if (mac_vals.emac_addr)
+		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+	if (mac_vals.bmac_addr) {
+		REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+		REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+	}
+
+	rc = bnx2x_prev_mark_path(bp, prev_undi);
+	if (rc) {
+		bnx2x_prev_mcp_done(bp);
+		return rc;
+	}
+
+	return bnx2x_prev_mcp_done(bp);
+}
+
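+/* Handle leftovers of a previously loaded driver: clear PGLUE errors,
+ * release stale NVRAM/HW/ALR locks, then negotiate an unload with the MCP
+ * and run the common or uncommon cleanup flow as requested.
+ */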
+static int bnx2x_prev_unload(struct bnx2x *bp)
+{
+	int time_counter = 10;
+	u32 rc, fw, hw_lock_reg, hw_lock_val;
+	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
+
+	/* clear hw from errors which may have resulted from an interrupted
+	 * dmae transaction.
+	 */
+	bnx2x_clean_pglue_errors(bp);
+
+	/* Release previously held locks */
+	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
+		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
+		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+
+	hw_lock_val = REG_RD(bp, hw_lock_reg);
+	if (hw_lock_val) {
+		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
+			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
+		}
+
+		BNX2X_DEV_INFO("Release Previously held hw lock\n");
+		REG_WR(bp, hw_lock_reg, 0xffffffff);
+	} else
+		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
+
+	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
+		BNX2X_DEV_INFO("Release previously held alr\n");
+		bnx2x_release_alr(bp);
+	}
+
+	do {
+		int aer = 0;
+		/* Lock MCP using an unload request */
+		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+		if (!fw) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			break;
+		}
+
+		rc = down_interruptible(&bnx2x_prev_sem);
+		if (rc) {
+			BNX2X_ERR("Cannot check for AER; received %d when trying to take lock\n",
+				  rc);
+		} else {
+			/* If Path is marked by EEH, ignore unload status */
+			aer = !!(bnx2x_prev_path_get_entry(bp) &&
+				 bnx2x_prev_path_get_entry(bp)->aer);
+			up(&bnx2x_prev_sem);
+		}
+
+		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
+			rc = bnx2x_prev_unload_common(bp);
+			break;
+		}
+
+		/* non-common reply from MCP might require looping */
+		rc = bnx2x_prev_unload_uncommon(bp);
+		if (rc != BNX2X_PREV_WAIT_NEEDED)
+			break;
+
+		msleep(20);
+	} while (--time_counter);
+
+	if (!time_counter || rc) {
+		BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
+		rc = -EPROBE_DEFER;
+	}
+
+	/* Mark function if its port was used to boot from SAN */
+	if (bnx2x_port_after_undi(bp))
+		bp->link_params.feature_config_flags |=
+			FEATURE_CONFIG_BOOT_FROM_SAN;
+
+	BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
+
+	return rc;
+}
+
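+/* Read chip-wide information from registers and shmem: chip id/revision,
+ * port mode, flash size, shmem bases, bootcode version (and the feature
+ * flags it implies), boot mode, WoL capability and the board part number.
+ */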
+static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2, val3, val4, id, boot_mode;
+	u16 pmc;
+
+	/* Get the chip revision id and number. */
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+	val = REG_RD(bp, MISC_REG_CHIP_NUM);
+	id = ((val & 0xffff) << 16);
+	val = REG_RD(bp, MISC_REG_CHIP_REV);
+	id |= ((val & 0xf) << 12);
+
+	/* Metal is read from PCI regs, but we can't access >=0x400 from
+	 * the configuration space (so we need to reg_rd)
+	 */
+	val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+	id |= (((val >> 24) & 0xf) << 4);
+	val = REG_RD(bp, MISC_REG_BOND_ID);
+	id |= (val & 0xf);
+	bp->common.chip_id = id;
+
+	/* force 57811 according to MISC register */
+	if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+		if (CHIP_IS_57810(bp))
+			bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		else if (CHIP_IS_57810_MF(bp))
+			bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		bp->common.chip_id |= 0x1;
+	}
+
+	/* Set doorbell size */
+	bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+	if (!CHIP_IS_E1x(bp)) {
+		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+		if ((val & 1) == 0)
+			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+		else
+			val = (val >> 1) & 1;
+		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+						       "2_PORT_MODE");
+		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+						 CHIP_2_PORT_MODE;
+
+		if (CHIP_MODE_IS_4_PORT(bp))
+			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
+		else
+			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
+	} else {
+		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+		bp->pfid = bp->pf_num;			/* 0..7 */
+	}
+
+	BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
+
+	bp->link_params.chip_id = bp->common.chip_id;
+	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+	val = (REG_RD(bp, 0x2874) & 0x55);
+	if ((bp->common.chip_id & 0x1) ||
+	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+		bp->flags |= ONE_PORT_FLAG;
+		BNX2X_DEV_INFO("single port device\n");
+	}
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+	bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
+	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+		       bp->common.flash_size, bp->common.flash_size);
+
+	bnx2x_init_shmem(bp);
+
+	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+					MISC_REG_GENERIC_CR_1 :
+					MISC_REG_GENERIC_CR_0));
+
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	bp->link_params.shmem2_base = bp->common.shmem2_base;
+	if (SHMEM2_RD(bp, size) >
+	    (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+		bp->link_params.lfa_base =
+		REG_RD(bp, bp->common.shmem2_base +
+		       (u32)offsetof(struct shmem2_region,
+				     lfa_host_addr[BP_PORT(bp)]));
+	else
+		bp->link_params.lfa_base = 0;
+	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
+		       bp->common.shmem_base, bp->common.shmem2_base);
+
+	if (!bp->common.shmem_base) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+	bp->link_params.hw_led_mode = ((bp->common.hw_config &
+					SHARED_HW_CFG_LED_MODE_MASK) >>
+				       SHARED_HW_CFG_LED_MODE_SHIFT);
+
+	bp->link_params.feature_config_flags = 0;
+	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+		bp->link_params.feature_config_flags |=
+				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+	else
+		bp->link_params.feature_config_flags &=
+				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+	bp->common.bc_ver = val;
+	BNX2X_DEV_INFO("bc_ver %X\n", val);
+	if (val < BNX2X_BC_VER) {
+		/* For now only warn; later we might need to enforce this */
+		BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
+			  BNX2X_BC_VER, val);
+	}
+	bp->link_params.feature_config_flags |=
+				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+		FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+		FEATURE_CONFIG_MT_SUPPORT : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+			BC_SUPPORTS_PFC_STATS : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+			BC_SUPPORTS_FCOE_FEATURES : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+			BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+			BC_SUPPORTS_RMMOD_CMD : 0;
+
+	boot_mode = SHMEM_RD(bp,
+			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+	switch (boot_mode) {
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+		break;
+	case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+		bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+		break;
+	}
+
+	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
+	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+	BNX2X_DEV_INFO("%sWoL capable\n",
+		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+		 val, val2, val3, val4);
+}
+
+#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
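+/* Determine this function's IGU status block layout: in backward-compatible
+ * mode it is derived from the PF/VN number, otherwise the IGU CAM is scanned
+ * for entries owned by this PF to find the default SB, base SB and SB count.
+ */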
+static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+	int pfid = BP_FUNC(bp);
+	int igu_sb_id;
+	u32 val;
+	u8 fid, igu_sb_cnt = 0;
+
+	bp->igu_base_sb = 0xff;
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		int vn = BP_VN(bp);
+		igu_sb_cnt = bp->igu_sb_cnt;
+		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+			FP_SB_MAX_E1x;
+
+		bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
+			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+		return 0;
+	}
+
+	/* IGU in normal mode - read CAM */
+	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+	     igu_sb_id++) {
+		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+			continue;
+		fid = IGU_FID(val);
+		if ((fid & IGU_FID_ENCODE_IS_PF)) {
+			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+				continue;
+			if (IGU_VEC(val) == 0)
+				/* default status block */
+				bp->igu_dsb_id = igu_sb_id;
+			else {
+				if (bp->igu_base_sb == 0xff)
+					bp->igu_base_sb = igu_sb_id;
+				igu_sb_cnt++;
+			}
+		}
+	}
+
+#ifdef CONFIG_PCI_MSI
+	/* Due to new PF resource allocation by MFW T7.4 and above, the number
+	 * of CAM entries may differ from the value advertised in PCI.
+	 * The driver should use the minimum of the two as the actual status
+	 * block count.
+	 */
+	bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
+#endif
+
+	if (igu_sb_cnt == 0) {
+		BNX2X_ERR("CAM configuration error\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
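+/* Aggregate the link modes supported by the configured PHYs and mask them
+ * with the per-configuration speed_cap_mask values taken from NVRAM.
+ */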
+static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
+{
+	int cfg_size = 0, idx, port = BP_PORT(bp);
+
+	/* Aggregation of supported attributes of all external phys */
+	bp->port.supported[0] = 0;
+	bp->port.supported[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+		cfg_size = 1;
+		break;
+	case 2:
+		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+		cfg_size = 1;
+		break;
+	case 3:
+		if (bp->link_params.multi_phy_config &
+		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		} else {
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		}
+		cfg_size = 2;
+		break;
+	}
+
+	if (!(bp->port.supported[0] || bp->port.supported[1])) {
+		BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
+			   SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config),
+			   SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config2));
+		return;
+	}
+
+	if (CHIP_IS_E3(bp))
+		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+	else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+			break;
+		case SWITCH_CFG_10G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+			break;
+		default:
+			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+				  bp->port.link_config[0]);
+			return;
+		}
+	}
+	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+	/* mask what we support according to speed_cap_mask per configuration */
+	for (idx = 0; idx < cfg_size; idx++) {
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+						     SUPPORTED_1000baseT_Full);
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+			bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
+	}
+
+	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+		       bp->port.supported[1]);
+}
+
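+/* Translate the NVRAM link_config of each configuration into the requested
+ * line speed, duplex, flow control and advertised modes.
+ */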
+static void bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+	u32 link_config, idx, cfg_size = 0;
+	bp->port.advertising[0] = 0;
+	bp->port.advertising[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+	case 2:
+		cfg_size = 1;
+		break;
+	case 3:
+		cfg_size = 2;
+		break;
+	}
+	for (idx = 0; idx < cfg_size; idx++) {
+		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+		link_config = bp->port.link_config[idx];
+		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+		case PORT_FEATURE_LINK_SPEED_AUTO:
+			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_AUTO_NEG;
+				bp->port.advertising[idx] |=
+					bp->port.supported[idx];
+				if (bp->link_params.phy[EXT_PHY1].type ==
+				    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+					bp->port.advertising[idx] |=
+					(SUPPORTED_100baseT_Half |
+					 SUPPORTED_100baseT_Full);
+			} else {
+				/* force 10G, no AN */
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseT_Full |
+					 ADVERTISED_FIBRE);
+				continue;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10M_FULL:
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Full |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+					    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10M_HALF:
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->link_params.req_duplex[idx] =
+					DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Half |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+					    link_config,
+					  bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_100M_FULL:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_100baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_100;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Full |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+					    link_config,
+					  bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_100M_HALF:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_100baseT_Half) {
+				bp->link_params.req_line_speed[idx] =
+								SPEED_100;
+				bp->link_params.req_duplex[idx] =
+								DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Half |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_1G:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_1000baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_1000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_1000baseT_Full |
+					 ADVERTISED_TP);
+			} else if (bp->port.supported[idx] &
+				   SUPPORTED_1000baseKX_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_1000;
+				bp->port.advertising[idx] |=
+					ADVERTISED_1000baseKX_Full;
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_2_5G:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_2500baseX_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_2500;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_2500baseX_Full |
+						ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10G_CX4:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_10000baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseT_Full |
+						ADVERTISED_FIBRE);
+			} else if (bp->port.supported[idx] &
+				   SUPPORTED_10000baseKR_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseKR_Full |
+						ADVERTISED_FIBRE);
+			} else {
+				BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+		case PORT_FEATURE_LINK_SPEED_20G:
+			bp->link_params.req_line_speed[idx] = SPEED_20000;
+
+			break;
+		default:
+			BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
+				  link_config);
+			bp->link_params.req_line_speed[idx] =
+						SPEED_AUTO_NEG;
+			bp->port.advertising[idx] =
+					bp->port.supported[idx];
+			break;
+		}
+
+		bp->link_params.req_flow_ctrl[idx] = (link_config &
+					 PORT_FEATURE_FLOW_CONTROL_MASK);
+		if (bp->link_params.req_flow_ctrl[idx] ==
+		    BNX2X_FLOW_CTRL_AUTO) {
+			if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
+				bp->link_params.req_flow_ctrl[idx] =
+							BNX2X_FLOW_CTRL_NONE;
+			else
+				bnx2x_set_requested_fc(bp);
+		}
+
+		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
+			       bp->link_params.req_line_speed[idx],
+			       bp->link_params.req_duplex[idx],
+			       bp->link_params.req_flow_ctrl[idx],
+			       bp->port.advertising[idx]);
+	}
+}
+
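+/* Assemble a 6-byte MAC address from the 16-bit upper and 32-bit lower
+ * words stored in shmem/MF config, converting both halves to big-endian.
+ */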
+static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+	__be16 mac_hi_be = cpu_to_be16(mac_hi);
+	__be32 mac_lo_be = cpu_to_be32(mac_lo);
+	memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+	memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
+}
+
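+/* Read per-port link parameters from shmem (lane config, speed capability
+ * masks, link configs, multi-phy config, WoL default, storage personality),
+ * probe the PHYs and set up the MDIO address and EEE mode.
+ */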
+static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 config;
+	u32 ext_phy_type, ext_phy_config, eee_mode;
+
+	bp->link_params.bp = bp;
+	bp->link_params.port = port;
+
+	bp->link_params.lane_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+	bp->link_params.speed_cap_mask[0] =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask) &
+		PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+	bp->link_params.speed_cap_mask[1] =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask2) &
+		PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+	bp->port.link_config[0] =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+	bp->port.link_config[1] =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+	bp->link_params.multi_phy_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+	/* If the device is capable of WoL, set the default state according
+	 * to the HW
+	 */
+	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+		   (config & PORT_FEATURE_WOL_ENABLED));
+
+	if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+	    PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+		bp->flags |= NO_ISCSI_FLAG;
+	if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+	    PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+		bp->flags |= NO_FCOE_FLAG;
+
+	BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
+		       bp->link_params.lane_config,
+		       bp->link_params.speed_cap_mask[0],
+		       bp->port.link_config[0]);
+
+	bp->link_params.switch_cfg = (bp->port.link_config[0] &
+				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	bnx2x_phy_probe(&bp->link_params);
+	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+	bnx2x_link_settings_requested(bp);
+
+	/*
+	 * If connected directly, work with the internal PHY, otherwise, work
+	 * with the external PHY
+	 */
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bp->mdio.prtad = bp->port.phy_addr;
+
+	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+		bp->mdio.prtad =
+			XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+	/* Configure link feature according to nvram value */
+	eee_mode = (((SHMEM_RD(bp, dev_info.
+		      port_feature_config[port].eee_power_mode)) &
+		     PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+		    PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+	if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+		bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+					   EEE_MODE_ENABLE_LPI |
+					   EEE_MODE_OUTPUT_TIME;
+	} else {
+		bp->link_params.eee_mode = 0;
+	}
+}
+
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
+{
+	u32 no_flags = NO_ISCSI_FLAG;
+	int port = BP_PORT(bp);
+	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_iscsi_conn);
+
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= no_flags;
+		return;
+	}
+
+	/* Get the number of maximum allowed iSCSI connections */
+	bp->cnic_eth_dev.max_iscsi_conn =
+		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn);
+
+	/*
+	 * If maximum allowed number of connections is zero -
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= no_flags;
+}
+
+static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+{
+	/* Port info */
+	bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
+	bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
+
+	/* Node info */
+	bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
+	bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
+}
+
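+/* Count how many functions on this path (or ports, in single-function mode)
+ * have FCoE enabled; used to split the FCoE exchange resources between them.
+ */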
+static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
+{
+	u8 count = 0;
+
+	if (IS_MF(bp)) {
+		u8 fid;
+
+		/* iterate over absolute function ids for this path: */
+		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
+			if (IS_MF_SD(bp)) {
+				u32 cfg = MF_CFG_RD(bp,
+						    func_mf_config[fid].config);
+
+				if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
+				    ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
+					    FUNC_MF_CFG_PROTOCOL_FCOE))
+					count++;
+			} else {
+				u32 cfg = MF_CFG_RD(bp,
+						    func_ext_config[fid].
+								      func_cfg);
+
+				if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
+				    (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+					count++;
+			}
+		}
+	} else { /* SF */
+		int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
+
+		for (port = 0; port < port_cnt; port++) {
+			u32 lic = SHMEM_RD(bp,
+					   drv_lic_key[port].max_fcoe_conn) ^
+				  FW_ENCODE_32BIT_PATTERN;
+			if (lic)
+				count++;
+		}
+	}
+
+	return count;
+}
+
+static void bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_ABS_FUNC(bp);
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_fcoe_conn);
+	u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
+
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= NO_FCOE_FLAG;
+		return;
+	}
+
+	/* Get the number of maximum allowed FCoE connections */
+	bp->cnic_eth_dev.max_fcoe_conn =
+		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+	/* Calculate the number of maximum allowed FCoE tasks */
+	bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+
+	/* check if FCoE resources must be shared between different functions */
+	if (num_fcoe_func)
+		bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
+
+	/* Read the WWN: */
+	if (!IS_MF(bp)) {
+		/* Port info */
+		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+			SHMEM_RD(bp,
+				 dev_info.port_hw_config[port].
+				 fcoe_wwn_port_name_upper);
+		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+			SHMEM_RD(bp,
+				 dev_info.port_hw_config[port].
+				 fcoe_wwn_port_name_lower);
+
+		/* Node info */
+		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+			SHMEM_RD(bp,
+				 dev_info.port_hw_config[port].
+				 fcoe_wwn_node_name_upper);
+		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+			SHMEM_RD(bp,
+				 dev_info.port_hw_config[port].
+				 fcoe_wwn_node_name_lower);
+	} else if (!IS_MF_SD(bp)) {
+		/* Read the WWN info only if the FCoE feature is enabled for
+		 * this function.
+		 */
+		if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
+			bnx2x_get_ext_wwn_info(bp, func);
+	} else {
+		if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
+			bnx2x_get_ext_wwn_info(bp, func);
+	}
+
+	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
+
+	/*
+	 * If maximum allowed number of connections is zero -
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_fcoe_conn)
+		bp->flags |= NO_FCOE_FLAG;
+}
+
+static void bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	/*
+	 * iSCSI may be dynamically disabled, but reading the
+	 * info here lets the driver decrease its memory usage
+	 * if the feature is disabled for good.
+	 */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_get_fcoe_info(bp);
+}
+
+static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+	u8 *fip_mac = bp->fip_mac;
+
+	if (IS_MF(bp)) {
+		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+		 * MAC is missing then the corresponding feature should be
+		 * disabled. In non-SD mode the feature configuration comes
+		 * from struct func_ext_config.
+		 */
+		if (!IS_MF_SD(bp)) {
+			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						 iscsi_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						iscsi_mac_addr_lower);
+				bnx2x_set_mac_buf(iscsi_mac, val, val2);
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else {
+				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+			}
+
+			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						 fcoe_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_mac_addr_lower);
+				bnx2x_set_mac_buf(fip_mac, val, val2);
+				BNX2X_DEV_INFO
+					("Read FCoE L2 MAC: %pM\n", fip_mac);
+			} else {
+				bp->flags |= NO_FCOE_FLAG;
+			}
+
+			bp->mf_ext_config = cfg;
+
+		} else { /* SD MODE */
+			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+				/* use primary mac as iscsi mac */
+				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+				BNX2X_DEV_INFO("SD ISCSI MODE\n");
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+				/* use primary mac as fip mac */
+				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+				BNX2X_DEV_INFO("SD FCoE MODE\n");
+				BNX2X_DEV_INFO
+					("Read FIP MAC: %pM\n", fip_mac);
+			}
+		}
+
+		/* If this is a storage-only interface, use SAN mac as
+		 * primary MAC. Notice that for SD this is already the case,
+		 * as the SAN mac was copied from the primary MAC.
+		 */
+		if (IS_MF_FCOE_AFEX(bp))
+			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+	} else {
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				iscsi_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+			       iscsi_mac_lower);
+		bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				fcoe_fip_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+			       fcoe_fip_mac_lower);
+		bnx2x_set_mac_buf(fip_mac, val, val2);
+	}
+
+	/* Disable iSCSI OOO if MAC configuration is invalid. */
+	if (!is_valid_ether_addr(iscsi_mac)) {
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+		eth_zero_addr(iscsi_mac);
+	}
+
+	/* Disable FCoE if MAC configuration is invalid. */
+	if (!is_valid_ether_addr(fip_mac)) {
+		bp->flags |= NO_FCOE_FLAG;
+		eth_zero_addr(bp->fip_mac);
+	}
+}
+
+static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+
+	/* Zero primary MAC configuration */
+	eth_zero_addr(bp->dev->dev_addr);
+
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERROR("warning: random MAC workaround active\n");
+		eth_hw_addr_random(bp->dev);
+	} else if (IS_MF(bp)) {
+		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
+	} else {
+		/* in SF read MACs from port configuration */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
+	}
+
+	if (!BP_NOMCP(bp)) {
+		/* Read physical port identifier from shmem */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+		bp->flags |= HAS_PHYS_PORT_ID;
+	}
+
+	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+
+	if (!is_valid_ether_addr(bp->dev->dev_addr))
+		dev_err(&bp->pdev->dev,
+			"bad Ethernet MAC address configuration: %pM\n"
+			"change it manually before bringing up the appropriate network interface\n",
+			bp->dev->dev_addr);
+}
+
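+/* Return true if pause-on-host-ring (dropless flow control) is enabled in
+ * NVRAM for this function (MF, non-E1x) or port; VFs always return false.
+ */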
+static bool bnx2x_get_dropless_info(struct bnx2x *bp)
+{
+	int tmp;
+	u32 cfg;
+
+	if (IS_VF(bp))
+		return false;
+
+	if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
+		/* Take function: tmp = func */
+		tmp = BP_ABS_FUNC(bp);
+		cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
+		cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
+	} else {
+		/* Take port: tmp = port */
+		tmp = BP_PORT(bp);
+		cfg = SHMEM_RD(bp,
+			       dev_info.port_hw_config[tmp].generic_features);
+		cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
+	}
+	return cfg;
+}
+
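+/* Switch-independent MF mode is only valid if the upper MAC bytes in the MF
+ * configuration are legal (not 0xffff); if so, record the mode and mf_config.
+ */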
+static void validate_set_si_mode(struct bnx2x *bp)
+{
+	u8 func = BP_ABS_FUNC(bp);
+	u32 val;
+
+	val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+
+	/* check for legal mac (upper bytes) */
+	if (val != 0xffff) {
+		bp->mf_mode = MULTI_FUNCTION_SI;
+		bp->mf_config[BP_VN(bp)] =
+			MF_CFG_RD(bp, func_mf_config[func].config);
+	} else
+		BNX2X_DEV_INFO("illegal MAC address for SI\n");
+}
+
+static int bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+	int /*abs*/func = BP_ABS_FUNC(bp);
+	int vn, mfw_vn;
+	u32 val = 0, val2 = 0;
+	int rc = 0;
+
+	/* Validate that chip access is feasible */
+	if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
+		dev_err(&bp->pdev->dev,
+			"Chip read returns all Fs. Preventing probe from continuing\n");
+		return -EINVAL;
+	}
+
+	bnx2x_get_common_hwinfo(bp);
+
+	/*
+	 * initialize IGU parameters
+	 */
+	if (CHIP_IS_E1x(bp)) {
+		bp->common.int_block = INT_BLOCK_HC;
+
+		bp->igu_dsb_id = DEF_SB_IGU_ID;
+		bp->igu_base_sb = 0;
+	} else {
+		bp->common.int_block = INT_BLOCK_IGU;
+
+		/* do not allow device reset during IGU info processing */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+			int tout = 5000;
+
+			BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				tout--;
+				usleep_range(1000, 2000);
+			}
+
+			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				dev_err(&bp->pdev->dev,
+					"FORCING Normal Mode failed!!!\n");
+				bnx2x_release_hw_lock(bp,
+						      HW_LOCK_RESOURCE_RESET);
+				return -EPERM;
+			}
+		}
+
+		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+		} else
+			BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+		rc = bnx2x_get_igu_cam_info(bp);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * Set the base FW non-default (fast path) status block id. This value
+	 * is used to initialize the fw_sb_id saved on the fp/queue structure,
+	 * which determines the id used by the FW.
+	 */
+	if (CHIP_IS_E1x(bp))
+		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+	else /*
+	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+	      * the same queue are indicated on the same IGU SB). So we prefer
+	      * FW and IGU SBs to be the same value.
+	      */
+		bp->base_fw_ndsb = bp->igu_base_sb;
+
+	BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
+		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+		       bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+	/*
+	 * Initialize MF configuration
+	 */
+
+	bp->mf_ov = 0;
+	bp->mf_mode = 0;
+	bp->mf_sub_mode = 0;
+	vn = BP_VN(bp);
+	mfw_vn = BP_FW_MB_IDX(bp);
+
+	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			      (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+		if (SHMEM2_HAS(bp, mf_cfg_addr))
+			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+		else
+			bp->common.mf_cfg_base = bp->common.shmem_base +
+				offsetof(struct shmem_region, func_mb) +
+				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+		/*
+		 * get mf configuration:
+		 * 1. Existence of MF configuration
+		 * 2. MAC address must be legal (check only upper bytes)
+		 *    for  Switch-Independent mode;
+		 *    OVLAN must be legal for Switch-Dependent mode
+		 * 3. SF_MODE configures specific MF mode
+		 */
+		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			/* get mf configuration */
+			val = SHMEM_RD(bp,
+				       dev_info.shared_feature_config.config);
+			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+			switch (val) {
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+				validate_set_si_mode(bp);
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+				if ((!CHIP_IS_E1x(bp)) &&
+				    (MF_CFG_RD(bp, func_mf_config[func].
+					       mac_upper) != 0xffff) &&
+				    (SHMEM2_HAS(bp,
+						afex_driver_support))) {
+					bp->mf_mode = MULTI_FUNCTION_AFEX;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else {
+					BNX2X_DEV_INFO("can not configure afex mode\n");
+				}
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+				/* get OV configuration */
+				val = MF_CFG_RD(bp,
+					func_mf_config[FUNC_0].e1hov_tag);
+				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+					bp->mf_mode = MULTI_FUNCTION_SD;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else
+					BNX2X_DEV_INFO("illegal OV for SD\n");
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+				bp->mf_mode = MULTI_FUNCTION_SD;
+				bp->mf_sub_mode = SUB_MF_MODE_BD;
+				bp->mf_config[vn] =
+					MF_CFG_RD(bp,
+						  func_mf_config[func].config);
+
+				if (SHMEM2_HAS(bp, mtu_size)) {
+					int mtu_idx = BP_FW_MB_IDX(bp);
+					u16 mtu_size;
+					u32 mtu;
+
+					mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+					mtu_size = (u16)mtu;
+					DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+					   mtu_size, mtu);
+
+					/* if valid: update device mtu */
+					if (((mtu_size + ETH_HLEN) >=
+					     ETH_MIN_PACKET_SIZE) &&
+					    (mtu_size <=
+					     ETH_MAX_JUMBO_PACKET_SIZE))
+						bp->dev->mtu = mtu_size;
+				}
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
+				bp->mf_mode = MULTI_FUNCTION_SD;
+				bp->mf_sub_mode = SUB_MF_MODE_UFP;
+				bp->mf_config[vn] =
+					MF_CFG_RD(bp,
+						  func_mf_config[func].config);
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+				bp->mf_config[vn] = 0;
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
+				val2 = SHMEM_RD(bp,
+					dev_info.shared_hw_config.config_3);
+				val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
+				switch (val2) {
+				case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
+					validate_set_si_mode(bp);
+					bp->mf_sub_mode =
+							SUB_MF_MODE_NPAR1_DOT_5;
+					break;
+				default:
+					/* Unknown configuration */
+					bp->mf_config[vn] = 0;
+					BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
+						       val);
+				}
+				break;
+			default:
+				/* Unknown configuration: reset mf_config */
+				bp->mf_config[vn] = 0;
+				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
+			}
+		}
+
+		BNX2X_DEV_INFO("%s function mode\n",
+			       IS_MF(bp) ? "multi" : "single");
+
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+			      FUNC_MF_CFG_E1HOV_TAG_MASK;
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+				bp->mf_ov = val;
+				bp->path_has_ovlan = true;
+
+				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
+					       func, bp->mf_ov, bp->mf_ov);
+			} else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+				   (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
+				dev_err(&bp->pdev->dev,
+					"Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
+					func);
+				bp->path_has_ovlan = true;
+			} else {
+				dev_err(&bp->pdev->dev,
+					"No valid MF OV for func %d, aborting\n",
+					func);
+				return -EPERM;
+			}
+			break;
+		case MULTI_FUNCTION_AFEX:
+			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+			break;
+		case MULTI_FUNCTION_SI:
+			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
+				       func);
+			break;
+		default:
+			if (vn) {
+				dev_err(&bp->pdev->dev,
+					"VN %d is in a single function mode, aborting\n",
+					vn);
+				return -EPERM;
+			}
+			break;
+		}
+
+		/* Check if the other port on the path needs ovlan:
+		 * since the MF configuration is shared between ports, the
+		 * only possible mixed modes are
+		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
+		 */
+		if (CHIP_MODE_IS_4_PORT(bp) &&
+		    !bp->path_has_ovlan &&
+		    !IS_MF(bp) &&
+		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			u8 other_port = !BP_PORT(bp);
+			u8 other_func = BP_PATH(bp) + 2*other_port;
+			val = MF_CFG_RD(bp,
+					func_mf_config[other_func].e1hov_tag);
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+				bp->path_has_ovlan = true;
+		}
+	}
+
+	/* adjust igu_sb_cnt to MF for E1H */
+	if (CHIP_IS_E1H(bp) && IS_MF(bp))
+		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
+
+	/* port info */
+	bnx2x_get_port_hwinfo(bp);
+
+	/* Get MAC addresses */
+	bnx2x_get_mac_hwinfo(bp);
+
+	bnx2x_get_cnic_info(bp);
+
+	return rc;
+}
+
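+/* Parse the PCI VPD read-only section and, for boards carrying the expected
+ * manufacturer ID, copy the vendor-specific version keyword into bp->fw_ver.
+ */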
+static void bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+	int cnt, i, block_end, rodi;
+	char vpd_start[BNX2X_VPD_LEN+1];
+	char str_id_reg[VENDOR_ID_LEN+1];
+	char str_id_cap[VENDOR_ID_LEN+1];
+	char *vpd_data;
+	char *vpd_extended_data = NULL;
+	u8 len;
+
+	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
+	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+	if (cnt < BNX2X_VPD_LEN)
+		goto out_not_found;
+
+	/* VPD RO tag should be first tag after identifier string, hence
+	 * we should be able to find it in first BNX2X_VPD_LEN chars
+	 */
+	i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
+			     PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto out_not_found;
+
+	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+		    pci_vpd_lrdt_size(&vpd_start[i]);
+
+	i += PCI_VPD_LRDT_TAG_SIZE;
+
+	if (block_end > BNX2X_VPD_LEN) {
+		vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+		if (vpd_extended_data == NULL)
+			goto out_not_found;
+
+		/* read rest of vpd image into vpd_extended_data */
+		memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+		cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+				   block_end - BNX2X_VPD_LEN,
+				   vpd_extended_data + BNX2X_VPD_LEN);
+		if (cnt < (block_end - BNX2X_VPD_LEN))
+			goto out_not_found;
+		vpd_data = vpd_extended_data;
+	} else
+		vpd_data = vpd_start;
+
+	/* now vpd_data holds full vpd content in both cases */
+
+	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+				   PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (rodi < 0)
+		goto out_not_found;
+
+	len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+	if (len != VENDOR_ID_LEN)
+		goto out_not_found;
+
+	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+	/* vendor specific info */
+	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+						PCI_VPD_RO_KEYWORD_VENDOR0);
+		if (rodi >= 0) {
+			len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+				memcpy(bp->fw_ver, &vpd_data[rodi], len);
+				bp->fw_ver[len] = ' ';
+			}
+		}
+		kfree(vpd_extended_data);
+		return;
+	}
+out_not_found:
+	kfree(vpd_extended_data);
+	return;
+}
+
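+/* Build the INIT_MODE_FLAGS bitmap (ASIC/FPGA/emulation, 2- vs 4-port mode,
+ * chip family, MF mode and endianness) used later during initialization.
+ */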
+static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+	u32 flags = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		SET_FLAGS(flags, MODE_FPGA);
+	else if (CHIP_REV_IS_EMUL(bp))
+		SET_FLAGS(flags, MODE_EMUL);
+	else
+		SET_FLAGS(flags, MODE_ASIC);
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		SET_FLAGS(flags, MODE_PORT4);
+	else
+		SET_FLAGS(flags, MODE_PORT2);
+
+	if (CHIP_IS_E2(bp))
+		SET_FLAGS(flags, MODE_E2);
+	else if (CHIP_IS_E3(bp)) {
+		SET_FLAGS(flags, MODE_E3);
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			SET_FLAGS(flags, MODE_E3_A0);
+		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+	}
+
+	if (IS_MF(bp)) {
+		SET_FLAGS(flags, MODE_MF);
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			SET_FLAGS(flags, MODE_MF_SD);
+			break;
+		case MULTI_FUNCTION_SI:
+			SET_FLAGS(flags, MODE_MF_SI);
+			break;
+		case MULTI_FUNCTION_AFEX:
+			SET_FLAGS(flags, MODE_MF_AFEX);
+			break;
+		}
+	} else
+		SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+	SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+	INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int bnx2x_init_bp(struct bnx2x *bp)
+{
+	int func;
+	int rc;
+
+	mutex_init(&bp->port.phy_mutex);
+	mutex_init(&bp->fw_mb_mutex);
+	mutex_init(&bp->drv_info_mutex);
+	sema_init(&bp->stats_lock, 1);
+	bp->drv_info_mng_owner = false;
+	INIT_LIST_HEAD(&bp->vlan_reg);
+
+	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+	INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
+	if (IS_PF(bp)) {
+		rc = bnx2x_get_hwinfo(bp);
+		if (rc)
+			return rc;
+	} else {
+		eth_zero_addr(bp->dev->dev_addr);
+	}
+
+	bnx2x_set_modes_bitmap(bp);
+
+	rc = bnx2x_alloc_mem_bp(bp);
+	if (rc)
+		return rc;
+
+	bnx2x_read_fwinfo(bp);
+
+	func = BP_FUNC(bp);
+
+	/* need to reset chip if undi was active */
+	if (IS_PF(bp) && !BP_NOMCP(bp)) {
+		/* init fw_seq */
+		bp->fw_seq =
+			SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+							DRV_MSG_SEQ_NUMBER_MASK;
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+		rc = bnx2x_prev_unload(bp);
+		if (rc) {
+			bnx2x_free_mem_bp(bp);
+			return rc;
+		}
+	}
+
+	if (CHIP_REV_IS_FPGA(bp))
+		dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+	if (BP_NOMCP(bp) && (func == 0))
+		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
+
+	bp->disable_tpa = disable_tpa;
+	bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
+	/* Reduce memory usage in kdump environment by disabling TPA */
+	bp->disable_tpa |= is_kdump_kernel();
+
+	/* Set TPA flags */
+	if (bp->disable_tpa) {
+		bp->dev->hw_features &= ~NETIF_F_LRO;
+		bp->dev->features &= ~NETIF_F_LRO;
+	}
+
+	if (CHIP_IS_E1(bp))
+		bp->dropless_fc = 0;
+	else
+		bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
+
+	bp->mrrs = mrrs;
+
+	bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
+	if (IS_VF(bp))
+		bp->rx_ring_size = MAX_RX_AVAIL;
+
+	/* make sure that the numbers are in the right granularity */
+	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
+
+	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + bp->current_interval;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2x_timer;
+
+	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
+	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
+	    SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
+	    SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
+		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+		bnx2x_dcbx_init_params(bp);
+	} else {
+		bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
+	}
+
+	if (CHIP_IS_E1x(bp))
+		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+	else
+		bp->cnic_base_cl_id = FP_SB_MAX_E2;
+
+	/* multiple tx priority */
+	if (IS_VF(bp))
+		bp->max_cos = 1;
+	else if (CHIP_IS_E1x(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+	else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+	else if (CHIP_IS_E3B0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+	else
+		BNX2X_ERR("unknown chip %x revision %x\n",
+			  CHIP_NUM(bp), CHIP_REV(bp));
+	BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
+
+	/* We need at least one default status block for slow-path events,
+	 * second status block for the L2 queue, and a third status block for
+	 * CNIC if supported.
+	 */
+	if (IS_VF(bp))
+		bp->min_msix_vec_cnt = 1;
+	else if (CNIC_SUPPORT(bp))
+		bp->min_msix_vec_cnt = 3;
+	else /* PF w/o cnic */
+		bp->min_msix_vec_cnt = 2;
+	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+
+	bp->dump_preset_idx = 1;
+
+	if (CHIP_IS_E3B0(bp))
+		bp->flags |= PTP_SUPPORTED;
+
+	return rc;
+}
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	bp->stats_init = true;
+
+	netif_carrier_off(dev);
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	/* If a parity error happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 * Parity recovery is only relevant for PF driver.
+	 */
+	if (IS_PF(bp)) {
+		int other_engine = BP_PATH(bp) ? 0 : 1;
+		bool other_load_status, load_status;
+		bool global = false;
+
+		other_load_status = bnx2x_get_load_status(bp, other_engine);
+		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+		    bnx2x_chk_parity_attn(bp, &global, true)) {
+			do {
+				/* If there are attentions and they are in a
+				 * global blocks, set the GLOBAL_RESET bit
+				 * regardless whether it will be this function
+				 * that will complete the recovery or not.
+				 */
+				if (global)
+					bnx2x_set_reset_global(bp);
+
+				/* Only the first function on the current
+				 * engine should try to recover in open. In case
+				 * of attentions in global blocks only the first
+				 * in the chip should try to recover.
+				 */
+				if ((!load_status &&
+				     (!global || !other_load_status)) &&
+				      bnx2x_trylock_leader_lock(bp) &&
+				      !bnx2x_leader_reset(bp)) {
+					netdev_info(bp->dev,
+						    "Recovered in open\n");
+					break;
+				}
+
+				/* recovery has failed... */
+				bnx2x_set_power_state(bp, PCI_D3hot);
+				bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+				BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+					  "If you still see this message after a few retries then power cycle is required.\n");
+
+				return -EAGAIN;
+			} while (0);
+		}
+	}
+
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+	if (IS_PF(bp))
+		vxlan_get_rx_port(dev);
+#endif
+
+	return 0;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Unload the driver, release IRQs */
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+
+	return 0;
+}
+
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p)
+{
+	int mc_count = netdev_mc_count(bp->dev);
+	struct bnx2x_mcast_list_elem *mc_mac =
+		kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
+	struct netdev_hw_addr *ha;
+
+	if (!mc_mac)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&p->mcast_list);
+
+	netdev_for_each_mc_addr(ha, bp->dev) {
+		mc_mac->mac = bnx2x_mc_addr(ha);
+		list_add_tail(&mc_mac->link, &p->mcast_list);
+		mc_mac++;
+	}
+
+	p->mcast_list_len = mc_count;
+
+	return 0;
+}
+
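+/* The whole mcast list was allocated as a single kcalloc() array, so freeing
+ * the first element releases every entry at once.
+ */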
+static void bnx2x_free_mcast_macs_list(
+	struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_mcast_list_elem *mc_mac =
+		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+				 link);
+
+	WARN_ON(!mc_mac);
+	kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new unicast MACs list.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
+	unsigned long ramrod_flags = 0;
+
+	/* First schedule a clean-up of the old configuration */
+	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+		return rc;
+	}
+
+	netdev_for_each_uc_addr(ha, dev) {
+		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+				       BNX2X_UC_LIST_MAC, &ramrod_flags);
+		if (rc == -EEXIST) {
+			DP(BNX2X_MSG_SP,
+			   "Failed to schedule ADD operations: %d\n", rc);
+			/* do not treat adding same MAC as error */
+			rc = 0;
+
+		} else if (rc < 0) {
+
+			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+				  rc);
+			return rc;
+		}
+	}
+
+	/* Execute the pending commands */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+				 BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+	int rc = 0;
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	/* first, clear all configured multicast MACs */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
+		return rc;
+	}
+
+	/* then, configure a new MACs list */
+	if (netdev_mc_count(dev)) {
+		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+		if (rc) {
+			BNX2X_ERR("Failed to create multicast MACs list: %d\n",
+				  rc);
+			return rc;
+		}
+
+		/* Now add the new MACs */
+		rc = bnx2x_config_mcast(bp, &rparam,
+					BNX2X_MCAST_CMD_ADD);
+		if (rc < 0)
+			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+				  rc);
+
+		bnx2x_free_mcast_macs_list(&rparam);
+	}
+
+	return rc;
+}
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+static void bnx2x_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return;
+	} else {
+		/* Schedule an SP task to handle the rest of the change */
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+				       NETIF_MSG_IFUP);
+	}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+	netif_addr_lock_bh(bp->dev);
+
+	if (bp->dev->flags & IFF_PROMISC) {
+		rx_mode = BNX2X_RX_MODE_PROMISC;
+	} else if ((bp->dev->flags & IFF_ALLMULTI) ||
+		   ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+		    CHIP_IS_E1(bp))) {
+		rx_mode = BNX2X_RX_MODE_ALLMULTI;
+	} else {
+		if (IS_PF(bp)) {
+			/* some multicasts */
+			if (bnx2x_set_mc_list(bp) < 0)
+				rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+			/* release bh lock, as bnx2x_set_uc_list might sleep */
+			netif_addr_unlock_bh(bp->dev);
+			if (bnx2x_set_uc_list(bp) < 0)
+				rx_mode = BNX2X_RX_MODE_PROMISC;
+			netif_addr_lock_bh(bp->dev);
+		} else {
+			/* configuring mcast to a vf involves sleeping (when we
+			 * wait for the pf's response).
+			 */
+			bnx2x_schedule_sp_rtnl(bp,
+					       BNX2X_SP_RTNL_VFPF_MCAST, 0);
+		}
+	}
+
+	bp->rx_mode = rx_mode;
+	/* handle ISCSI SD mode */
+	if (IS_MF_ISCSI_ONLY(bp))
+		bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+		netif_addr_unlock_bh(bp->dev);
+		return;
+	}
+
+	if (IS_PF(bp)) {
+		bnx2x_set_storm_rx_mode(bp);
+		netif_addr_unlock_bh(bp->dev);
+	} else {
+		/* VF will need to request the PF to make this change, and so
+		 * the VF needs to release the bottom-half lock prior to the
+		 * request (as it will likely require sleep on the VF side)
+		 */
+		netif_addr_unlock_bh(bp->dev);
+		bnx2x_vfpf_storm_rx_mode(bp);
+	}
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+			   int devad, u16 addr)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 value;
+	int rc;
+
+	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+	   prtad, devad, addr);
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+	bnx2x_release_phy_lock(bp);
+	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+	if (!rc)
+		rc = value;
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+			    u16 addr, u16 value)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	int rc;
+
+	DP(NETIF_MSG_LINK,
+	   "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
+	   prtad, devad, addr, value);
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+	bnx2x_release_phy_lock(bp);
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct mii_ioctl_data *mdio = if_mii(ifr);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return bnx2x_hwtstamp_ioctl(bp, ifr);
+	default:
+		DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+		   mdio->phy_id, mdio->reg_num, mdio->val_in);
+		return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	}
+}
+#endif
+
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* query the bulletin board for mac address configured by the PF */
+	if (IS_VF(bp))
+		bnx2x_sample_bulletin(bp);
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		BNX2X_ERR("Non-valid Ethernet address\n");
+		return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+				  struct netdev_phys_item_id *ppid)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	if (!(bp->flags & HAS_PHYS_PORT_ID))
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(bp->phys_port_id);
+	memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+	return 0;
+}
+
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+					      struct net_device *dev,
+					      netdev_features_t features)
+{
+	features = vlan_features_check(skb, features);
+	return vxlan_features_check(skb, features);
+}
+
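+/* Add or remove a single VLAN ID in HW: via a VLAN ramrod on the PF, or
+ * through the VF-PF channel on a VF.
+ */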
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+	int rc;
+
+	if (IS_PF(bp)) {
+		unsigned long ramrod_flags = 0;
+
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+					add, &ramrod_flags);
+	} else {
+		rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+	}
+
+	return rc;
+}
+
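+/* Re-apply the registered VLAN filters to HW; entries that fail are marked
+ * as no longer configured in HW (vlan->hw = false).
+ */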
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!bp->vlan_cnt) {
+		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+		return 0;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link) {
+		/* Prepare for cleanup in case of errors */
+		if (rc) {
+			vlan->hw = false;
+			continue;
+		}
+
+		if (!vlan->hw)
+			continue;
+
+		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+		if (rc) {
+			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+			vlan->hw = false;
+			rc = -EINVAL;
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	bool hw = false;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration - the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+	vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
+
+	bp->vlan_cnt++;
+	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+		bp->accept_any_vlan = true;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	} else if (bp->vlan_cnt <= bp->vlan_credit) {
+		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+		hw = true;
+	}
+
+	vlan->vid = vid;
+	vlan->hw = hw;
+
+	if (!rc) {
+		list_add(&vlan->link, &bp->vlan_reg);
+	} else {
+		bp->vlan_cnt--;
+		kfree(vlan);
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration - the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+	if (!bp->vlan_cnt) {
+		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		if (vlan->vid == vid)
+			break;
+
+	if (vlan->vid != vid) {
+		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+		return -EINVAL;
+	}
+
+	if (vlan->hw)
+		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+	list_del(&vlan->link);
+	kfree(vlan);
+
+	bp->vlan_cnt--;
+
+	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+		/* Configure all non-configured entries */
+		list_for_each_entry(vlan, &bp->vlan_reg, link) {
+			if (vlan->hw)
+				continue;
+
+			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+			if (rc) {
+				BNX2X_ERR("Unable to config VLAN %d\n",
+					  vlan->vid);
+				continue;
+			}
+			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+			   vlan->vid);
+			vlan->hw = true;
+		}
+		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+		bp->accept_any_vlan = false;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+	return rc;
+}
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+	.ndo_open		= bnx2x_open,
+	.ndo_stop		= bnx2x_close,
+	.ndo_start_xmit		= bnx2x_start_xmit,
+	.ndo_select_queue	= bnx2x_select_queue,
+	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
+	.ndo_set_mac_address	= bnx2x_change_mac_addr,
+	.ndo_validate_addr	= bnx2x_validate_addr,
+	.ndo_do_ioctl		= bnx2x_ioctl,
+	.ndo_change_mtu		= bnx2x_change_mtu,
+	.ndo_fix_features	= bnx2x_fix_features,
+	.ndo_set_features	= bnx2x_set_features,
+	.ndo_tx_timeout		= bnx2x_tx_timeout,
+	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_bnx2x,
+#endif
+	.ndo_setup_tc		= bnx2x_setup_tc,
+#ifdef CONFIG_BNX2X_SRIOV
+	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
+	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
+	.ndo_get_vf_config	= bnx2x_get_vf_config,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
+#endif
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= bnx2x_low_latency_recv,
+#endif
+	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
+	.ndo_set_vf_link_state	= bnx2x_set_vf_link_state,
+	.ndo_features_check	= bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+	.ndo_add_vxlan_port	= bnx2x_add_vxlan_port,
+	.ndo_del_vxlan_port	= bnx2x_del_vxlan_port,
+#endif
+};
+
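+/* Prefer a 64-bit DMA mask and fall back to 32 bits; fail only if neither
+ * can be set.
+ */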
+static int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+	struct device *dev = &bp->pdev->dev;
+
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(dev, "System does not support DMA, aborting\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+	if (bp->flags & AER_ENABLED) {
+		pci_disable_pcie_error_reporting(bp->pdev);
+		bp->flags &= ~AER_ENABLED;
+	}
+}
+
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+			  struct net_device *dev, unsigned long board_type)
+{
+	int rc;
+	u32 pci_cfg_dword;
+	bool chip_is_e1x = (board_type == BCM57710 ||
+			    board_type == BCM57711 ||
+			    board_type == BCM57711E);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&bp->pdev->dev,
+			"Cannot enable PCI device, aborting\n");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
+	if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
+	    PCICFG_REVESION_ID_ERROR_VAL) {
+		pr_err("PCI device error, probably due to fan failure, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (atomic_read(&pdev->enable_cnt) == 1) {
+		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+		if (rc) {
+			dev_err(&bp->pdev->dev,
+				"Cannot obtain PCI resources, aborting\n");
+			goto err_out_disable;
+		}
+
+		pci_set_master(pdev);
+		pci_save_state(pdev);
+	}
+
+	if (IS_PF(bp)) {
+		if (!pdev->pm_cap) {
+			dev_err(&bp->pdev->dev,
+				"Cannot find power management capability, aborting\n");
+			rc = -EIO;
+			goto err_out_release;
+		}
+	}
+
+	if (!pci_is_pcie(pdev)) {
+		dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	rc = bnx2x_set_coherency_mask(bp);
+	if (rc)
+		goto err_out_release;
+
+	dev->mem_start = pci_resource_start(pdev, 0);
+	dev->base_addr = dev->mem_start;
+	dev->mem_end = pci_resource_end(pdev, 0);
+
+	dev->irq = pdev->irq;
+
+	bp->regview = pci_ioremap_bar(pdev, 0);
+	if (!bp->regview) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	/* In E1/E1H use pci device function given by kernel.
+	 * In E2/E3 read physical function from ME register since these chips
+	 * support Physical Device Assignment where the kernel BDF may be
+	 * arbitrary (depending on hypervisor).
+	 */
+	if (chip_is_e1x) {
+		bp->pf_num = PCI_FUNC(pdev->devfn);
+	} else {
+		/* chip is E2/E3 */
+		pci_read_config_dword(bp->pdev,
+				      PCICFG_ME_REGISTER, &pci_cfg_dword);
+		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+				  ME_REG_ABS_PF_NUM_SHIFT);
+	}
+	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
+
+	/* clean indirect addresses */
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+
+	/* Set PCIe reset type to fundamental for EEH recovery */
+	pdev->needs_freset = 1;
+
+	/* AER (Advanced Error reporting) configuration */
+	rc = pci_enable_pcie_error_reporting(pdev);
+	if (!rc)
+		bp->flags |= AER_ENABLED;
+	else
+		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
+
+	/*
+	 * Clean the following indirect addresses for all functions since they
+	 * are not used by the driver.
+	 */
+	if (IS_PF(bp)) {
+		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+		if (chip_is_e1x) {
+			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+		}
+
+		/* Enable internal target-read (in case we are probed after PF
+		 * FLR). Must be done prior to any BAR read access. Only for
+		 * 57712 and up
+		 */
+		if (!chip_is_e1x)
+			REG_WR(bp,
+			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+	}
+
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->netdev_ops = &bnx2x_netdev_ops;
+	bnx2x_set_ethtool_ops(bp, dev);
+
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+	if (!chip_is_e1x) {
+		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+		dev->hw_enc_features =
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+			NETIF_F_GSO_IPIP |
+			NETIF_F_GSO_SIT |
+			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+	}
+
+	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+	/* VFs with an old hypervisor or an old PF do not support filtering */
+	if (IS_PF(bp)) {
+		if (chip_is_e1x)
+			bp->accept_any_vlan = true;
+		else
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+	}
+
+	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
+	dev->features |= NETIF_F_HIGHDMA;
+
+	/* Add Loopback capability to the device */
+	dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+	/* get_port_hwinfo() will set prtad and mmds properly */
+	bp->mdio.prtad = MDIO_PRTAD_NONE;
+	bp->mdio.mmds = 0;
+	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	bp->mdio.dev = dev;
+	bp->mdio.mdio_read = bnx2x_mdio_read;
+	bp->mdio.mdio_write = bnx2x_mdio_write;
+
+	return 0;
+
+err_out_release:
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+
+err_out:
+	return rc;
+}
+
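+/* Sanity-check the firmware image: section offsets/lengths, init_ops offsets
+ * and the FW version embedded in the file.
+ */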
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+	const struct firmware *firmware = bp->firmware;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	struct bnx2x_fw_file_section *sections;
+	u32 offset, len, num_ops;
+	__be16 *ops_offsets;
+	int i;
+	const u8 *fw_ver;
+
+	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
+		BNX2X_ERR("Wrong FW size\n");
+		return -EINVAL;
+	}
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+	sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+	/* Make sure none of the offsets and sizes make us read beyond
+	 * the end of the firmware data */
+	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+		offset = be32_to_cpu(sections[i].offset);
+		len = be32_to_cpu(sections[i].len);
+		if (offset + len > firmware->size) {
+			BNX2X_ERR("Section %d length is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Likewise for the init_ops offsets */
+	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+	ops_offsets = (__force __be16 *)(firmware->data + offset);
+	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+			BNX2X_ERR("Section offset %d is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Check FW version */
+	offset = be32_to_cpu(fw_hdr->fw_version.offset);
+	fw_ver = firmware->data + offset;
+	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+		       BCM_5710_FW_MAJOR_VERSION,
+		       BCM_5710_FW_MINOR_VERSION,
+		       BCM_5710_FW_REVISION_VERSION,
+		       BCM_5710_FW_ENGINEERING_VERSION);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	u32 *target = (u32 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/4; i++)
+		target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+   Ops array is stored in the following format:
+   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct raw_op *target = (struct raw_op *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/8; i++, j += 2) {
+		tmp = be32_to_cpu(source[j]);
+		target[i].op = (tmp >> 24) & 0xff;
+		target[i].offset = tmp & 0xffffff;
+		target[i].raw_data = be32_to_cpu(source[j + 1]);
+	}
+}
+
+/* IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
+ */
+static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct iro *target = (struct iro *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+		target[i].base = be32_to_cpu(source[j]);
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m1 = (tmp >> 16) & 0xffff;
+		target[i].m2 = tmp & 0xffff;
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m3 = (tmp >> 16) & 0xffff;
+		target[i].size = tmp & 0xffff;
+		j++;
+	}
+}
+
+static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be16 *source = (const __be16 *)_source;
+	u16 *target = (u16 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/2; i++)
+		target[i] = be16_to_cpu(source[i]);
+}
+
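+/* Allocate a buffer for firmware section 'arr', convert it from the file's
+ * big-endian layout using 'func', and jump to 'lbl' on allocation failure.
+ */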
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
+do {									\
+	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
+	bp->arr = kmalloc(len, GFP_KERNEL);				\
+	if (!bp->arr)							\
+		goto lbl;						\
+	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
+	     (u8 *)bp->arr, len);					\
+} while (0)
+
+static int bnx2x_init_firmware(struct bnx2x *bp)
+{
+	const char *fw_file_name;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	int rc;
+
+	if (bp->firmware)
+		return 0;
+
+	if (CHIP_IS_E1(bp))
+		fw_file_name = FW_FILE_NAME_E1;
+	else if (CHIP_IS_E1H(bp))
+		fw_file_name = FW_FILE_NAME_E1H;
+	else if (!CHIP_IS_E1x(bp))
+		fw_file_name = FW_FILE_NAME_E2;
+	else {
+		BNX2X_ERR("Unsupported chip revision\n");
+		return -EINVAL;
+	}
+	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+	if (rc) {
+		BNX2X_ERR("Can't load firmware file %s\n",
+			  fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	rc = bnx2x_check_firmware(bp);
+	if (rc) {
+		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+	/* Initialize the pointers to the init arrays */
+	/* Blob */
+	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+	/* Opcodes */
+	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+	/* Offsets */
+	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+			    be16_to_cpu_n);
+
+	/* STORMs firmware */
+	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_pram_data.offset);
+	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_pram_data.offset);
+	/* IRO */
+	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+	return 0;
+
+iro_alloc_err:
+	kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+	kfree(bp->init_ops);
+init_ops_alloc_err:
+	kfree(bp->init_data);
+request_firmware_exit:
+	release_firmware(bp->firmware);
+	bp->firmware = NULL;
+
+	return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+	kfree(bp->init_ops_offsets);
+	kfree(bp->init_ops);
+	kfree(bp->init_data);
+	release_firmware(bp->firmware);
+	bp->firmware = NULL;
+}
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+	.init_hw_cmn      = bnx2x_init_hw_common,
+	.init_hw_port     = bnx2x_init_hw_port,
+	.init_hw_func     = bnx2x_init_hw_func,
+
+	.reset_hw_cmn     = bnx2x_reset_common,
+	.reset_hw_port    = bnx2x_reset_port,
+	.reset_hw_func    = bnx2x_reset_func,
+
+	.gunzip_init      = bnx2x_gunzip_init,
+	.gunzip_end       = bnx2x_gunzip_end,
+
+	.init_fw          = bnx2x_init_firmware,
+	.release_fw       = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+	/* Prepare DMAE related driver resources */
+	bnx2x_setup_dmae(bp);
+
+	bnx2x_init_func_obj(bp, &bp->func_obj,
+			    bnx2x_sp(bp, func_rdata),
+			    bnx2x_sp_mapping(bp, func_rdata),
+			    bnx2x_sp(bp, func_afex_rdata),
+			    bnx2x_sp_mapping(bp, func_afex_rdata),
+			    &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+	int cid_count = BNX2X_L2_MAX_CID(bp);
+
+	if (IS_SRIOV(bp))
+		cid_count += BNX2X_VF_CIDS;
+
+	if (CNIC_SUPPORT(bp))
+		cid_count += CNIC_CID_MAX;
+
+	return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev:	pci device
+ * @cnic_cnt:	number of status blocks reserved for CNIC
+ *
+ */
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
+{
+	int index;
+	u16 control = 0;
+
+	/*
+	 * If MSI-X is not supported - return number of SBs needed to support
+	 * one fast path queue: one FP queue + SB for CNIC
+	 */
+	if (!pdev->msix_cap) {
+		dev_info(&pdev->dev, "no msix capability found\n");
+		return 1 + cnic_cnt;
+	}
+	dev_info(&pdev->dev, "msix capability found\n");
+
+	/*
+	 * The value in the PCI configuration space is the index of the last
+	 * entry, namely one less than the actual size of the table, which is
+	 * exactly what we want to return from this function: number of all SBs
+	 * without the default SB.
+	 * For VFs there is no default SB, so we return (index + 1).
+	 */
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
+
+	index = control & PCI_MSIX_FLAGS_QSIZE;
+
+	return index;
+}
+
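+/* Estimate the maximum number of CoS supported by the given chip; returns
+ * -ENODEV for an unknown board type.
+ */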
+static int set_max_cos_est(int chip_id)
+{
+	switch (chip_id) {
+	case BCM57710:
+	case BCM57711:
+	case BCM57711E:
+		return BNX2X_MULTI_TX_COS_E1X;
+	case BCM57712:
+	case BCM57712_MF:
+		return BNX2X_MULTI_TX_COS_E2_E3A0;
+	case BCM57800:
+	case BCM57800_MF:
+	case BCM57810:
+	case BCM57810_MF:
+	case BCM57840_4_10:
+	case BCM57840_2_20:
+	case BCM57840_O:
+	case BCM57840_MFO:
+	case BCM57840_MF:
+	case BCM57811:
+	case BCM57811_MF:
+		return BNX2X_MULTI_TX_COS_E3B0;
+	case BCM57712_VF:
+	case BCM57800_VF:
+	case BCM57810_VF:
+	case BCM57840_VF:
+	case BCM57811_VF:
+		return 1;
+	default:
+		pr_err("Unknown board_type (%d), aborting\n", chip_id);
+		return -ENODEV;
+	}
+}
+
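+/* Return true if the board type corresponds to a VF device */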
+static int set_is_vf(int chip_id)
+{
+	switch (chip_id) {
+	case BCM57712_VF:
+	case BCM57800_VF:
+	case BCM57810_VF:
+	case BCM57840_VF:
+	case BCM57811_VF:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* nig_tsgen registers relative address */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+					  int best_val, int best_period)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&func_params.params.set_timesync;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+	/* Function parameters */
+	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+	set_timesync_params->add_sub_drift_adjust_value =
+		drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
+	set_timesync_params->drift_adjust_value = best_val;
+	set_timesync_params->drift_adjust_period = best_period;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	int rc;
+	int drift_dir = 1;
+	int val, period, period1, period2, dif, dif1, dif2;
+	int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+
+	DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP adjfreq called while the interface is down\n");
+		return -EFAULT;
+	}
+
+	if (ppb < 0) {
+		ppb = -ppb;
+		drift_dir = 0;
+	}
+
+	if (ppb == 0) {
+		best_val = 1;
+		best_period = 0x1FFFFFF;
+	} else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
+		best_val = 31;
+		best_period = 1;
+	} else {
+		/* Changed not to allow val = 8, 16, 24 as these values
+		 * are not supported by the workaround.
+		 */
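+		/* Search for the (val, period) pair whose effective drift,
+		 * val * 1000000 / period, is closest to the requested ppb.
+		 */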
+		for (val = 0; val <= 31; val++) {
+			if ((val & 0x7) == 0)
+				continue;
+			period1 = val * 1000000 / ppb;
+			period2 = period1 + 1;
+			if (period1 != 0)
+				dif1 = ppb - (val * 1000000 / period1);
+			else
+				dif1 = BNX2X_MAX_PHC_DRIFT;
+			if (dif1 < 0)
+				dif1 = -dif1;
+			dif2 = ppb - (val * 1000000 / period2);
+			if (dif2 < 0)
+				dif2 = -dif2;
+			dif = (dif1 < dif2) ? dif1 : dif2;
+			period = (dif1 < dif2) ? period1 : period2;
+			if (dif < best_dif) {
+				best_dif = dif;
+				best_val = val;
+				best_period = period;
+			}
+		}
+	}
+
+	rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
+					    best_period);
+	if (rc) {
+		BNX2X_ERR("Failed to set drift\n");
+		return -EFAULT;
+	}
+
+	DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
+	   best_period);
+
+	return 0;
+}
+
+static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+
+	timecounter_adjtime(&bp->timecounter, delta);
+
+	return 0;
+}
+
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	u64 ns;
+
+	ns = timecounter_read(&bp->timecounter);
+
+	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+			     const struct timespec64 *ts)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	u64 ns;
+
+	ns = timespec64_to_ns(ts);
+
+	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
+
+	/* Re-init the timecounter */
+	timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
+
+	return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+	BNX2X_ERR("PHC ancillary features are not supported\n");
+	return -ENOTSUPP;
+}
+
+static void bnx2x_register_phc(struct bnx2x *bp)
+{
+	/* Fill the ptp_clock_info struct and register the PTP clock */
+	bp->ptp_clock_info.owner = THIS_MODULE;
+	snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
+	bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
+	bp->ptp_clock_info.n_alarm = 0;
+	bp->ptp_clock_info.n_ext_ts = 0;
+	bp->ptp_clock_info.n_per_out = 0;
+	bp->ptp_clock_info.pps = 0;
+	bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+	bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
+	bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
+	bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
+	bp->ptp_clock_info.enable = bnx2x_ptp_enable;
+
+	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
+	if (IS_ERR(bp->ptp_clock)) {
+		bp->ptp_clock = NULL;
+		BNX2X_ERR("PTP clock registration failed\n");
+	}
+}
+
+static int bnx2x_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	struct net_device *dev = NULL;
+	struct bnx2x *bp;
+	enum pcie_link_width pcie_width;
+	enum pci_bus_speed pcie_speed;
+	int rc, max_non_def_sbs;
+	int rx_count, tx_count, rss_count, doorbell_size;
+	int max_cos_est;
+	bool is_vf;
+	int cnic_cnt;
+
+	/* Management FW 'remembers' living interfaces. Allow it some time
+	 * to forget previously living interfaces, allowing a proper re-load.
+	 */
+	if (is_kdump_kernel()) {
+		ktime_t now = ktime_get_boottime();
+		ktime_t fw_ready_time = ktime_set(5, 0);
+
+		if (ktime_before(now, fw_ready_time))
+			msleep(ktime_ms_delta(fw_ready_time, now));
+	}
+
+	/* An estimated maximum supported CoS number according to the chip
+	 * version.
+	 * We will try to roughly estimate the maximum number of CoSes this chip
+	 * may support in order to minimize the memory allocated for Tx
+	 * netdev_queue's. This number will be accurately calculated during the
+	 * initialization of bp->max_cos based on the chip versions AND chip
+	 * revision in the bnx2x_init_bp().
+	 */
+	max_cos_est = set_max_cos_est(ent->driver_data);
+	if (max_cos_est < 0)
+		return max_cos_est;
+	is_vf = set_is_vf(ent->driver_data);
+	cnic_cnt = is_vf ? 0 : 1;
+
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+	/* add another SB for VF as it has no default SB */
+	max_non_def_sbs += is_vf ? 1 : 0;
+
+	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
+	rss_count = max_non_def_sbs - cnic_cnt;
+
+	if (rss_count < 1)
+		return -EINVAL;
+
+	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+	rx_count = rss_count + cnic_cnt;
+
+	/* Maximum number of netdev Tx queues:
+	 * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
+	 */
+	tx_count = rss_count * max_cos_est + cnic_cnt;
+
+	/* dev zeroed in init_etherdev */
+	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+	if (!dev)
+		return -ENOMEM;
+
+	bp = netdev_priv(dev);
+
+	bp->flags = 0;
+	if (is_vf)
+		bp->flags |= IS_VF_FLAG;
+
+	bp->igu_sb_cnt = max_non_def_sbs;
+	bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
+	bp->msg_enable = debug;
+	bp->cnic_support = cnic_cnt;
+	bp->cnic_probe = bnx2x_cnic_probe;
+
+	pci_set_drvdata(pdev, dev);
+
+	rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
+	if (rc < 0) {
+		free_netdev(dev);
+		return rc;
+	}
+
+	BNX2X_DEV_INFO("This is a %s function\n",
+		       IS_PF(bp) ? "physical" : "virtual");
+	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
+	BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
+	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+		       tx_count, rx_count);
+
+	rc = bnx2x_init_bp(bp);
+	if (rc)
+		goto init_one_exit;
+
+	/* Map doorbells here as we need the real value of bp->max_cos which
+	 * is initialized in bnx2x_init_bp() to determine the number of
+	 * l2 connections.
+	 */
+	if (IS_VF(bp)) {
+		bp->doorbells = bnx2x_vf_doorbells(bp);
+		rc = bnx2x_vf_pci_alloc(bp);
+		if (rc)
+			goto init_one_exit;
+	} else {
+		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+		if (doorbell_size > pci_resource_len(pdev, 2)) {
+			dev_err(&bp->pdev->dev,
+				"Cannot map doorbells, bar size too small, aborting\n");
+			rc = -ENOMEM;
+			goto init_one_exit;
+		}
+		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+						doorbell_size);
+	}
+	if (!bp->doorbells) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map doorbell space, aborting\n");
+		rc = -ENOMEM;
+		goto init_one_exit;
+	}
+
+	if (IS_VF(bp)) {
+		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+		if (rc)
+			goto init_one_exit;
+	}
+
+	/* Enable SRIOV if capability found in configuration space */
+	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
+	if (rc)
+		goto init_one_exit;
+
+	/* calc qm_cid_count */
+	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+	BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
+
+	/* disable FCoE L2 queue for E1x */
+	if (CHIP_IS_E1x(bp))
+		bp->flags |= NO_FCOE_FLAG;
+
+	/* Set bp->num_queues for MSI-X mode */
+	bnx2x_set_num_queues(bp);
+
+	/* Configure interrupt mode: try to enable MSI-X/MSI if
+	 * needed.
+	 */
+	rc = bnx2x_set_int_mode(bp);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot set interrupts\n");
+		goto init_one_exit;
+	}
+	BNX2X_DEV_INFO("set interrupts successfully\n");
+
+	/* register the net device */
+	rc = register_netdev(dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto init_one_exit;
+	}
+	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
+
+	if (!NO_FCOE(bp)) {
+		/* Add storage MAC address */
+		rtnl_lock();
+		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+	if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+	    pcie_speed == PCI_SPEED_UNKNOWN ||
+	    pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+		BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+	else
+		BNX2X_DEV_INFO(
+		       "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+		       board_info[ent->driver_data].name,
+		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+		       pcie_width,
+		       pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+		       pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+		       pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
+		       "Unknown",
+		       dev->base_addr, bp->pdev->irq, dev->dev_addr);
+
+	bnx2x_register_phc(bp);
+
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
+	return 0;
+
+init_one_exit:
+	bnx2x_disable_pcie_error_reporting(bp);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (IS_PF(bp) && bp->doorbells)
+		iounmap(bp->doorbells);
+
+	free_netdev(dev);
+
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+
+	return rc;
+}
+
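+/* Common teardown for the remove and shutdown paths; @remove_netdev selects
+ * whether the netdev is unregistered and freed or only closed.
+ */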
+static void __bnx2x_remove(struct pci_dev *pdev,
+			   struct net_device *dev,
+			   struct bnx2x *bp,
+			   bool remove_netdev)
+{
+	if (bp->ptp_clock) {
+		ptp_clock_unregister(bp->ptp_clock);
+		bp->ptp_clock = NULL;
+	}
+
+	/* Delete storage MAC address */
+	if (!NO_FCOE(bp)) {
+		rtnl_lock();
+		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+
+#ifdef BCM_DCBNL
+	/* Delete app tlvs from dcbnl */
+	bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+	if (IS_PF(bp) &&
+	    !BP_NOMCP(bp) &&
+	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
+	/* Close the interface - either directly or implicitly */
+	if (remove_netdev) {
+		unregister_netdev(dev);
+	} else {
+		rtnl_lock();
+		dev_close(dev);
+		rtnl_unlock();
+	}
+
+	bnx2x_iov_remove_one(bp);
+
+	/* Power on: we can't let PCI layer write to us while we are in D3 */
+	if (IS_PF(bp)) {
+		bnx2x_set_power_state(bp, PCI_D0);
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
+
+		/* Set endianness registers to their reset values in case the
+		 * next driver boots in a different endianness environment.
+		 */
+		bnx2x_reset_endianity(bp);
+	}
+
+	/* Disable MSI/MSI-X */
+	bnx2x_disable_msi(bp);
+
+	/* Power off */
+	if (IS_PF(bp))
+		bnx2x_set_power_state(bp, PCI_D3hot);
+
+	/* Make sure RESET task is not scheduled before continuing */
+	cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+	/* send message via vfpf channel to release the resources of this vf */
+	if (IS_VF(bp))
+		bnx2x_vfpf_release(bp);
+
+	/* Assumes no further PCIe PM changes will occur */
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, bp->wol);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	bnx2x_disable_pcie_error_reporting(bp);
+	if (remove_netdev) {
+		if (bp->regview)
+			iounmap(bp->regview);
+
+		/* For vfs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by PF.
+		 */
+		if (IS_PF(bp)) {
+			if (bp->doorbells)
+				iounmap(bp->doorbells);
+
+			bnx2x_release_firmware(bp);
+		} else {
+			bnx2x_vf_pci_dealloc(bp);
+		}
+		bnx2x_free_mem_bp(bp);
+
+		free_netdev(dev);
+
+		if (atomic_read(&pdev->enable_cnt) == 1)
+			pci_release_regions(pdev);
+
+		pci_disable_device(pdev);
+	}
+}
+
+static void bnx2x_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return;
+	}
+	bp = netdev_priv(dev);
+
+	__bnx2x_remove(pdev, dev, bp, true);
+}
+
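+/* Minimal unload used for EEH (PCI error) recovery: stop Tx, tear down NAPI
+ * objects and timers, and disable and save statistics.
+ */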
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
+	netdev_reset_tc(bp->dev);
+
+	del_timer_sync(&bp->timer);
+	cancel_delayed_work_sync(&bp->sp_task);
+	cancel_delayed_work_sync(&bp->period_task);
+
+	if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+		bp->stats_state = STATS_STATE_DISABLED;
+		up(&bp->stats_lock);
+	}
+
+	bnx2x_save_statistics(bp);
+
+	netif_carrier_off(bp->dev);
+
+	return 0;
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	BNX2X_ERR("IO error detected\n");
+
+	netif_device_detach(dev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(dev))
+		bnx2x_eeh_nic_unload(bp);
+
+	bnx2x_prev_path_mark_eeh(bp);
+
+	pci_disable_device(pdev);
+
+	rtnl_unlock();
+
+	/* Request a slot reset */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+	rtnl_lock();
+	BNX2X_ERR("IO slot reset initializing...\n");
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset\n");
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	if (netif_running(dev))
+		bnx2x_set_power_state(bp, PCI_D0);
+
+	if (netif_running(dev)) {
+		BNX2X_ERR("IO slot reset --> driver unload\n");
+
+		/* MCP should have been reset; Need to wait for validity */
+		bnx2x_init_shmem(bp);
+
+		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+			u32 v;
+
+			v = SHMEM2_RD(bp,
+				      drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+			SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+				  v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+		}
+		bnx2x_drain_tx_queues(bp);
+		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+		bnx2x_netif_stop(bp, 1);
+		bnx2x_free_irq(bp);
+
+		/* Report UNLOAD_DONE to MCP */
+		bnx2x_send_unload_done(bp, true);
+
+		bp->sp_state = 0;
+		bp->port.pmf = 0;
+
+		bnx2x_prev_unload(bp);
+
+		/* We should have reset the engine, so it's fair to
+		 * assume the FW will no longer write to the bnx2x driver.
+		 */
+		bnx2x_squeeze_objects(bp);
+		bnx2x_free_skbs(bp);
+		for_each_rx_queue(bp, i)
+			bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+		bnx2x_free_fp_mem(bp);
+		bnx2x_free_mem(bp);
+
+		bp->state = BNX2X_STATE_CLOSED;
+	}
+
+	rtnl_unlock();
+
+	/* If AER, perform cleanup of the PCIe registers */
+	if (bp->flags & AER_ENABLED) {
+		if (pci_cleanup_aer_uncorrect_error_status(pdev))
+			BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+		else
+			DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
+		return;
+	}
+
+	rtnl_lock();
+
+	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+							DRV_MSG_SEQ_NUMBER_MASK;
+
+	if (netif_running(dev))
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+	netif_device_attach(dev);
+
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers bnx2x_err_handler = {
+	.error_detected = bnx2x_io_error_detected,
+	.slot_reset     = bnx2x_io_slot_reset,
+	.resume         = bnx2x_io_resume,
+};
+
+static void bnx2x_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev)
+		return;
+
+	bp = netdev_priv(dev);
+	if (!bp)
+		return;
+
+	rtnl_lock();
+	netif_device_detach(dev);
+	rtnl_unlock();
+
+	/* Don't remove the netdevice, as there are scenarios which will cause
+	 * the kernel to hang, e.g., when trying to remove bnx2i while the
+	 * rootfs is mounted from SAN.
+	 */
+	__bnx2x_remove(pdev, dev, bp, false);
+}
+
+static struct pci_driver bnx2x_pci_driver = {
+	.name        = DRV_MODULE_NAME,
+	.id_table    = bnx2x_pci_tbl,
+	.probe       = bnx2x_init_one,
+	.remove      = bnx2x_remove_one,
+	.suspend     = bnx2x_suspend,
+	.resume      = bnx2x_resume,
+	.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+	.sriov_configure = bnx2x_sriov_configure,
+#endif
+	.shutdown    = bnx2x_shutdown,
+};
+
+static int __init bnx2x_init(void)
+{
+	int ret;
+
+	pr_info("%s", version);
+
+	bnx2x_wq = create_singlethread_workqueue("bnx2x");
+	if (bnx2x_wq == NULL) {
+		pr_err("Cannot create workqueue\n");
+		return -ENOMEM;
+	}
+	bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+	if (!bnx2x_iov_wq) {
+		pr_err("Cannot create iov workqueue\n");
+		destroy_workqueue(bnx2x_wq);
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&bnx2x_pci_driver);
+	if (ret) {
+		pr_err("Cannot register driver\n");
+		destroy_workqueue(bnx2x_wq);
+		destroy_workqueue(bnx2x_iov_wq);
+	}
+	return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+	struct list_head *pos, *q;
+
+	pci_unregister_driver(&bnx2x_pci_driver);
+
+	destroy_workqueue(bnx2x_wq);
+	destroy_workqueue(bnx2x_iov_wq);
+
+	/* Free globally allocated resources */
+	list_for_each_safe(pos, q, &bnx2x_prev_list) {
+		struct bnx2x_prev_path_list *tmp =
+			list_entry(pos, struct bnx2x_prev_path_list, list);
+		list_del(pos);
+		kfree(tmp);
+	}
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp:		driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+	unsigned long ramrod_flags = 0;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+				 &bp->iscsi_l2_mac_obj, true,
+				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+	struct eth_spe *spe;
+	int cxt_index, cxt_offset;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+	BUG_ON(bp->cnic_spq_pending < count);
+	bp->cnic_spq_pending -= count;
+
+	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+		u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+				& SPE_HDR_CONN_TYPE) >>
+				SPE_HDR_CONN_TYPE_SHIFT;
+		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+		/* Set validation for iSCSI L2 client before sending SETUP
+		 *  ramrod
+		 */
+		if (type == ETH_CONNECTION_TYPE) {
+			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+					ILT_PAGE_CIDS;
+				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+					(cxt_index * ILT_PAGE_CIDS);
+				bnx2x_set_ctx_validation(bp,
+					&bp->context[cxt_index].
+							 vcxt[cxt_offset].eth,
+					BNX2X_ISCSI_ETH_CID(bp));
+			}
+		}
+
+		/*
+		 * No more than 8 L2 and no more than 8 L5 SPEs may be
+		 * in the air. We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+		 * accommodate.
+		 */
+		if (type == ETH_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->cq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->cq_spq_left);
+		} else if (type == NONE_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->eq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->eq_spq_left);
+		} else if ((type == ISCSI_CONNECTION_TYPE) ||
+			   (type == FCOE_CONNECTION_TYPE)) {
+			if (bp->cnic_spq_pending >=
+			    bp->cnic_eth_dev.max_kwqe_pending)
+				break;
+			else
+				bp->cnic_spq_pending++;
+		} else {
+			BNX2X_ERR("Unknown SPE type: %d\n", type);
+			bnx2x_panic();
+			break;
+		}
+
+		spe = bnx2x_sp_get_next(bp);
+		*spe = *bp->cnic_kwq_cons;
+
+		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
+		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+			bp->cnic_kwq_cons = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_cons++;
+	}
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+}
+
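+/* Queue up to @count KWQEs from CNIC onto the internal kwq ring and, if room
+ * permits, post them to the slow path queue; returns the number of entries
+ * actually accepted.
+ */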
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+			       struct kwqe_16 *kwqes[], u32 count)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic)) {
+		BNX2X_ERR("Can't post to SP queue while panic\n");
+		return -EIO;
+	}
+#endif
+
+	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+		BNX2X_ERR("Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	spin_lock_bh(&bp->spq_lock);
+
+	for (i = 0; i < count; i++) {
+		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+			break;
+
+		*bp->cnic_kwq_prod = *spe;
+
+		bp->cnic_kwq_pending++;
+
+		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
+		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
+		   spe->data.update_data_addr.hi,
+		   spe->data.update_data_addr.lo,
+		   bp->cnic_kwq_pending);
+
+		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+			bp->cnic_kwq_prod = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_prod++;
+	}
+
+	spin_unlock_bh(&bp->spq_lock);
+
+	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+		bnx2x_cnic_sp_post(bp, 0);
+
+	return i;
+}
+
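+/* Send a control command to CNIC from a sleepable context (under cnic_mutex) */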
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	mutex_lock(&bp->cnic_mutex);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_mutex));
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	mutex_unlock(&bp->cnic_mutex);
+
+	return rc;
+}
+
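+/* Send a control command to CNIC from an atomic/BH context (under RCU) */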
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	ctl.cmd = cmd;
+
+	return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	/* first we tell CNIC and only then we count this as a completion */
+	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+	ctl.data.comp.cid = cid;
+	ctl.data.comp.error = err;
+
+	bnx2x_cnic_ctl_send_bh(bp, &ctl);
+	bnx2x_cnic_sp_post(bp, 0);
+}
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+	unsigned long accept_flags = 0, ramrod_flags = 0;
+	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+	if (start) {
+		/* Start accepting on iSCSI L2 ring. Accept all multicasts
+		 * because it's the only way for UIO Queue to accept
+		 * multicasts (in non-promiscuous mode only one Queue per
+		 * function will receive multicast packets - the leading one
+		 * in our case).
+		 */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+		/* Clear STOP_PENDING bit if START is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+	} else
+		/* Clear START_PENDING bit if STOP is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(sched_state, &bp->sp_state);
+	else {
+		__set_bit(RAMROD_RX, &ramrod_flags);
+		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+				    ramrod_flags);
+	}
+}
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (ctl->cmd) {
+	case DRV_CTL_CTXTBL_WR_CMD: {
+		u32 index = ctl->data.io.offset;
+		dma_addr_t addr = ctl->data.io.dma_addr;
+
+		bnx2x_ilt_wr(bp, index, addr);
+		break;
+	}
+
+	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+		int count = ctl->data.credit.credit_count;
+
+		bnx2x_cnic_sp_post(bp, count);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_START_L2_CMD: {
+		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+		unsigned long sp_bits = 0;
+
+		/* Configure the iSCSI classification object */
+		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+				   cp->iscsi_l2_client_id,
+				   cp->iscsi_l2_cid, BP_FUNC(bp),
+				   bnx2x_sp(bp, mac_rdata),
+				   bnx2x_sp_mapping(bp, mac_rdata),
+				   BNX2X_FILTER_MAC_PENDING,
+				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+				   &bp->macs_pool);
+
+		/* Set iSCSI MAC address */
+		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+		if (rc)
+			break;
+
+		mmiowb();
+		barrier();
+
+		/* Start accepting on iSCSI L2 ring */
+
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
+
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_STOP_L2_CMD: {
+		unsigned long sp_bits = 0;
+
+		/* Stop accepting on iSCSI L2 ring */
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
+
+		mmiowb();
+		barrier();
+
+		/* Unset iSCSI L2 MAC */
+		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+					BNX2X_ISCSI_ETH_MAC, true);
+		break;
+	}
+	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+		int count = ctl->data.credit.credit_count;
+
+		smp_mb__before_atomic();
+		atomic_add(count, &bp->cq_spq_left);
+		smp_mb__after_atomic();
+		break;
+	}
+	case DRV_CTL_ULP_REGISTER_CMD: {
+		int ulp_type = ctl->data.register_data.ulp_type;
+
+		if (CHIP_IS_E3(bp)) {
+			int idx = BP_FW_MB_IDX(bp);
+			u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+			int path = BP_PATH(bp);
+			int port = BP_PORT(bp);
+			int i;
+			u32 scratch_offset;
+			u32 *host_addr;
+
+			/* first write capability to shmem2 */
+			if (ulp_type == CNIC_ULP_ISCSI)
+				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+			else if (ulp_type == CNIC_ULP_FCOE)
+				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+			if ((ulp_type != CNIC_ULP_FCOE) ||
+			    (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+			    (!(bp->flags &  BC_SUPPORTS_FCOE_FEATURES)))
+				break;
+
+			/* if reached here - should write fcoe capabilities */
+			scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+			if (!scratch_offset)
+				break;
+			scratch_offset += offsetof(struct glob_ncsi_oem_data,
+						   fcoe_features[path][port]);
+			host_addr = (u32 *) &(ctl->data.register_data.
+					      fcoe_features);
+			for (i = 0; i < sizeof(struct fcoe_capabilities);
+			     i += 4)
+				REG_WR(bp, scratch_offset + i,
+				       *(host_addr + i/4));
+		}
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+		break;
+	}
+
+	case DRV_CTL_ULP_UNREGISTER_CMD: {
+		int ulp_type = ctl->data.ulp_type;
+
+		if (CHIP_IS_E3(bp)) {
+			int idx = BP_FW_MB_IDX(bp);
+			u32 cap;
+
+			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+			if (ulp_type == CNIC_ULP_ISCSI)
+				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+			else if (ulp_type == CNIC_ULP_FCOE)
+				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+		}
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+		break;
+	}
+
+	default:
+		BNX2X_ERR("unknown command %x\n", ctl->cmd);
+		rc = -EINVAL;
+	}
+
+	/* For storage-only interfaces, change driver state */
+	if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+		switch (ctl->drv_state) {
+		case DRV_NOP:
+			break;
+		case DRV_ACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_ACTIVE);
+			break;
+		case DRV_INACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_DISABLED);
+			break;
+		case DRV_UNLOADED:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_NOT_LOADED);
+			break;
+		default:
+			BNX2X_ERR("Unknown cnic driver state: %d\n",
+				  ctl->drv_state);
+		}
+	}
+
+	return rc;
+}
+
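+/* cnic callback (cnic_eth_dev->drv_get_fc_npiv_tbl): read the FC-NPIV
+ * WWPN/WWNN table from NVRAM into the caller-supplied cnic_fc_npiv_tbl.
+ * Returns 0 only if a valid, non-empty table was read; -EINVAL otherwise.
+ */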
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+			     struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bdn_fc_npiv_tbl *tbl = NULL;
+	u32 offset, entries;
+	int rc = -EINVAL;
+	int i;
+
+	if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+		goto out;
+
+	DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (!tbl) {
+		BNX2X_ERR("Failed to allocate fc_npiv table\n");
+		goto out;
+	}
+
+	offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+	if (!offset) {
+		DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
+		goto out;
+	}
+	DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+	/* Read the table contents from nvram */
+	if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+		BNX2X_ERR("Failed to read FC-NPIV table\n");
+		goto out;
+	}
+
+	/* Since bnx2x_nvram_read() returns data in be32, we need to convert
+	 * the number of entries back to cpu endianness.
+	 */
+	entries = tbl->fc_npiv_cfg.num_of_npiv;
+	entries = (__force u32)be32_to_cpu((__force __be32)entries);
+	tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+	if (!tbl->fc_npiv_cfg.num_of_npiv) {
+		DP(BNX2X_MSG_MCP,
+		   "No FC-NPIV table [valid, simply not present]\n");
+		goto out;
+	} else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+		BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+			  tbl->fc_npiv_cfg.num_of_npiv);
+		goto out;
+	} else {
+		DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+		   tbl->fc_npiv_cfg.num_of_npiv);
+	}
+
+	/* Copy the data into cnic-provided struct */
+	cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+	for (i = 0; i < cnic_tbl->count; i++) {
+		memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+		memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+	}
+
+	rc = 0;
+out:
+	kfree(tbl);
+	return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+		cp->irq_arr[0].vector = bp->msix_table[1].vector;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+	if (!CHIP_IS_E1x(bp))
+		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+	else
+		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+	cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
+	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+	cp->irq_arr[1].status_blk = bp->def_status_blk;
+	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+	cp->num_irq = 2;
+}
+
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+			     bnx2x_cid_ilt_lines(bp);
+	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+	   BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+	   cp->iscsi_l2_cid);
+
+	if (NO_ISCSI_OOO(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			       void *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	int rc;
+
+	DP(NETIF_MSG_IFUP, "Register_cnic called\n");
+
+	if (ops == NULL) {
+		BNX2X_ERR("NULL ops received\n");
+		return -EINVAL;
+	}
+
+	if (!CNIC_SUPPORT(bp)) {
+		BNX2X_ERR("Can't register CNIC when not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!CNIC_LOADED(bp)) {
+		rc = bnx2x_load_cnic(bp);
+		if (rc) {
+			BNX2X_ERR("CNIC-related load failed\n");
+			return rc;
+		}
+	}
+
+	bp->cnic_enabled = true;
+
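+	/* Page-sized ring used to queue cnic kernel work queue elements
+	 * (kwqes) until they can be posted to the slow-path queue; cons/prod
+	 * wrap at cnic_kwq_last (MAX_SP_DESC_CNT entries).
+	 */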
+	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bp->cnic_kwq)
+		return -ENOMEM;
+
+	bp->cnic_kwq_cons = bp->cnic_kwq;
+	bp->cnic_kwq_prod = bp->cnic_kwq;
+	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+	bp->cnic_spq_pending = 0;
+	bp->cnic_kwq_pending = 0;
+
+	bp->cnic_data = data;
+
+	cp->num_irq = 0;
+	cp->drv_state |= CNIC_DRV_STATE_REGD;
+	cp->iro_arr = bp->iro_arr;
+
+	bnx2x_setup_cnic_irq_info(bp);
+
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	/* Schedule driver to read CNIC driver versions */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
+	return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_mutex);
+	cp->drv_state = 0;
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_mutex);
+	synchronize_rcu();
+	bp->cnic_enabled = false;
+	kfree(bp->cnic_kwq);
+	bp->cnic_kwq = NULL;
+
+	return 0;
+}
+
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	/* If both iSCSI and FCoE are disabled - return NULL in
+	 * order to indicate to CNIC that it should not try to work
+	 * with this device.
+	 */
+	if (NO_ISCSI(bp) && NO_FCOE(bp))
+		return NULL;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = CHIP_ID(bp);
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->io_base2 = bp->doorbells;
+	cp->max_kwqe_pending = 8;
+	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+			     bnx2x_cid_ilt_lines(bp);
+	cp->ctx_tbl_len = CNIC_ILT_LINES;
+	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
+	cp->drv_register_cnic = bnx2x_register_cnic;
+	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+	cp->iscsi_l2_client_id =
+		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+	if (NO_ISCSI_OOO(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+	if (NO_ISCSI(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+	if (NO_FCOE(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+	BNX2X_DEV_INFO(
+		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
+		cp->ctx_blk_size, cp->ctx_tbl_offset, cp->ctx_tbl_len,
+		cp->starting_cid);
+	return cp;
+}
+
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	u32 offset = BAR_USTRORM_INTMEM;
+
+	if (IS_VF(bp))
+		return bnx2x_vf_ustorm_prods_offset(bp, fp);
+	else if (!CHIP_IS_E1x(bp))
+		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+	else
+		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+	return offset;
+}
+
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7
+ * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
+ * combination
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+	u32 pretend_reg;
+
+	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+		return -1;
+
+	/* get my own pretend register */
+	pretend_reg = bnx2x_get_pretend_reg(bp);
+	REG_WR(bp, pretend_reg, pretend_func_val);
+	REG_RD(bp, pretend_reg);
+	return 0;
+}
+
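+/* Deferred work for Tx hardware timestamping: poll the NIG Tx timestamp
+ * buffer, hand a valid timestamp to the stack via skb_tstamp_tx() and
+ * release the saved skb; reschedule itself while no timestamp is ready.
+ */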
+static void bnx2x_ptp_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
+	int port = BP_PORT(bp);
+	u32 val_seq;
+	u64 timestamp, ns;
+	struct skb_shared_hwtstamps shhwtstamps;
+
+	/* Read Tx timestamp registers */
+	val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+			 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+	if (val_seq & 0x10000) {
+		/* There is a valid timestamp value */
+		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+		timestamp <<= 32;
+		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
+				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
+		/* Reset timestamp register to allow new timestamp */
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+		ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = ns_to_ktime(ns);
+		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+		dev_kfree_skb_any(bp->ptp_tx_skb);
+		bp->ptp_tx_skb = NULL;
+
+		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+		   timestamp, ns);
+	} else {
+		DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
+		/* Reschedule to keep checking for a valid timestamp value */
+		schedule_work(&bp->ptp_task);
+	}
+}
+
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
+{
+	int port = BP_PORT(bp);
+	u64 timestamp, ns;
+
+	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
+			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+	timestamp <<= 32;
+	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
+			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+
+	/* Reset timestamp register to allow new timestamp */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+
+	ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+	   timestamp, ns);
+}
+
+/* Read the PHC */
+static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+{
+	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
+	int port = BP_PORT(bp);
+	u32 wb_data[2];
+	u64 phc_cycles;
+
+	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
+		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
+	phc_cycles = wb_data[1];
+	phc_cycles = (phc_cycles << 32) + wb_data[0];
+
+	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
+
+	return phc_cycles;
+}
+
+static void bnx2x_init_cyclecounter(struct bnx2x *bp)
+{
+	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
+	bp->cyclecounter.read = bnx2x_cyclecounter_read;
+	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
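+	/* timecounter converts cycles to ns as (cycles * mult) >> shift,
+	 * so mult = 1 and shift = 1 account each PHC cycle as 0.5 ns.
+	 */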
+	bp->cyclecounter.shift = 1;
+	bp->cyclecounter.mult = 1;
+}
+
+static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&func_params.params.set_timesync;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+	/* Function parameters */
+	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
+	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+{
+	struct bnx2x_queue_state_params q_params;
+	int rc, i;
+
+	/* send queue update ramrod to enable PTP packets */
+	memset(&q_params, 0, sizeof(q_params));
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+		  &q_params.params.update.update_flags);
+	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
+		  &q_params.params.update.update_flags);
+
+	/* send the ramrod on all the queues of the PF */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Set the appropriate Queue object */
+		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to enable PTP packets\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
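+/* Apply the tx_type/rx_filter last requested through the hwtstamp ioctl:
+ * program the per-port NIG PTP classification masks, enable PTP queue
+ * updates via ramrod and allow PTP packets to reach the host. No-op until
+ * the ioctl has been called at least once (hwtstamp_ioctl_called).
+ */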
+int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int rc;
+
+	if (!bp->hwtstamp_ioctl_called)
+		return 0;
+
+	switch (bp->tx_type) {
+	case HWTSTAMP_TX_ON:
+		bp->flags |= TX_TIMESTAMPING_EN;
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+		       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+		break;
+	case HWTSTAMP_TX_ONESTEP_SYNC:
+		BNX2X_ERR("One-step timestamping is not supported\n");
+		return -ERANGE;
+	}
+
+	switch (bp->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+		bp->rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		/* Initialize PTP detection for UDP/IPv4 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		/* Initialize PTP detection L2 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+		break;
+	}
+
+	/* Indicate to FW that this PF expects recorded PTP packets */
+	rc = bnx2x_enable_ptp_packets(bp);
+	if (rc)
+		return rc;
+
+	/* Enable sending PTP packets to host */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+	return 0;
+}
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int rc;
+
+	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+	   config.tx_type, config.rx_filter);
+
+	if (config.flags) {
+		BNX2X_ERR("config.flags is reserved for future use\n");
+		return -EINVAL;
+	}
+
+	bp->hwtstamp_ioctl_called = 1;
+	bp->tx_type = config.tx_type;
+	bp->rx_filter = config.rx_filter;
+
+	rc = bnx2x_configure_ptp_filters(bp);
+	if (rc)
+		return rc;
+
+	config.rx_filter = bp->rx_filter;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+	int rc, port = BP_PORT(bp);
+	u32 wb_data[2];
+
+	/* Reset PTP event detection rules - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable PTP packets to host - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+	/* Enable the PTP feature */
+	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+	       NIG_REG_P0_PTP_EN, 0x3F);
+
+	/* Enable the free-running counter */
+	wb_data[0] = 0;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+	/* Reset drift register (offset register is not reset) */
+	rc = bnx2x_send_reset_timesync_ramrod(bp);
+	if (rc) {
+		BNX2X_ERR("Failed to reset PHC drift register\n");
+		return -EFAULT;
+	}
+
+	/* Reset possibly old timestamps */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+
+	return 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+void bnx2x_init_ptp(struct bnx2x *bp)
+{
+	int rc;
+
+	/* Configure PTP in HW */
+	rc = bnx2x_configure_ptp(bp);
+	if (rc) {
+		BNX2X_ERR("Stopping PTP initialization\n");
+		return;
+	}
+
+	/* Init work queue for Tx timestamping */
+	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
+
+	/* Init cyclecounter and timecounter. This is done only in the first
+	 * load. If done in every load, PTP application will fail when doing
+	 * unload / load (e.g. MTU change) while it is running.
+	 */
+	if (!bp->timecounter_init_done) {
+		bnx2x_init_cyclecounter(bp);
+		timecounter_init(&bp->timecounter, &bp->cyclecounter,
+				 ktime_to_ns(ktime_get_real()));
+		bp->timecounter_init_done = 1;
+	}
+
+	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 0000000..a91ccbf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,170 @@
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0			0
+#define PORT_1			1
+#define PORT_MAX		2
+#define NVM_PATH_MAX		2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+	u32 capability1;
+	/* Maximum number of I/Os per connection */
+	#define FCOE_IOS_PER_CONNECTION_MASK    0x0000ffff
+	#define FCOE_IOS_PER_CONNECTION_SHIFT   0
+	/* Maximum number of Logins per port */
+	#define FCOE_LOGINS_PER_PORT_MASK       0xffff0000
+	#define FCOE_LOGINS_PER_PORT_SHIFT   16
+
+	u32 capability2;
+	/* Maximum number of exchanges */
+	#define FCOE_NUMBER_OF_EXCHANGES_MASK   0x0000ffff
+	#define FCOE_NUMBER_OF_EXCHANGES_SHIFT  0
+	/* Maximum NPIV WWN per port */
+	#define FCOE_NPIV_WWN_PER_PORT_MASK     0xffff0000
+	#define FCOE_NPIV_WWN_PER_PORT_SHIFT    16
+
+	u32 capability3;
+	/* Maximum number of targets supported */
+	#define FCOE_TARGETS_SUPPORTED_MASK     0x0000ffff
+	#define FCOE_TARGETS_SUPPORTED_SHIFT    0
+	/* Maximum number of outstanding commands across all connections */
+	#define FCOE_OUTSTANDING_COMMANDS_MASK  0xffff0000
+	#define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+	u32 capability4;
+	#define FCOE_CAPABILITY4_STATEFUL			0x00000001
+	#define FCOE_CAPABILITY4_STATELESS			0x00000002
+	#define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID	0x00000004
+};
+
+struct glob_ncsi_oem_data {
+	u32 driver_version;
+	u32 unused[3];
+	struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+	ETH_STATS_OPCODE,
+	FCOE_STATS_OPCODE,
+	ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN	12
+/*  Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+	/* Function's Driver Version. padded to 12 */
+	u8 version[ETH_STAT_INFO_VERSION_LEN];
+	/* Locally Admin Addr. BigEndian EUI48. Actual size is 6 bytes */
+	u8 mac_local[8];
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
+	u32 mtu_size;		/* MTU Size. Note: Negotiated MTU */
+	u32 feature_flags;	/* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK		0x01
+#define FEATURE_ETH_LSO_MASK			0x02
+#define FEATURE_ETH_BOOTMODE_MASK		0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT		2
+#define FEATURE_ETH_BOOTMODE_NONE		(0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE		(0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI		(0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE		(0x3 << 2)
+#define FEATURE_ETH_TOE_MASK			0x20
+	u32 lso_max_size;	/* LSO MaxOffloadSize. */
+	u32 lso_min_seg_cnt;	/* LSO MinSegmentCount. */
+	/* Num Offloaded Connections TCP_IPv4. */
+	u32 ipv4_ofld_cnt;
+	/* Num Offloaded Connections TCP_IPv6. */
+	u32 ipv6_ofld_cnt;
+	u32 promiscuous_mode;	/* Promiscuous Mode. non-zero true */
+	u32 txq_size;		/* TX Descriptors Queue Size */
+	u32 rxq_size;		/* RX Descriptors Queue Size */
+	/* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+	u32 txq_avg_depth;
+	/* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+	u32 rxq_avg_depth;
+	/* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+	u32 iov_offload;
+	/* Number of NetQueue/VMQ Config'd. */
+	u32 netq_cnt;
+	u32 vf_cnt;		/* Num VF assigned to this PF. */
+};
+
+/*  Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+	u8 version[12];		/* Function's Driver Version. */
+	u8 mac_local[8];	/* Locally Admin Addr. */
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
+	/* QoS Priority (per 802.1p). 0-7 */
+	u32 qos_priority;
+	u32 txq_size;		/* FCoE TX Descriptors Queue Size. */
+	u32 rxq_size;		/* FCoE RX Descriptors Queue Size. */
+	/* FCoE TX Descriptor Queue Avg Depth. */
+	u32 txq_avg_depth;
+	/* FCoE RX Descriptors Queue Avg Depth. */
+	u32 rxq_avg_depth;
+	u32 rx_frames_lo;	/* FCoE RX Frames received. */
+	u32 rx_frames_hi;	/* FCoE RX Frames received. */
+	u32 rx_bytes_lo;	/* FCoE RX Bytes received. */
+	u32 rx_bytes_hi;	/* FCoE RX Bytes received. */
+	u32 tx_frames_lo;	/* FCoE TX Frames sent. */
+	u32 tx_frames_hi;	/* FCoE TX Frames sent. */
+	u32 tx_bytes_lo;	/* FCoE TX Bytes sent. */
+	u32 tx_bytes_hi;	/* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver */
+struct iscsi_stats_info {
+	u8 version[12];		/* Function's Driver Version. */
+	u8 mac_local[8];	/* Locally Admin iSCSI MAC Addr. */
+	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
+	/* QoS Priority (per 802.1p). 0-7 */
+	u32 qos_priority;
+	u8 initiator_name[64];	/* iSCSI Boot Initiator Node name. */
+	u8 ww_port_name[64];	/* iSCSI World wide port name */
+	u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+	u8 boot_target_ip[16];	/* iSCSI Boot Target IP. */
+	u32 boot_target_portal;	/* iSCSI Boot Target Portal. */
+	u8 boot_init_ip[16];	/* iSCSI Boot Initiator IP Address. */
+	u32 max_frame_size;	/* Max Frame Size. bytes */
+	u32 txq_size;		/* PDU TX Descriptors Queue Size. */
+	u32 rxq_size;		/* PDU RX Descriptors Queue Size. */
+	u32 txq_avg_depth;	/* PDU TX Descriptor Queue Avg Depth. */
+	u32 rxq_avg_depth;	/* PDU RX Descriptors Queue Avg Depth. */
+	u32 rx_pdus_lo;		/* iSCSI PDUs received. */
+	u32 rx_pdus_hi;		/* iSCSI PDUs received. */
+	u32 rx_bytes_lo;	/* iSCSI RX Bytes received. */
+	u32 rx_bytes_hi;	/* iSCSI RX Bytes received. */
+	u32 tx_pdus_lo;		/* iSCSI PDUs sent. */
+	u32 tx_pdus_hi;		/* iSCSI PDUs sent. */
+	u32 tx_bytes_lo;	/* iSCSI PDU TX Bytes sent. */
+	u32 tx_bytes_hi;	/* iSCSI PDU TX Bytes sent. */
+	u32 pcp_prior_map_tbl;	/* C-PCP to S-PCP Priority MapTable.
+				 * 9 nibbles, the position of each nibble
+				 * represents the C-PCP value, the value
+				 * of the nibble = S-PCP value.
+				 */
+};
+
+union drv_info_to_mcp {
+	struct eth_stats_info	ether_stat;
+	struct fcoe_stats_info	fcoe_stat;
+	struct iscsi_stats_info	iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
new file mode 100644
index 0000000..4dead49
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -0,0 +1,7696 @@
+/* bnx2x_reg.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Each register description starts with the register access type, followed
+ * by its size in bits. For example [RW 32]. The access types are:
+ * R  - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W  - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ *      read/write in consecutive 32 bits accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR			 (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS		 (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU		 (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT		 (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR			 (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND		 (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY					 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE					 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR					 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK					 0x1101d8
+/* [R 5] Parity register #0 read */
+#define ATC_REG_ATC_PRTY_STS					 0x1101cc
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR				 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK					 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS					 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 				 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS					 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR				 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset the first rbc access to this reg must be write; there can
+ * be no more rbc writes after the first one; there can be any number of rbc
+ * reads following the first write; rbc accesses not following these rules
+ * will result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT				 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0			 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1			 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0				 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1				 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0			 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1			 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0				 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1				 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD				 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD				 0x601e4
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+   interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0			 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+   interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0			 0x6013c
+/* [RW 11] The number of blocks guarantied for the LB port */
+#define BRB1_REG_LB_GUARANTIED					 0x601ec
+/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST				 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM						 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+   interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0			 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+   interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0			 0x6015c
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED			 0x60244
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST			 0x60254
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED			 0x60248
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC 0
+ * before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST			 0x60258
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 1. The register
+ * is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED			 0x6024c
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST			 0x6025c
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED			 0x60250
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST			 0x60260
+/* [RW 11] The number of blocks guarantied for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0				 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1				 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS				 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+   was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0				 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1				 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4				 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+   asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0				 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1				 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0			 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1			 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0			 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1			 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0			 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1			 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0			 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1			 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 			 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 			 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0				 0x60068
+/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE			 0x60268
+/* [R 24] The number of full blocks occupied by the port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET					 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP					 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN					 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN					 0xd000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
+   Otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q					 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK					 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS					 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK					 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS					 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR				 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ					 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 				 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 				 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN					 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define CCM_REG_CDU_AG_WR_IFEN					 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN					 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN					 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD					 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q					 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q					 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P					 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S					 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN					 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD					 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT					 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT					 0xd00bc
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN					 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 				 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT					 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+   inputs. */
+#define CCM_REG_ERR_CCM_HDR					 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID					 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD					 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD					 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+   ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+   outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE					 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel priority is
+   the complement to 4 of the remaining priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR					 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel priority is
+   the complement to 4 of the remaining priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR					 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q					 0xd0108
+/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
+   context and sent to STORM; for a specific connection type. The double
+   REG-pairs are used in order to align to STORM context row size of 128
+   bits. The offset of these data in the STORM context is always 0. Index
+   _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0					 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1					 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2					 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3					 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4					 0xd005c
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN					 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS					 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT					 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0					 0xd0134
+#define CCM_REG_PHYS_QNUM1_1					 0xd0138
+#define CCM_REG_PHYS_QNUM2_0					 0xd013c
+#define CCM_REG_PHYS_QNUM2_1					 0xd0140
+#define CCM_REG_PHYS_QNUM3_0					 0xd0144
+#define CCM_REG_PHYS_QNUM3_1					 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0				 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1				 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0				 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1				 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0				 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1				 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0				 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1				 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN					 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS				 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
+   mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+   weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+   etc. */
+#define CCM_REG_STORM_WEIGHT					 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN					 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 				 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT					 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN					 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+   the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 				 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT					 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN					 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 				 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT					 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [12:6] - message
+   pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE					 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE				 24
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 					 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+   of the Input Stage XX protection buffer by the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD					 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+   protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+   At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+   counter. */
+#define CCM_REG_XX_MSG_NUM					 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 				 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+   header pointer. */
+#define CCM_REG_XX_TABLE					 0xd0280
+#define CDU_REG_CDU_CHK_MASK0					 0x101000
+#define CDU_REG_CDU_CHK_MASK1					 0x101004
+#define CDU_REG_CDU_CONTROL0					 0x101008
+#define CDU_REG_CDU_DEBUG					 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS				 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK					 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS					 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK					 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS					 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR				 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+   active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA					 0x101014
+/* [WB 216] L1TT ram access. each entry has the following format :
+   {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+   length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT						 0x101800
+/* [WB 24] MATT ram access. each entry has the following
+   format: {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT						 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 					 0x101050
+/* [R 1] indication that initializing the activity counter by the hardware
+   was done. */
+#define CFC_REG_AC_INIT_DONE					 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER				 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE				 256
+/* [R 1] indication that initializing the cams by the hardware was done. */
+#define CFC_REG_CAM_INIT_DONE					 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK					 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS					 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 				 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK					 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS					 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR				 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 					 0x104800
+#define CFC_REG_CONTROL0					 0x104028
+#define CFC_REG_DEBUG0						 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+   vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR				 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+   set one of these bits. the bit description can be found in CFC
+   specifications */
+#define CFC_REG_ERROR_VECTOR					 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM					 0x105000
+#define CFC_REG_INFO_RAM_SIZE					 1024
+#define CFC_REG_INIT_REG					 0x10404c
+#define CFC_REG_INTERFACES					 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+   field allows changing the priorities of the weighted-round-robin arbiter
+   which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS					 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; ext_lcid} */
+#define CFC_REG_LINK_LIST					 0x104c00
+#define CFC_REG_LINK_LIST_SIZE					 256
+/* [R 1] indication that initializing the link list by the hardware was done. */
+#define CFC_REG_LL_INIT_DONE					 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 				 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING				 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF				 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING				 0x104018
+#define CFC_REG_WEAK_ENABLE_PF					 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0				 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10				 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11				 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12				 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13				 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14				 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15				 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16				 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2				 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3				 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4				 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5				 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6				 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7				 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8				 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9				 0xc205c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10				 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11				 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12				 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13				 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14				 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15				 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16				 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 				 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 				 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 				 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 				 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR				 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0				 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1				 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2				 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3				 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 			 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0				 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1				 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 				 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 				 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 				 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS					 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR				 0xc22b4
+#define CSDM_REG_ENABLE_IN1					 0xc2238
+#define CSDM_REG_ENABLE_IN2					 0xc223c
+#define CSDM_REG_ENABLE_OUT1					 0xc2240
+#define CSDM_REG_ENABLE_OUT2					 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL				 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG				 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD					 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 				 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 				 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD					 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD					 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD					 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD					 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD					 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD					 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD					 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD					 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR				 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY				 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY				 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK					 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 				 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0					 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1					 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2					 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+   not be equal to register ~csem_registers_arb_element0.arb_element0 and
+   ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3					 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 and
+   ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4					 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0				 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1				 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 				 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 				 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0				 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1				 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0				 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1				 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0				 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1				 0x200138
+#define CSEM_REG_ENABLE_IN					 0x2000a4
+#define CSEM_REG_ENABLE_OUT					 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY					 0x220000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC0_DISABLE					 0x200224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC1_DISABLE					 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define CSEM_REG_INT_TABLE					 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0					 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1					 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0					 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1					 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2					 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3					 0x200014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define CSEM_REG_PAS_DISABLE					 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 				 0x202000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define CSEM_REG_PRAM						 0x240000
+/* [R 16] Valid sleeping threads indication; there is a bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID				 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY				 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST					 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS					 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS					 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS					 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS					 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS					 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS					 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS					 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS					 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS					 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS					 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS					 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS					 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS					 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS					 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS					 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS					 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS					 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS					 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS					 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM					 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK					 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS					 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR				 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
+ * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
+ * 4.Completion function=0; 5.Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN				 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+   as 14*X+Y. */
+#define DMAE_REG_CMD_MEM					 0x102400
+#define DMAE_REG_CMD_MEM_SIZE					 224
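+/* Illustrative helper; not part of the original register map. Per the
+ * comment above, row Y of command X sits at word index 14*X+Y inside the
+ * commands memory, so its GRC byte address can be formed as below (the
+ * macro name is a hypothetical addition):
+ */
+#define DMAE_REG_CMD_MEM_ADDR(cmd, row) \
+	(DMAE_REG_CMD_MEM + (14 * (cmd) + (row)) * 4)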
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+   initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT					 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+   CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT					 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK					 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 				 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS					 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR				 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0						 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1						 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 					 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 					 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 					 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 					 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 					 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 					 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2						 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3						 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4						 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5						 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6						 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7						 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8						 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9						 0x1020bc
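+/* Illustrative helper; not part of the original register map. The GO
+ * registers above are not contiguous by command index (C0-C1; then C10-C15;
+ * then C2-C9), so a hypothetical lookup macro has to handle the three
+ * address ranges rather than a single base-plus-offset calculation:
+ */
+#define DMAE_REG_GO_C_IDX(i) \
+	((i) < 2 ? DMAE_REG_GO_C0 + 4 * (i) : \
+	 (i) < 10 ? DMAE_REG_GO_C2 + 4 * ((i) - 2) : \
+	 DMAE_REG_GO_C10 + 4 * ((i) - 10))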
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
+   input is disregarded; valid is deasserted; all other signals are treated
+   as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN					 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+   acknowledge input is disregarded; valid is deasserted; full is asserted;
+   all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN					 0x102004
+/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
+   initial value to the credit counter; related to the address. Read returns
+   the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD				 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0					 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1					 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2					 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3					 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX					 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0					 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK					 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS					 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR				 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 				 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS					 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR				 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR					 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST					 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH				 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH				 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to following
+   pointer. The range is 0 - 256 FIFO rows; where each row stands for the
+   doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF					 0x1700a4
+/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or
+   equal to full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST					 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD					 0x170058
+#define DORQ_REG_IF_EN						 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE				 0x1701ec
+#define DORQ_REG_MODE_ACT					 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST					 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 				 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+   Interface. */
+#define DORQ_REG_OUTST_REQ					 0x17003c
+#define DORQ_REG_PF_USAGE_CNT					 0x1701d0
+#define DORQ_REG_REGN						 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPA_CRD_CNT					 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPB_CRD_CNT					 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+   writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+   read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD					 0x170048
+#define DORQ_REG_VF_NORM_CID_BASE				 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST				 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE				 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT				 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE				 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0					 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0				 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0				 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0				 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT				 0x170340
+
+/* [RW 4] Initial activity counter value on the load request; when the
+   shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT					 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD					 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0				 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0				 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0				 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0				 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0			 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0				 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1				 (0x1<<0)
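+/* Illustrative only; not part of the original file. A driver would
+ * typically read HC_REG_CONFIG_0 (defined below); set or clear the
+ * HC_CONFIG_0_REG_* bits above to pick line/MSI/MSI-X signalling; and
+ * write the value back; e.g. (REG_RD/REG_WR stand for hypothetical 32-bit
+ * GRC accessors):
+ *
+ *	val = REG_RD(bp, HC_REG_CONFIG_0);
+ *	val |= HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0;
+ *	val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ *	REG_WR(bp, HC_REG_CONFIG_0, val);
+ */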
+#define DORQ_REG_VF_USAGE_CNT					 0x170320
+#define HC_REG_AGG_INT_0					 0x108050
+#define HC_REG_AGG_INT_1					 0x108054
+#define HC_REG_ATTN_BIT 					 0x108120
+#define HC_REG_ATTN_IDX 					 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 				 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 				 0x108020
+#define HC_REG_ATTN_NUM_P0					 0x108038
+#define HC_REG_ATTN_NUM_P1					 0x10803c
+#define HC_REG_COMMAND_REG					 0x108180
+#define HC_REG_CONFIG_0 					 0x108000
+#define HC_REG_CONFIG_1 					 0x108004
+#define HC_REG_FUNC_NUM_P0					 0x1080ac
+#define HC_REG_FUNC_NUM_P1					 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK					 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS					 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR					 0x108098
+#define HC_REG_INT_MASK						 0x108108
+#define HC_REG_LEADING_EDGE_0					 0x108040
+#define HC_REG_LEADING_EDGE_1					 0x108048
+#define HC_REG_MAIN_MEMORY					 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE					 152
+#define HC_REG_P0_PROD_CONS					 0x108200
+#define HC_REG_P1_PROD_CONS					 0x108400
+#define HC_REG_PBA_COMMAND					 0x108140
+#define HC_REG_PCI_CONFIG_0					 0x108010
+#define HC_REG_PCI_CONFIG_1					 0x108014
+#define HC_REG_STATISTIC_COUNTERS				 0x109000
+#define HC_REG_TRAILING_EDGE_0					 0x108044
+#define HC_REG_TRAILING_EDGE_1					 0x10804c
+#define HC_REG_UC_RAM_ADDR_0					 0x108028
+#define HC_REG_UC_RAM_ADDR_1					 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 			 0x108068
+#define HC_REG_VQID_0						 0x108008
+#define HC_REG_VQID_1						 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN		 (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE		 (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS				 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM					 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H				 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L				 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means attention message was sent; but
+ * the write done was not yet received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING			 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION				 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA				 0x130124
+#define IGU_REG_COMMAND_REG_CTRL				 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP			 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM					 0x130064
+/* [R 1] Data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID			 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK					 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS					 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR				 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM					 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH				 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY					 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE				 136
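+/* Illustrative helpers; hypothetical additions that only restate the entry
+ * layout documented above ([0] valid; [6:1] vector number; [13:7] FID):
+ */
+#define IGU_MAP_ENTRY_VALID				 (1 << 0)
+#define IGU_MAP_ENTRY_VECTOR(vec)			 (((vec) & 0x3f) << 1)
+#define IGU_MAP_ENTRY_FID(fid)				 (((fid) & 0x7f) << 7)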
+#define IGU_REG_PBA_STATUS_LSB					 0x130138
+#define IGU_REG_PBA_STATUS_MSB					 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN					 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN					 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK				 0x130148
+/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
+ * pending; 1 = pending. Pending means interrupt was asserted; and write
+ * done was not received. Data valid only in addresses 0-4. All the rest are
+ * zero. */
+#define IGU_REG_PENDING_BITS_STATUS				 0x130300
+#define IGU_REG_PF_CONFIGURATION				 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line holds the C producer. The
+ * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. Each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY				 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM					 0x130068
+/* [RW 6] Write one for each bit will reset the appropriate memory. When the
+ * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES					 0x130158
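+/* Illustrative only; not part of the original file. Resetting one of the
+ * memories listed above means setting its bit and polling until the
+ * hardware clears it again; e.g. for the mapping memory (REG_RD/REG_WR
+ * stand for hypothetical 32-bit GRC accessors):
+ *
+ *	REG_WR(bp, IGU_REG_RESET_MEMORIES, 1 << 0);
+ *	while (REG_RD(bp, IGU_REG_RESET_MEMORIES) & (1 << 0))
+ *		;	poll until bit 0 reads back as zero
+ */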
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM					 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB				 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB				 0x130160
+#define IGU_REG_SB_MASK_LSB					 0x130164
+#define IGU_REG_SB_MASK_MSB					 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access for WO BAR address; or write access for RO BAR
+ * address or any access for reserved address or PCI function error is set
+ * and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP					 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT			 0x130800
+/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a
+ * timer mask command arrives. Value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE				 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH				 0x130104
+#define IGU_REG_VF_CONFIGURATION				 0x130170
+/* [WB_R 32] Each bit represent write done pending bits status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING				 0x130480
+#define MCP_A_REG_MCPR_SCRATCH					 0x3a0000
+#define MCP_REG_MCPR_ACCESS_LOCK				 0x8009c
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER			 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS					 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE					 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS					 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND				 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0				 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL				 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE				 0x86424
+#define MCP_REG_MCPR_NVM_ADDR					 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4					 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND				 0x86400
+#define MCP_REG_MCPR_NVM_READ					 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 				 0x86420
+#define MCP_REG_MCPR_NVM_WRITE					 0x86408
+#define MCP_REG_MCPR_SCRATCH					 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK		 (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK		 (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+   follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+   [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+   GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+   glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+   [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+   MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+   Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+   interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+   error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+   interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+   Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0			 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1			 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+   NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+   mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+   [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+   PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+   function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+   Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+   mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+   BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+   Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+   interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+   Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+   interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 			 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+   follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0			 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1			 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+   PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+   [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+   [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+   XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+   DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+   error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+   PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+   [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+   [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+   [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+   [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 			 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+   follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+   error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+   PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0			 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1			 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+   CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+   Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+   Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+   error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+   interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+   MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+   Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+   timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+   func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+   func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+   timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 			 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+   follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0			 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1			 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+   General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+   [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+   attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+   General attn13; [12] General attn14; [13] General attn15; [14] General
+   attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+   [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+   RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+   RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+   attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+   rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+   ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 			 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0			 0xa700
+/* [W 14] A write to this register clears the latched
+   signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+   d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+   latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+   GRC Latched reserved access attention; one in d7 clears Latched
+   rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+   Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
+   ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
+   pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; a read
+   from this register returns zero. */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL				 0xa45c
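+/* Illustrative only; not part of the original file. Since this register is
+ * write-one-to-clear; a latched attention such as the GRC timeout latch
+ * (d5 above) could be cleared with a single write (REG_WR stands for a
+ * hypothetical 32-bit GRC write accessor):
+ *
+ *	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 1 << 5);
+ */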
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0			 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1			 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2			 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3			 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5			 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6			 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7			 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+   1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0			 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1			 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2			 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3			 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5			 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6			 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7			 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0				 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1				 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0				 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1				 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0			 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1			 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0			 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1			 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0				 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1				 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0				 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1				 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0			 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1			 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0			 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1			 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0				 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1				 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0				 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1				 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0			 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2			 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4			 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5			 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6			 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7			 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0			 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2			 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4			 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5			 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6			 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7			 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0				 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1				 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0				 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1				 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0			 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0			 0xa6b0
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+   128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0				 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1				 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10				 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11				 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12				 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2				 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3				 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4				 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5				 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6				 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7				 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8				 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9				 0xa024
+#define MISC_REG_AEU_GENERAL_MASK				 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+   0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+   function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+   [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+   [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+   for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+   Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+   interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+   Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+   Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0				 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1				 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+   0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+   error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+   interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+   Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+   interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+   DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+   error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+   PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+   [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+   [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+   [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+   [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0				 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1				 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+   [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0				 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1				 0xa064
+/* [RW 1] If set a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED				 0xa610
+/* [RW 32] Represents the status of the input vector to the AEU when a system
+   kill occurred. The register is reset in por reset. Mapped as follows: [0]
+   NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+   mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+   [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+   PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+   function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+   Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+   mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+   BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+   Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+   interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+   Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+   interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0				 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1				 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2				 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3				 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+   Port. */
+#define MISC_REG_BOND_ID					 0xa400
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM					 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+   starts at 0x0 for the A0 tape-out and increments by one for each
+   all-layer tape-out. */
+#define MISC_REG_CHIP_REV					 0xa40c
+/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
+ * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
+ * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
+#define MISC_REG_CHIP_TYPE					 0xac60
+#define MISC_REG_CHIP_TYPE_57811_MASK				 (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE				 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0				 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0				 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit is applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the L1 Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE LPI REQ
+ * indication is disabled. When =0 indicates that the P0 E0 EEE LPI REQ
+ * indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1 indicates
+ * that the P1 E0 EEE LPI REQ indication is disabled. When =0 indicates
+ * that the P1 E0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE LPI
+ * REQ Mask. When =1 indicates that the P0 E1 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 E1 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P1 E1
+ * EEE LPI REQ indication is disabled. When =0 indicates that the P1 E1 EEE
+ * LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0				 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit is applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the L1 Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P1 E0 EEE LPI REQ
+ * indication is disabled. When =0 indicates that the P1 E0 EEE LPI REQ
+ * indication is enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 E1 EEE LPI REQ indication is disabled. When =0 indicates
+ * that the P0 E1 EEE LPI REQ indication is enabled. [14] - P1 E1 EEE LPI
+ * REQ Mask. When =1 indicates that the P1 E1 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P1 E1 EEE LPI REQ indication is
+ * enabled. [15] - L1 REQ Mask. When =1
+ * indicates that the L1 REQ indication is disabled. When =0 indicates that
+ * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
+ * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
+ * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
+ * the RX EEE LPI Status Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0				 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0				 0xa8b8
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1				 0xa8bc
+/* [RW 32] The following driver registers (1...16) represent 16 drivers and
+   32 clients. Each client can be controlled by one driver only. A one in a
+   given bit means that this driver controls the corresponding client (Ex:
+   bit 5 set means this driver controls client number 5). addr1 = set;
+   addr0 = clear; a read from either address returns the same result =
+   status. A write to address 1 requests control of all the clients whose
+   corresponding bit (in the write command) is set. If the client is free
+   (the corresponding bit in all the other drivers is clear) a one will be
+   written to that driver register; if the client isn't free the bit will
+   remain zero. If the corresponding bit is already set (the driver
+   requests to gain control of a client it already controls) the
+   ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. A write
+   to address 0 requests to free all the clients whose corresponding bit
+   (in the write command) is set. If the corresponding bit is clear (the
+   driver requests to free a client it doesn't control) the
+   ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. */
+#define MISC_REG_DRIVER_CONTROL_1				 0xa510
+#define MISC_REG_DRIVER_CONTROL_7				 0xa3c8
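+/* Editorial illustration (not part of the original register map): the
+ * set/clear addressing described above is assumed to follow the usual
+ * 32-bit word stride; i.e. the "clear" command is the base address itself
+ * and the "set" command sits one word above it. Only the offsets are
+ * computed here; the access itself would go through the driver's usual
+ * register accessors (defined elsewhere). Assumes u32 is in scope via the
+ * including file.
+ */
+static inline u32 example_driver_control_set_addr(u32 base)
+{
+	return base + 0x4;	/* addr1 = set: request control of clients */
+}
+static inline u32 example_driver_control_clear_addr(u32 base)
+{
+	return base;		/* addr0 = clear: release clients */
+}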
+/* [RW 1] e1hmf for WOL. If clr; the WOL signal to the PXP will be sent on
+   bit 0 only. */
+#define MISC_REG_E1HMF_MODE					 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP				 0xa75c
+/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 -
+   the path_swap output is equal to 4 port mode path swap input pin; if it
+   is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR			 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP				 0xa754
+/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 -
+   the port_swap output is equal to 4 port mode port swap input pin; if it
+   is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR			 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0					 0xa460
+#define MISC_REG_GENERIC_CR_1					 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1					 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+   use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+   can not be configured as an output. Each output has its output enable in
+   the MCP register space; but this bit needs to be set to make use of that.
+   Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+   set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+   When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+   the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+   spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG					 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+   these bits is written as a '1'; the corresponding GPIO bit will turn off
+   its drivers and become an input. This is the reset state of all GPIO
+   pins. The read value of these bits will be a '1' if that last command
+   (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+   [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+   as a '1'; the corresponding GPIO bit will drive low. The read value of
+   these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+   this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET
+   port 0; When any of these bits is written as a '1'; the corresponding
+   GPIO bit will drive high (if it has that capability). The read value of
+   these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+   this bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE
+   port 0; RO; These bits indicate the read value of each of the eight GPIO
+   pins. This is the result value of the pin; not the drive value. Writing
+   these bits will have no effect. */
+#define MISC_REG_GPIO						 0xa490
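+/* Editorial illustration (not part of the original register map): building
+ * the write value that drives one GPIO pin high through the #SET field
+ * described above (SET port 0 in bits [11-8]; SET port 1 in bits [15-12]).
+ * Only the value is computed here; the register write itself is not shown.
+ * Assumes u32 is in scope via the including file.
+ */
+static inline u32 example_gpio_set_cmd(u32 pin, u32 port)
+{
+	return 1U << (8 + 4 * port + pin);	/* pin is 0-3; port is 0 or 1 */
+}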
+/* [RW 8] These bits enable the GPIO_INTs to signal events to the
+   IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+   p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+   [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN					 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
+   '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
+   This will acknowledge an interrupt on the falling edge of corresponding
+   GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0;
+   Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE
+   register. This will acknowledge an interrupt on the rising edge of
+   corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8]
+   OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input
+   value. When the ~INT_STATE bit is set; this bit indicates the OLD value
+   of the pin such that if ~INT_STATE is set and this bit is '0'; then the
+   interrupt is due to a low to high edge. If ~INT_STATE is set and this bit
+   is '1'; then the interrupt is due to a high to low edge (reset value 0).
+   [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits indicate the
+   current GPIO interrupt state for each GPIO pin. This bit is cleared when
+   the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is
+   set when the GPIO input does not match the current value in #OLD_VALUE
+   (reset value 0). */
+#define MISC_REG_GPIO_INT					 0xa494
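+/* Editorial illustration (not part of the original register map): decoding
+ * one pin's state from a raw MISC_REG_GPIO_INT read. Per the comment above,
+ * INT_STATE set with OLD_VALUE clear means a low-to-high (rising) edge.
+ * Assumes u32 is in scope via the including file.
+ */
+static inline int example_gpio_int_is_rising(u32 gpio_int, u32 pin, u32 port)
+{
+	u32 int_state = (gpio_int >> (4 * port + pin)) & 1;	  /* [7-0] */
+	u32 old_value = (gpio_int >> (8 + 4 * port + pin)) & 1;  /* [15-8] */
+
+	return int_state && !old_value;
+}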
+/* [R 28] this field holds the last information that caused reserved
+   attention. bits [19:0] - address; [22:20] function; [23] reserved;
+   [27:24] the master that caused the attention - according to the following
+   encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+   dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN					 0xa3c0
+/* [R 28] this field holds the last information that caused timeout
+   attention. bits [19:0] - address; [22:20] function; [23] reserved;
+   [27:24] the master that caused the attention - according to the following
+   encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+   dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN				 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+   access that does not finish within
+   ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+   cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+   assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 				 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+   the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+   111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+   (reset value 001) Charge pump current control; 111 for 720u; 011 for
+   600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+   Global bias control; When bit 7 is high bias current will be 10 0gh; When
+   bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+   Pll_observe (reset value 010) Bits to control observability. bit 10 is
+   for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+   (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+   and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+   sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+   internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+   connected to RESET input directly. [15] capRetry_en (reset value 0)
+   enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+   value 0) bit to continuously monitor vco freq (inverted). [17]
+   freqDetRestart_en (reset value 0) bit to enable restart when not freq
+   locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+   retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+   0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+   pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+   (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+   0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+   bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+   enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+   capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+   restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+   register bits. */
+#define MISC_REG_LCPLL_CTRL_1					 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2				 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN				 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA				 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG				 0xaa7c
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK					 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 				 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS					 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR				 0xa390
+#define MISC_REG_NIG_WOL_P0					 0xa270
+#define MISC_REG_NIG_WOL_P1					 0xa274
+/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
+   assertion */
+#define MISC_REG_PCIE_HOT_RESET 				 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
+   inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+   divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+   divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+   divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+   divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+   freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+   (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+   1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+   Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+   value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+   1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+   [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+   Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+   testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+   testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+   testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1				 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2				 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3				 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4				 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN					 0xa750
+/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output . */
+#define MISC_REG_PORT4MODE_EN_OVWR				 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
+   write/read zero = the specific block is in reset; addr 0-wr- the write
+   value will be written to the register; addr 1-set - one will be written
+   to all the bits that have the value of one in the data written (bits that
+   have the value of zero will not be changed); addr 2-clear - zero will be
+   written to all the bits that have the value of one in the data written
+   (bits that have the value of zero will not be changed); addr 3-ignore;
+   read ignore from all addr except addr 00; inside order of the bits is:
+   [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
+   [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
+   rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+   [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+   Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core;
+   [16] rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_1					 0xa580
+#define MISC_REG_RESET_REG_2					 0xa590
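+/* Editorial illustration (not part of the original register map): the
+ * wr/set/clear/ignore addressing described above is assumed to use the
+ * usual 32-bit word stride, so "set" lives at base + 4 and "clear" at
+ * base + 8. Writing a bit-mask to the computed offset then takes the
+ * corresponding blocks out of (set) or into (clear) reset; the write
+ * itself is not shown. Assumes u32 is in scope via the including file.
+ */
+static inline u32 example_reset_reg_set_addr(u32 base)
+{
+	return base + 0x4;	/* addr 1 - set */
+}
+static inline u32 example_reset_reg_clear_addr(u32 base)
+{
+	return base + 0x8;	/* addr 2 - clear */
+}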
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+   shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR				 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+   the corresponding SPIO bit will turn off its drivers and become an
+   input. This is the reset state of all SPIO pins. The read value of these
+   bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+   bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+   is written as a '1'; the corresponding SPIO bit will drive low. The read
+   value of these bits will be a '1' if that last command (#SET; #CLR; or
+#FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+   these bits is written as a '1'; the corresponding SPIO bit will drive
+   high (if it has that capability). The read value of these bits will be a
+   '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+   (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+   each of the eight SPIO pins. This is the result value of the pin; not the
+   drive value. Writing these bits will have no effect. Each 8-bit field
+   is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+   from VAUX. (This is an output pin only; the FLOAT field is not applicable
+   for this pin); [1] VAUX Disable; when pulsed low; disables supply form
+   VAUX. (This is an output pin only; FLOAT field is not applicable for this
+   pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+   select VAUX supply. (This is an output pin only; it is not controlled by
+   the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+   field is not applicable for this pin; only the VALUE field is relevant -
+   it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+   Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+   device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO						 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
+   according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+   [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN					 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the falling edge of corresponding SPIO input (reset value
+   0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
+   in the #OLD_VALUE register. This will acknowledge an interrupt on the
+   rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
+   RO; These bits indicate the old value of the SPIO input value. When the
+   ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+   that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+   to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+   interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+   RO; These bits indicate the current SPIO interrupt state for each SPIO
+   pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+   command bit is written. This bit is set when the SPIO input does not
+   match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT					 0xa500
+/* [RW 32] reload value for counter 4 if reload; the value will be reloaded if
+   the counter reached zero and the reload bit
+   (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4				 0xa2fc
+/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
+   in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
+   timer 8 */
+#define MISC_REG_SW_TIMER_VAL					 0xa5c0
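+/* Editorial illustration (not part of the original register map): the eight
+ * timer values above are assumed to sit at consecutive 32-bit addresses, so
+ * timer N (1..8) is read from the base plus (N - 1) * 4. Assumes u32 is in
+ * scope via the including file.
+ */
+static inline u32 example_sw_timer_val_addr(u32 timer)
+{
+	return MISC_REG_SW_TIMER_VAL + (timer - 1) * 4;	/* timer is 1..8 */
+}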
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP				 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+   path_swap output is equal to 2 port mode path swap input pin; if it is 1
+   - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR			 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+   loaded; 0-prepare; 1-unprepare */
+#define MISC_REG_UNPREPARED					 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR				 0xa9cc
+#define MISC_REG_WC0_RESET					 0xac30
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+   side. This should be less than or equal to phy_port_mode; if some of the
+   ports are not used. This enables reduction of frequency on the core side.
+   This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 -
+   Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap
+   input for the XMAC_MP core; and should be changed only while reset is
+   held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE				 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+   Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode;
+   01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the
+   XMAC_MP core; and should be changed only while reset is held low. Reset
+   on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE				 0xa960
+/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO				 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO				 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST	 (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST	 (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN	 (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST	 (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN	 (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN			 (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN			 (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT	 (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS	 (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G 	 (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS	 (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN					 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN					 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN				 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN				 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN					 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN				 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN					 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN				 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN					 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+   error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+   [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 				 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN				 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+   packets from PBF are not forwarded to the MAC; they are simply deleted
+   from the FIFO. The first packet may be deleted from the middle; and the
+   last packet will always be deleted to the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE				 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN				 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
+   to emac for port0; otherwise to bmac for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT				 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN				 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN				 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN				 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN					 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN				 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+   EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+   be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT			 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC0 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM				 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC1 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM				 0x11000
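+/* Editorial illustration (not part of the original register map): as the
+ * two comments above describe, a BMAC register is reached by adding its
+ * offset to the per-port memory base. Assumes u32 is in scope via the
+ * including file.
+ */
+static inline u32 example_bmac_reg_addr(u32 port, u32 bmac_reg_offset)
+{
+	u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM;
+
+	return base + bmac_reg_offset;
+}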
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY				 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+   packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO				 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch
+   logic for interrupts must be used. Enable per bit of interrupt of
+   ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0					 0x16210
+/* [RW 27] Latch for each interrupt from Unicore.b[0]
+   status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+   b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+   b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+   b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+   b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+   b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+   b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+   b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+   b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+   b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+   b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+   b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0					 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0					 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1					 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+   defined below. If this bit is cleared; then the blink rate will be about
+   8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0			 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+   Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
+   is reset to 0x080; giving a default blink rate of approximately 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0			 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+   bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+   bit; the Traffic LED will blink with the blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0			 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+   Traffic LED will then be controlled via bit ~nig_registers_
+   led_control_traffic_p0.led_control_traffic_p0 and bit
+   ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 		 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+   turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+   set; the LED will blink with blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0				 0x10300
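+/* Editorial illustration (not part of the original register map): making
+ * the port-0 Traffic LED blink under software control combines the
+ * registers above - override on; traffic on; blink-traffic on; blink-rate
+ * enable on; and the blink period (in milliseconds) written to the
+ * blink-rate register. The helper below only converts a desired blink
+ * frequency into that period value; the writes themselves are not shown.
+ * Assumes u32 is in scope via the including file.
+ */
+static inline u32 example_led_blink_period_ms(u32 blink_hz)
+{
+	return blink_hz ? 1000 / blink_hz : 0x080;	/* 0x080 is the reset default */
+}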
+/* [RW 4] led mode for port0: 0 - MAC; 1-3 - PHY1; 4 - MAC2; 5-7 - PHY4;
+   8 - MAC3; 9-11 - PHY7; 12 - MAC4; 13-15 - PHY10; */
+#define NIG_REG_LED_MODE_P0					 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
+   tsdm enable; b2- usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0			 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1			 0x16074
+/* [RW 1] SAFC enable for port0. This register may get 1 only when
+   ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
+   port */
+#define NIG_REG_LLFC_ENABLE_0					 0x16208
+#define NIG_REG_LLFC_ENABLE_1					 0x1620c
+/* [RW 16] classes are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0			 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1			 0x1605c
+/* [RW 16] classes are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0			 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1			 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0					 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1					 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC				 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN				 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK				 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF				 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP				 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification. 1:
+   classification upon VLAN id. 2: classification upon MAC address. 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE					 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER					 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1				 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0				 0x101c0
+/* [RW 16] destination TCP address 1. The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 				 0x10220
+/* [RW 16] destination UDP address 1 The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 				 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 				 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID					 0x10084
+#define NIG_REG_LLH0_FUNC_EN					 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM					 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE				 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID				 0x16100
+/* [RW 1] Determine the IP version to look for in
+   ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0				 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT					 0x10074
+/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
+#define NIG_REG_LLH0_VLAN_ID_0					 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT				 0x10554
+#define NIG_REG_LLH0_XCM_MASK					 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK				 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP				 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification. 1:
+   classification upon VLAN id. 2: classification upon MAC address. 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE					 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER					 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 				 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID					 0x10088
+#define NIG_REG_LLH1_FUNC_EN					 0x16104
+#define NIG_REG_LLH1_FUNC_MEM					 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE					 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT				 0x10564
+#define NIG_REG_LLH1_XCM_MASK					 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+   e1hov */
+#define NIG_REG_LLH_E1HOV_MODE					 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1				 0x16028
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+   sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE					 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0				 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1				 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN					 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN					 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+   EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC			 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0					 0x103b0
+#define NIG_REG_NIG_INT_STS_1					 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0				 0x103b4
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK					 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0					 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1					 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS					 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0					 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1					 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR				 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0				 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1				 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE					 (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT			 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT			 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT		 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC				 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE				 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2				 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE			 0x18440
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID			 0x1875c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB			 0x18754
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB			 0x18758
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_LLH_PTP_PARAM_MASK				 0x187a0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P0_LLH_PTP_RULE_MASK				 0x187a4
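+/* Editorial illustration (not part of the original register map): both PTP
+ * mask registers above reset to "everything masked out", so detection is
+ * enabled by clearing bits. For example, to detect PTP event messages sent
+ * to IPv4 DA 0 (224.0.1.129) on UDP destination port 319, parameter bits 0
+ * and 4 and rule bit 0 ({IPv4 DA 0; UDP DP 0}) would be cleared while every
+ * other bit stays set. Only the values to write are shown; the register
+ * accesses themselves are not.
+ */
+#define EXAMPLE_PTP_PARAM_MASK_IPV4_UDP319	(~((1 << 0) | (1 << 4)) & 0x7ff)
+#define EXAMPLE_PTP_RULE_MASK_IPV4_UDP319	(~(1 << 0) & 0x3fff)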
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P0_LLH_PTP_TO_HOST				 0x187ac
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN					 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN					 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN				 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS				 0x18054
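+/* Editorial illustration (not part of the original register map): packing
+ * one per-priority COS selection into the 32-bit value described above,
+ * 4 bits per VLAN priority (priority 0 in bits 3:0, priority 7 in bits
+ * 31:28); the eight fields are OR-ed together before being written.
+ * Assumes u32 is in scope via the including file.
+ */
+static inline u32 example_prio_to_cos_field(u32 prio, u32 cos)
+{
+	return (cos & 0xf) << (prio * 4);	/* prio is 0..7 */
+}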
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P0_PTP_EN					 0x18788
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK			 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK			 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK			 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK			 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK			 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK			 0x186bc
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP			 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x1868c
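+/* Editorial illustration (not part of the original register map): the
+ * 36-bit client-to-credit-register mapping is split across the two
+ * registers above - bits 31:0 go to the LSB register and bits 35:32 to the
+ * MSB register (e.g. for the documented reset default of 0x543210876).
+ * Assumes u32/u64 are in scope via the including file.
+ */
+static inline void example_split_credit_map2(u64 map36, u32 *lsb, u32 *msb)
+{
+	*lsb = (u32)(map36 & 0xffffffff);	/* clients 0-7 (bits 31:0) */
+	*msb = (u32)((map36 >> 32) & 0xf);	/* client 8 (bits 35:32) */
+}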
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT			 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ		 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0			 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1			 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4			 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6			 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7			 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8			 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0			 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1			 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2			 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3			 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4			 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5			 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6			 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7			 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8			 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS			 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT			 0x180e4
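+/* Editorial illustration (not part of the original register map): decoding
+ * one 3-bit client ID from the packed priority-to-client mapping above;
+ * with the documented reset value 0x4688 this returns 0 (management) for
+ * priority 0 and 4 (COS1 traffic) for priority 4. Assumes u32 is in scope
+ * via the including file.
+ */
+static inline u32 example_arb_client_for_priority(u32 map, u32 prio)
+{
+	return (map >> (prio * 3)) & 0x7;	/* prio is 0..4; 3 bits each */
+}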
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC				 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2				 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID			 0x18774
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB			 0x1876c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB			 0x18770
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_LLH_PTP_PARAM_MASK				 0x187c8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P1_LLH_PTP_RULE_MASK				 0x187cc
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P1_LLH_PTP_TO_HOST				 0x187d4
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB			 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB			 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
+ * for BRB LB interface is bypassed and PBF LB traffic is always selected to
+ * send to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE				 0x182f4
+#define NIG_REG_P1_HWPFC_ENABLE					 0x181d0
+#define NIG_REG_P1_MAC_IN_EN					 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN					 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN				 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS				 0x181a8
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P1_PTP_EN					 0x187b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK			 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK			 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK			 0x186f8
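+/* Illustrative sketch: each mask is a bit-map over the SAFC/PFC priorities,
+ * so mapping priorities 0-3 to COS 0 and priorities 4-7 to COS 1 could be
+ * written as follows (driver REG_WR() accessor assumed):
+ *
+ *	REG_WR(bp, NIG_REG_P1_RX_COS0_PRIORITY_MASK, 0x0f);
+ *	REG_WR(bp, NIG_REG_P1_RX_COS1_PRIORITY_MASK, 0xf0);
+ */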
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY				 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY				 0x18338
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_SEQID				 0x187e0
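+/* Illustrative sketch: consuming the buffered TX timestamp information and
+ * then clearing the one-deep buffer by writing 1 to bit 16, as described
+ * above. Assumes the driver's REG_RD()/REG_WR() accessors.
+ *
+ *	u32 val = REG_RD(bp, NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ *	if (val & (1 << 16)) {			// buffered data is valid
+ *		u16 seqid = val & 0xffff;	// PTP sequence ID of the packet
+ *		// ... match seqid against the pending TX timestamp request ...
+ *		REG_WR(bp, NIG_REG_P0_TLLH_PTP_BUF_SEQID, 1 << 16); // clear buffer
+ *	}
+ */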
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB				 0x187d8
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB				 0x187dc
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_TLLH_PTP_PARAM_MASK				 0x187f0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P0_TLLH_PTP_RULE_MASK				 0x187f4
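+/* Illustrative sketch: the reset defaults mask out every parameter and rule,
+ * so no PTP packets are detected. Detecting, for example, PTP over UDP/IPv4
+ * (rule 0: IPv4 DA 0 + UDP DP 0) and L2 PTP (rule 8: MAC DA 0 + Ethertype 0)
+ * means clearing the corresponding parameter and rule bits while leaving the
+ * rest masked. The mask values below just follow the bit numbering documented
+ * above; the driver's REG_WR() accessor is assumed.
+ *
+ *	// unmask IPv4 DA 0 (bit 0), UDP DP 0 (bit 4), Ethertype 0 (bit 6) and
+ *	// MAC DA 0 (bit 8); keep all other parameters masked
+ *	REG_WR(bp, NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7ff & ~0x151);
+ *	// unmask rules 0 and 8; keep all other rules masked
+ *	REG_WR(bp, NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3fff & ~0x101);
+ */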
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_SEQID				 0x187ec
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB				 0x187e4
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB				 0x187e8
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_TLLH_PTP_PARAM_MASK				 0x187f8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P1_TLLH_PTP_RULE_MASK				 0x187fc
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT			 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ			 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0			 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1			 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4			 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0			 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1			 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2			 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3			 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4			 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5			 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+   two round-robin arbitration slots to avoid starvation. A value of 0 means
+   no strict priority cycles - the strict priority with anti-starvation
+   arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS			 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB			 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB			 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY				 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE				 0x182f8
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+   forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY			 0x182b8
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+/* [RW 1] Pause enable for port0. This register may be set to 1 only when
+   ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
+   port */
+#define NIG_REG_PAUSE_ENABLE_0					 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1					 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN					 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+   ~nig_registers_strap_override.strap_override =1 */
+#define NIG_REG_PORT_SWAP					 0x10394
+/* [RW 1] PPP enable for port0. This register may be set to 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0					 0x160b0
+#define NIG_REG_PPP_ENABLE_1					 0x160b4
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN					 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN					 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD				 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST				 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR				 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS			 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD				 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE				 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0				 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1				 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD				 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0				 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1				 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 				 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0				 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1				 0x1032c
+/* [RW 1] port swap mux selection. If this register is equal to 0 then port
+   swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+   port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE					 0x10398
+/* [WB 64] Addresses for TimeSync related registers in the timesync
+ * generator sub-module.
+ */
+#define NIG_REG_TIMESYNC_GEN_REG				 0x18800
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN					 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN					 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST			 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD				 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST				 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR				 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G				 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS			 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0				 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL				 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT  (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G	 (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS  (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
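+/* Illustrative sketch: decoding the port 0 status interrupt word with the
+ * masks above; the _SIZE constant matches the field's bit offset (the mask is
+ * 0xf << 18). Driver REG_RD() accessor assumed.
+ *
+ *	u32 sts = REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0);
+ *	bool link10g = sts &
+ *		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G;
+ *	u32 link_sts = (sts &
+ *		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS) >>
+ *		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE;
+ */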
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND				 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0				 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1				 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT					 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0					 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1					 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND				 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT					 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0					 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1					 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0					 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1					 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0					 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0					 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0					 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q					 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0					 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1					 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0			 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1			 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4			 0x14006c
+#define PBF_REG_DISABLE_PF					 0x1402e8
+#define PBF_REG_DISABLE_VF					 0x1402ec
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0			 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1			 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0			 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1			 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0		 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1		 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0			 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1			 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, to which the RR output is connected (this is
+ * not configurable).
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0			 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, to which the RR output is connected (this is
+ * not configurable).
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1			 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED					 0x15c050
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC				 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0				 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM				 0x15c04c
+#define PBF_REG_IF_ENABLE_REG					 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+   registers (except the port credits). Should be set and then reset after
+   the configuration of the block has ended. */
+#define PBF_REG_INIT						 0x140000
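+/* Illustrative sketch: the set-then-clear sequence described above, applied
+ * once the block configuration is complete. Driver REG_WR() accessor assumed.
+ *
+ *	REG_WR(bp, PBF_REG_INIT, 1);	// copy initial credits to credit registers
+ *	REG_WR(bp, PBF_REG_INIT, 0);	// release init
+ */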
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q					 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0					 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1					 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P0 					 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P1 					 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P4 					 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q			 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0			 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1			 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE					 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE					 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE					 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS					 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS				 0x15c064
+/* [RW 10] Port 0 threshold used by the arbiter in 16 byte lines when pause
+   is not supported. */
+#define PBF_REG_P0_ARB_THRSH					 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT					 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P0_INIT_CRD					 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT			 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE					 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT					 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT				 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY					 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT					 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD					 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT			 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT					 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT				 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY					 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT					 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P4_INIT_CRD					 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT			 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT					 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT				 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY					 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK					 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS					 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK					 0x1401e4
+/* [R 28] Parity register #0 read */
+#define PBF_REG_PBF_PRTY_STS					 0x1401d8
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0					 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0					 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q				 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0				 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1				 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q				 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0					 0x15c06c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK					 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS					 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK					 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS					 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR					 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR		 (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW	 (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR	 (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN		 (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN	 (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN  (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN	 (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN	 (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN		 (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST			 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST			 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE			 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF				 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE			 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A			 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B			 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B				 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF			 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0				 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR			 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96			 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0			 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32			 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64			 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS			 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER		 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ		 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE		 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE			 0x9438
+/* [W 7] Writing 1 to each bit in this register clears a corresponding error
+ * details register and enables logging new error details. Bit 0 - clears
+ * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
+ * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
+ * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
+ * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
+ * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
+ * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
+ * - clears TCPL_IN_TWO_RCBS_DETAILS. */
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR				 0x943c
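+/* Illustrative sketch: re-arming error logging after handling an event, per
+ * the write-1-to-clear bit assignment above. Driver REG_WR() accessor
+ * assumed.
+ *
+ *	// clear INCORRECT_RCV_DETAILS (bit 0) and RX_ERR_DETAILS (bit 1)
+ *	REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 0) | (1 << 1));
+ */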
+
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS				 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR			 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK				 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS				 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR			 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS				 0x9080
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS			 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR			 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST			 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32					 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE			 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A			 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B			 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B				 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF			 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0				 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32			 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS				 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2				 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0				 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32			 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS				 0x9090
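+/* Illustrative sketch: unpacking the first-error write details per the field
+ * layout documented above. Driver REG_RD() accessor assumed.
+ *
+ *	u32 d = REG_RD(bp, PGLUE_B_REG_TX_ERR_WR_DETAILS);
+ *	u32 vq_id     = d & 0x1f;		// [4:0] VQID
+ *	u32 len       = (d >> 8) & 0x1fff;	// [20:8] length in bytes
+ *	u32 pf_id     = (d >> 21) & 0x7;	// [23:21] PFID
+ *	bool vf_valid = d & (1 << 24);		// [24] VF_VALID
+ *	u32 vf_id     = (d >> 25) & 0x3f;	// [30:25] VFID
+ */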
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2				 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0				 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1				 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2				 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3				 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4				 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5				 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6				 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE			 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE			 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE			 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A			 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B			 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B				 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF			 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS		 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS		 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0				 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * was_error_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR			 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96			 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR			 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0				 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR			 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32				 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR			 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64				 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR			 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE			 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A			 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B			 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B				 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF			 0x91a8
+#define PRS_REG_A_PRSU_20					 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT				 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT			 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+   Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT			 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0					 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+   load response is reset and packet type is 0. Used in packet start message
+   to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0			 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1			 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2			 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3			 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4			 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5			 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+   load response is set and packet type is 0. Used in packet start message
+   to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0			 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1			 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2			 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3			 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4			 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5			 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+   Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1				 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2				 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3				 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4				 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+   message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0					 0x40078
+#define PRS_REG_CM_HDR_TYPE_1					 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2					 0x40080
+#define PRS_REG_CM_HDR_TYPE_3					 0x40084
+#define PRS_REG_CM_HDR_TYPE_4					 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 				 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE					 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+   start message to TCM. */
+#define PRS_REG_EVENT_ID_1					 0x40054
+#define PRS_REG_EVENT_ID_2					 0x40058
+#define PRS_REG_EVENT_ID_3					 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE					 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+   load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0				 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1				 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2				 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3				 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4				 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5				 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6				 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7				 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC				 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0				 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1				 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0				 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0				 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1				 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE					 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS					 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0				 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1				 0x402ac
+#define PRS_REG_NIC_MODE					 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+   connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID				 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES			 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation since it
+   could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES				 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS					 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES		 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+   packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0				 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1				 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2				 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3				 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4				 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5				 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6				 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7				 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ				 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ				 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS					 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK					 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS					 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR				 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+   request message */
+#define PRS_REG_PURE_REGIONS					 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB				 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB				 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT				 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0					 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0					 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT				 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0					 0x401a8
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT			 (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF			 (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN			 (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED		 (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED		 (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR		 (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR		 (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS				 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS				 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0					 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register */
+#define PXP2_REG_PGL_ADDR_88_F1					 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0					 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register */
+#define PXP2_REG_PGL_ADDR_8C_F1					 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0					 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register */
+#define PXP2_REG_PGL_ADDR_90_F1					 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0					 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register */
+#define PXP2_REG_PGL_ADDR_94_F1					 0x120550
+#define PXP2_REG_PGL_CONTROL0					 0x120490
+#define PXP2_REG_PGL_CONTROL1					 0x120514
+#define PXP2_REG_PGL_DEBUG					 0x120520
+/* [RW 32] third dword data of expansion rom request. this register is
+   special. reading from it provides a vector of outstanding read requests. if
+   a bit is zero it means that a read request on the corresponding tag did
+   not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2					 0x120808
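+/* Illustrative sketch: a zero bit means the read request on that tag has not
+ * finished, so waiting for all outstanding reads could be done by polling
+ * until every bit reads back as 1. Driver REG_RD() accessor and msleep()
+ * assumed.
+ *
+ *	int cnt = 1000;
+ *	while (cnt-- && REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) != 0xffffffff)
+ *		msleep(1);
+ */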
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 				 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 				 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 				 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 				 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 				 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 				 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 				 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 				 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 				 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 				 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 				 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 				 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 				 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 				 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 				 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 				 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 				 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 				 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 				 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 				 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 				 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 				 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 				 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 				 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 				 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 				 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 				 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 				 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 				 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 				 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 				 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 				 0x1204f0
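+/*
+ * The four inbound interrupt tables above (CSDM/TSDM/USDM/XSDM) share one
+ * entry layout: mask in bits [31:16]; address in bits [15:0]. A minimal
+ * packing sketch (mask/addr are illustrative variables; REG_WR() is the
+ * driver's register accessor):
+ *
+ *	u32 entry = (mask << 16) | (addr & 0xffff);
+ *	REG_WR(bp, PXP2_REG_PGL_INT_TSDM_0, entry);
+ */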
+/* [RW 3] this field allows one function to pretend to be another function
+   when accessing any BAR mapped resource within the device. the value of
+   the field is the number of the function that will effectively be
+   accessed. after software writes this field it must read it back in order
+   to know that the new value has been applied */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0				 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1				 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2				 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3				 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4				 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5				 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6				 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7				 0x120690
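+/*
+ * Usage sketch (assuming the driver's REG_WR()/REG_RD() accessors): as
+ * described above, the pretend value only takes effect after the register
+ * is read back following the write:
+ *
+ *	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, pretend_func_num);
+ *	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);
+ */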
+/* [R 1] this bit indicates that a read request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED				 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 				 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS					 0x12052c
+/* [R 1] this bit indicates that a write request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED				 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1					 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 				 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 				 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2					 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 				 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3					 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6					 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7					 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8					 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9					 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT				 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1					 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10					 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11					 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2					 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28					 0x120318
+#define PXP2_REG_PSWRQ_BW_L3					 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6					 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7					 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8					 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9					 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD					 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1					 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10					 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11					 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2					 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28					 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3					 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6					 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7					 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8					 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9					 0x120258
+#define PXP2_REG_PSWRQ_BW_WR					 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 				 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P					 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 				 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P					 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P				 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0				 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 				 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 				 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0				 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0				 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1				 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0				 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1				 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0				 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1				 0x120590
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+   indication about backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0				 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT					 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+   Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 				 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE				 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS				 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE					 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10				 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11				 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17				 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18				 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19				 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22				 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25				 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6				 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9				 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE				 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0				 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1				 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE				 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT					 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE				 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+   be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG					 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT					 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE				 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0					 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 				 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 				 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 				 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 				 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 				 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 				 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 				 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 				 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 				 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 				 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 				 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 				 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 				 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 				 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 				 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4					 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5					 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0					 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12					 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13					 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14					 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15					 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16					 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17					 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18					 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19					 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20					 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22					 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23					 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24					 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25					 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26					 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27					 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4					 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5					 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0				 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12				 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13				 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14				 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15				 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16				 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17				 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18				 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19				 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20				 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22				 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23				 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24				 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25				 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26				 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27				 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4				 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5				 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 				 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 				 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29					 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30					 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29				 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30				 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR			 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M				 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT				 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT				 0x120620
+/* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k;
+   -128k */
+#define PXP2_REG_RQ_CDU_P_SIZE					 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+   configuration */
+#define PXP2_REG_RQ_CFG_DONE					 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M				 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+   towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS				 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN					 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD				 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1 E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL				 0x120930
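+/*
+ * The PXP2_REG_RQ_DRAM_ALIGN and PXP2_REG_RQ_DRAM_ALIGN_RD fields above
+ * encode the split-request alignment as 0 - 8B; 1 - 64B; 2 - 128B;
+ * 3 - 256B; 4 - 512B. A small decoding sketch:
+ *
+ *	alignment_bytes = val ? (32 << val) : 8;
+ */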
+/* [RW 1] If 1; an ILT failure will not result in ELT access; an interrupt
+   will be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 				 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 				 0x1201a8
+/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
+   compatibility needs; note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE					 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT					 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0				 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT					 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 				 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT				 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 				 0x120638
+/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
+   -128k */
+#define PXP2_REG_RQ_QM_P_SIZE					 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE					 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0					 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1					 0x120168
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M				 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT				 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT				 0x120640
+/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k;
+   -128k */
+#define PXP2_REG_RQ_SRC_P_SIZE					 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 				 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT				 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 				 0x120648
+/* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k;
+   -128k */
+#define PXP2_REG_RQ_TM_P_SIZE					 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY				 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR			 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT				 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT				 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT				 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT				 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT				 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT				 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT				 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT				 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT				 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT				 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT				 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT				 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT				 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT				 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT				 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT				 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT				 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT				 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT				 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT				 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT				 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT				 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT				 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT				 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT				 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT				 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT				 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT				 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT				 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT				 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT				 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT				 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0					 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1					 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS					 0x1205f0
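+/*
+ * This and the other *_MPS registers below share the same 2-bit encoding
+ * (0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B), i.e. the payload threshold in
+ * bytes can be derived as:
+ *
+ *	mps_bytes = 128 << mps_val;
+ */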
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS					 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS					 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS					 0x1205ec
+/* [RW 10] if the number of entries in the dmae fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_DMAE_TH					 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS					 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS					 0x1205dc
+/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE					 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS					 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS					 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS					 0x1205d4
+/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_USDMDP_TH					 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS					 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS					 0x1205d8
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 				 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+   this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB			 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+   block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS				 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
+   should be updated according to the 'hst_discard_doorbells' register when
+   the state machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS			 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+   Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES			 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+   means this PSWHST is discarding inputs from this client. Each bit should
+   be updated according to the 'hst_discard_internal_writes' register when
+   the state machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS		 0x10309c
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 				 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are: {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE			 0x103400
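+/*
+ * Packing sketch for one permission-table entry ({Valid; VFID[5:0]});
+ * variable names and the per-zone stride of 4 bytes are assumptions:
+ *
+ *	u32 entry = (1 << 6) | (vfid & 0x3f);
+ *	REG_WR(bp, PXP_REG_HST_ZONE_PERMISSION_TABLE + zone_id * 4, entry);
+ */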
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0					 0x103074
+#define PXP_REG_PXP_INT_MASK_1					 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0					 0x103068
+#define PXP_REG_PXP_INT_STS_1					 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0				 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1				 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK					 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS					 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR				 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+   request */
+#define QM_REG_ACTCTRINITVAL_0					 0x168040
+#define QM_REG_ACTCTRINITVAL_1					 0x168044
+#define QM_REG_ACTCTRINITVAL_2					 0x168048
+#define QM_REG_ACTCTRINITVAL_3					 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored and
+   considered zero so practically there are only 20 bits in this register;
+   queues 63-0 */
+#define QM_REG_BASEADDR 					 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored and
+   considered zero so practically there are only 20 bits in this register;
+   queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A					 0x16e100
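+/*
+ * Usage sketch (REG_WR() and the 4-byte per-queue stride are assumptions):
+ * since the 12 lsbs are ignored, the programmed base address is effectively
+ * 4KB aligned:
+ *
+ *	u32 reg = (q < 64) ? QM_REG_BASEADDR : QM_REG_BASEADDR_EXT_A;
+ *	REG_WR(bp, reg + (q & 63) * 4, base_addr & ~0xfff);
+ */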
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST					 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL					 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB					 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A				 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB					 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A				 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+   full */
+#define QM_REG_BYTECREDITAFULLTHR				 0x168094
+/* [RW 4] The initial credit for interface */
+#define QM_REG_CMINITCRD_0					 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0					 0x16e6e8
+#define QM_REG_CMINITCRD_1					 0x1680d0
+#define QM_REG_CMINITCRD_2					 0x1680d4
+#define QM_REG_CMINITCRD_3					 0x1680d8
+#define QM_REG_CMINITCRD_4					 0x1680dc
+#define QM_REG_CMINITCRD_5					 0x1680e0
+#define QM_REG_CMINITCRD_6					 0x1680e4
+#define QM_REG_CMINITCRD_7					 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+   is masked */
+#define QM_REG_CMINTEN						 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+   interface 0 */
+#define QM_REG_CMINTVOQMASK_0					 0x1681f4
+#define QM_REG_CMINTVOQMASK_1					 0x1681f8
+#define QM_REG_CMINTVOQMASK_2					 0x1681fc
+#define QM_REG_CMINTVOQMASK_3					 0x168200
+#define QM_REG_CMINTVOQMASK_4					 0x168204
+#define QM_REG_CMINTVOQMASK_5					 0x168208
+#define QM_REG_CMINTVOQMASK_6					 0x16820c
+#define QM_REG_CMINTVOQMASK_7					 0x168210
+/* [RW 20] The number of connections divided by 16 which dictates the size
+   of each queue which belongs to even function number. */
+#define QM_REG_CONNNUM_0					 0x168020
+/* [R 6] Keeps the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL					 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 					 0x168030
+#define QM_REG_CTXREG_1 					 0x168034
+#define QM_REG_CTXREG_2 					 0x168038
+#define QM_REG_CTXREG_3 					 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+   bypass enable */
+#define QM_REG_ENBYPVOQMASK					 0x16823c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB					 0x168220
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A				 0x16e518
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB					 0x16821c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A				 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+   RR arbiter */
+#define QM_REG_ENSEC						 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB					 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB					 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 31:0 */
+#define QM_REG_HWAEMPTYMASK_LSB 				 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A				 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 63:32 */
+#define QM_REG_HWAEMPTYMASK_MSB 				 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A				 0x16e50c
+/* [RW 4] The number of outstanding requests to CFC */
+#define QM_REG_OUTLDREQ 					 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+   queues. */
+#define QM_REG_OVFERROR 					 0x16805c
+/* [RC 7] the Q where the overflow occurs */
+#define QM_REG_OVFQNUM						 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0					 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1					 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2					 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3					 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4					 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5					 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6					 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7					 0x16e698
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 					 0x168054
+#define QM_REG_PF_EN						 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. only even
+ * functions are valid in E2 (odd I registers will be hard wired to 0) */
+#define QM_REG_PF_USG_CNT_0					 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD					 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD					 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0					 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1					 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2					 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3					 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4					 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5					 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6					 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7					 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
+   ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+   bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL						 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows:
+   ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+   bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL_EXT_A					 0x16e200
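+/*
+ * Unpacking sketch for one 54-bit pointer-table entry following the layout
+ * described above (entry holds the wide-bus value in a u64; field names are
+ * illustrative):
+ *
+ *	rd_ptr   = (entry >> 30) & 0xffffff;	bits 53:30
+ *	wr_ptr   = (entry >> 6)  & 0xffffff;	bits 29:6
+ *	rd_bank0 = (entry >> 4)  & 0x3;		bits 5:4
+ *	rd_bank1 = (entry >> 2)  & 0x3;		bits 3:2
+ *	wr_bank  = entry         & 0x3;		bits 1:0
+ */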
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK					 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS					 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK					 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS					 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR					 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH					 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A				 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW					 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A				 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0					 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 				 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0					 0x1680f4
+#define QM_REG_QVOQIDX_10					 0x16811c
+#define QM_REG_QVOQIDX_100					 0x16e49c
+#define QM_REG_QVOQIDX_101					 0x16e4a0
+#define QM_REG_QVOQIDX_102					 0x16e4a4
+#define QM_REG_QVOQIDX_103					 0x16e4a8
+#define QM_REG_QVOQIDX_104					 0x16e4ac
+#define QM_REG_QVOQIDX_105					 0x16e4b0
+#define QM_REG_QVOQIDX_106					 0x16e4b4
+#define QM_REG_QVOQIDX_107					 0x16e4b8
+#define QM_REG_QVOQIDX_108					 0x16e4bc
+#define QM_REG_QVOQIDX_109					 0x16e4c0
+#define QM_REG_QVOQIDX_11					 0x168120
+#define QM_REG_QVOQIDX_110					 0x16e4c4
+#define QM_REG_QVOQIDX_111					 0x16e4c8
+#define QM_REG_QVOQIDX_112					 0x16e4cc
+#define QM_REG_QVOQIDX_113					 0x16e4d0
+#define QM_REG_QVOQIDX_114					 0x16e4d4
+#define QM_REG_QVOQIDX_115					 0x16e4d8
+#define QM_REG_QVOQIDX_116					 0x16e4dc
+#define QM_REG_QVOQIDX_117					 0x16e4e0
+#define QM_REG_QVOQIDX_118					 0x16e4e4
+#define QM_REG_QVOQIDX_119					 0x16e4e8
+#define QM_REG_QVOQIDX_12					 0x168124
+#define QM_REG_QVOQIDX_120					 0x16e4ec
+#define QM_REG_QVOQIDX_121					 0x16e4f0
+#define QM_REG_QVOQIDX_122					 0x16e4f4
+#define QM_REG_QVOQIDX_123					 0x16e4f8
+#define QM_REG_QVOQIDX_124					 0x16e4fc
+#define QM_REG_QVOQIDX_125					 0x16e500
+#define QM_REG_QVOQIDX_126					 0x16e504
+#define QM_REG_QVOQIDX_127					 0x16e508
+#define QM_REG_QVOQIDX_13					 0x168128
+#define QM_REG_QVOQIDX_14					 0x16812c
+#define QM_REG_QVOQIDX_15					 0x168130
+#define QM_REG_QVOQIDX_16					 0x168134
+#define QM_REG_QVOQIDX_17					 0x168138
+#define QM_REG_QVOQIDX_21					 0x168148
+#define QM_REG_QVOQIDX_22					 0x16814c
+#define QM_REG_QVOQIDX_23					 0x168150
+#define QM_REG_QVOQIDX_24					 0x168154
+#define QM_REG_QVOQIDX_25					 0x168158
+#define QM_REG_QVOQIDX_26					 0x16815c
+#define QM_REG_QVOQIDX_27					 0x168160
+#define QM_REG_QVOQIDX_28					 0x168164
+#define QM_REG_QVOQIDX_29					 0x168168
+#define QM_REG_QVOQIDX_30					 0x16816c
+#define QM_REG_QVOQIDX_31					 0x168170
+#define QM_REG_QVOQIDX_32					 0x168174
+#define QM_REG_QVOQIDX_33					 0x168178
+#define QM_REG_QVOQIDX_34					 0x16817c
+#define QM_REG_QVOQIDX_35					 0x168180
+#define QM_REG_QVOQIDX_36					 0x168184
+#define QM_REG_QVOQIDX_37					 0x168188
+#define QM_REG_QVOQIDX_38					 0x16818c
+#define QM_REG_QVOQIDX_39					 0x168190
+#define QM_REG_QVOQIDX_40					 0x168194
+#define QM_REG_QVOQIDX_41					 0x168198
+#define QM_REG_QVOQIDX_42					 0x16819c
+#define QM_REG_QVOQIDX_43					 0x1681a0
+#define QM_REG_QVOQIDX_44					 0x1681a4
+#define QM_REG_QVOQIDX_45					 0x1681a8
+#define QM_REG_QVOQIDX_46					 0x1681ac
+#define QM_REG_QVOQIDX_47					 0x1681b0
+#define QM_REG_QVOQIDX_48					 0x1681b4
+#define QM_REG_QVOQIDX_49					 0x1681b8
+#define QM_REG_QVOQIDX_5					 0x168108
+#define QM_REG_QVOQIDX_50					 0x1681bc
+#define QM_REG_QVOQIDX_51					 0x1681c0
+#define QM_REG_QVOQIDX_52					 0x1681c4
+#define QM_REG_QVOQIDX_53					 0x1681c8
+#define QM_REG_QVOQIDX_54					 0x1681cc
+#define QM_REG_QVOQIDX_55					 0x1681d0
+#define QM_REG_QVOQIDX_56					 0x1681d4
+#define QM_REG_QVOQIDX_57					 0x1681d8
+#define QM_REG_QVOQIDX_58					 0x1681dc
+#define QM_REG_QVOQIDX_59					 0x1681e0
+#define QM_REG_QVOQIDX_6					 0x16810c
+#define QM_REG_QVOQIDX_60					 0x1681e4
+#define QM_REG_QVOQIDX_61					 0x1681e8
+#define QM_REG_QVOQIDX_62					 0x1681ec
+#define QM_REG_QVOQIDX_63					 0x1681f0
+#define QM_REG_QVOQIDX_64					 0x16e40c
+#define QM_REG_QVOQIDX_65					 0x16e410
+#define QM_REG_QVOQIDX_69					 0x16e420
+#define QM_REG_QVOQIDX_7					 0x168110
+#define QM_REG_QVOQIDX_70					 0x16e424
+#define QM_REG_QVOQIDX_71					 0x16e428
+#define QM_REG_QVOQIDX_72					 0x16e42c
+#define QM_REG_QVOQIDX_73					 0x16e430
+#define QM_REG_QVOQIDX_74					 0x16e434
+#define QM_REG_QVOQIDX_75					 0x16e438
+#define QM_REG_QVOQIDX_76					 0x16e43c
+#define QM_REG_QVOQIDX_77					 0x16e440
+#define QM_REG_QVOQIDX_78					 0x16e444
+#define QM_REG_QVOQIDX_79					 0x16e448
+#define QM_REG_QVOQIDX_8					 0x168114
+#define QM_REG_QVOQIDX_80					 0x16e44c
+#define QM_REG_QVOQIDX_81					 0x16e450
+#define QM_REG_QVOQIDX_85					 0x16e460
+#define QM_REG_QVOQIDX_86					 0x16e464
+#define QM_REG_QVOQIDX_87					 0x16e468
+#define QM_REG_QVOQIDX_88					 0x16e46c
+#define QM_REG_QVOQIDX_89					 0x16e470
+#define QM_REG_QVOQIDX_9					 0x168118
+#define QM_REG_QVOQIDX_90					 0x16e474
+#define QM_REG_QVOQIDX_91					 0x16e478
+#define QM_REG_QVOQIDX_92					 0x16e47c
+#define QM_REG_QVOQIDX_93					 0x16e480
+#define QM_REG_QVOQIDX_94					 0x16e484
+#define QM_REG_QVOQIDX_95					 0x16e488
+#define QM_REG_QVOQIDX_96					 0x16e48c
+#define QM_REG_QVOQIDX_97					 0x16e490
+#define QM_REG_QVOQIDX_98					 0x16e494
+#define QM_REG_QVOQIDX_99					 0x16e498
+/* [RW 1] Initialization bit command */
+#define QM_REG_SOFT_RESET					 0x168428
+/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */
+#define QM_REG_TASKCRDCOST_0					 0x16809c
+#define QM_REG_TASKCRDCOST_1					 0x1680a0
+#define QM_REG_TASKCRDCOST_2					 0x1680a4
+#define QM_REG_TASKCRDCOST_4					 0x1680ac
+#define QM_REG_TASKCRDCOST_5					 0x1680b0
+/* [R 6] Keeps the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL					 0x168010
+/* [R 6] Keeps the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL					 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG					 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0					 0x1682d0
+#define QM_REG_VOQCREDIT_1					 0x1682d4
+#define QM_REG_VOQCREDIT_4					 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR				 0x168090
+/* [RW 16] The init and maximum credit for each VoQ */
+#define QM_REG_VOQINITCREDIT_0					 0x168060
+#define QM_REG_VOQINITCREDIT_1					 0x168064
+#define QM_REG_VOQINITCREDIT_2					 0x168068
+#define QM_REG_VOQINITCREDIT_4					 0x168070
+#define QM_REG_VOQINITCREDIT_5					 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0					 0x1682a0
+#define QM_REG_VOQPORT_1					 0x1682a4
+#define QM_REG_VOQPORT_2					 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB					 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A				 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB					 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A				 0x16e528
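+/*
+ * Sketch for tying physical queue q to VOQ 0 via the four mask registers
+ * above (REG_RD()/REG_WR() are the driver's accessors; the same pattern
+ * applies to the other VOQ masks below):
+ *
+ *	u32 reg = (q < 32)  ? QM_REG_VOQQMASK_0_LSB :
+ *		  (q < 64)  ? QM_REG_VOQQMASK_0_MSB :
+ *		  (q < 96)  ? QM_REG_VOQQMASK_0_LSB_EXT_A :
+ *			      QM_REG_VOQQMASK_0_MSB_EXT_A;
+ *	REG_WR(bp, reg, REG_RD(bp, reg) | (1 << (q & 31)));
+ */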
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB					 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A				 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB					 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A				 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB					 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A				 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB					 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A				 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB					 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A				 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB					 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A				 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB					 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A				 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB					 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A				 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB					 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A				 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A				 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB					 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A				 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB					 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A				 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB					 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A				 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB					 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A				 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB					 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A				 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB					 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A				 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB					 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A				 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB					 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A				 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB					 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A				 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB					 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A				 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB					 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A				 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A				 0x16e570
+/* [RW 32] Wrr weights */
+#define QM_REG_WRRWEIGHTS_0					 0x16880c
+#define QM_REG_WRRWEIGHTS_1					 0x168810
+#define QM_REG_WRRWEIGHTS_10					 0x168814
+#define QM_REG_WRRWEIGHTS_11					 0x168818
+#define QM_REG_WRRWEIGHTS_12					 0x16881c
+#define QM_REG_WRRWEIGHTS_13					 0x168820
+#define QM_REG_WRRWEIGHTS_14					 0x168824
+#define QM_REG_WRRWEIGHTS_15					 0x168828
+#define QM_REG_WRRWEIGHTS_16					 0x16e000
+#define QM_REG_WRRWEIGHTS_17					 0x16e004
+#define QM_REG_WRRWEIGHTS_18					 0x16e008
+#define QM_REG_WRRWEIGHTS_19					 0x16e00c
+#define QM_REG_WRRWEIGHTS_2					 0x16882c
+#define QM_REG_WRRWEIGHTS_20					 0x16e010
+#define QM_REG_WRRWEIGHTS_21					 0x16e014
+#define QM_REG_WRRWEIGHTS_22					 0x16e018
+#define QM_REG_WRRWEIGHTS_23					 0x16e01c
+#define QM_REG_WRRWEIGHTS_24					 0x16e020
+#define QM_REG_WRRWEIGHTS_25					 0x16e024
+#define QM_REG_WRRWEIGHTS_26					 0x16e028
+#define QM_REG_WRRWEIGHTS_27					 0x16e02c
+#define QM_REG_WRRWEIGHTS_28					 0x16e030
+#define QM_REG_WRRWEIGHTS_29					 0x16e034
+#define QM_REG_WRRWEIGHTS_3					 0x168830
+#define QM_REG_WRRWEIGHTS_30					 0x16e038
+#define QM_REG_WRRWEIGHTS_31					 0x16e03c
+#define QM_REG_WRRWEIGHTS_4					 0x168834
+#define QM_REG_WRRWEIGHTS_5					 0x168838
+#define QM_REG_WRRWEIGHTS_6					 0x16883c
+#define QM_REG_WRRWEIGHTS_7					 0x168840
+#define QM_REG_WRRWEIGHTS_8					 0x168844
+#define QM_REG_WRRWEIGHTS_9					 0x168848
+/* [R 6] Keeps the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL					 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST					 0x18840
+#define SRC_REG_COUNTFREE0					 0x40500
+/* [RW 1] If clear the searcher is compatible with E1 A0 - supporting only
+   two ports. If set the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE					 0x404cc
+#define SRC_REG_FIRSTFREE0					 0x40510
+#define SRC_REG_KEYRSS0_0					 0x40408
+#define SRC_REG_KEYRSS0_7					 0x40424
+#define SRC_REG_KEYRSS1_9					 0x40454
+#define SRC_REG_KEYSEARCH_0					 0x40458
+#define SRC_REG_KEYSEARCH_1					 0x4045c
+#define SRC_REG_KEYSEARCH_2					 0x40460
+#define SRC_REG_KEYSEARCH_3					 0x40464
+#define SRC_REG_KEYSEARCH_4					 0x40468
+#define SRC_REG_KEYSEARCH_5					 0x4046c
+#define SRC_REG_KEYSEARCH_6					 0x40470
+#define SRC_REG_KEYSEARCH_7					 0x40474
+#define SRC_REG_KEYSEARCH_8					 0x40478
+#define SRC_REG_KEYSEARCH_9					 0x4047c
+#define SRC_REG_LASTFREE0					 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0				 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST					 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS					 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK					 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS					 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR				 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP					 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN					 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define TCM_REG_CDU_AG_WR_IFEN					 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN					 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN					 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD					 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT					 0x500c0
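+/*
+ * WRR weight encoding shared by the TCM_REG_*_WEIGHT registers: a register
+ * value of 0 selects weight 8 (the highest); any other value selects that
+ * weight directly. Encoding sketch (illustrative only):
+ *
+ *	reg_val = (weight == 8) ? 0 : weight;
+ */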
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN					 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+   interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 				 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT					 0x500bc
+/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID					 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR					 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID					 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD					 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD					 0x50210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+   ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE					 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR					 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR					 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used to align to STORM context row size of 128 bits. The offset of these
+   data in the STORM context is always 0. Index _i stands for the connection
+   type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0					 0x50050
+#define TCM_REG_N_SM_CTX_LD_1					 0x50054
+#define TCM_REG_N_SM_CTX_LD_2					 0x50058
+#define TCM_REG_N_SM_CTX_LD_3					 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4					 0x50060
+#define TCM_REG_N_SM_CTX_LD_5					 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN					 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+   interface. */
+#define TCM_REG_PBF_LENGTH_MIS					 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT					 0x500b4
+#define TCM_REG_PHYS_QNUM0_0					 0x500e0
+#define TCM_REG_PHYS_QNUM0_1					 0x500e4
+#define TCM_REG_PHYS_QNUM1_0					 0x500e8
+#define TCM_REG_PHYS_QNUM1_1					 0x500ec
+#define TCM_REG_PHYS_QNUM2_0					 0x500f0
+#define TCM_REG_PHYS_QNUM2_1					 0x500f4
+#define TCM_REG_PHYS_QNUM3_0					 0x500f8
+#define TCM_REG_PHYS_QNUM3_1					 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN					 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+   interface. */
+#define TCM_REG_PRS_LENGTH_MIS					 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT					 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID					 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+   interface. */
+#define TCM_REG_STORM_LENGTH_MIS				 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN					 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT					 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN					 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK					 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS					 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK					 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS					 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR				 0x501e4
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ					 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 				 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 				 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN					 0x5000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
+#define TCM_REG_TCM_TQM_USE_Q					 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR					 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN					 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT					 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD					 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT					 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT					 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P					 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S					 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN					 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN					 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+   interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 				 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT					 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN					 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+   interface. */
+#define TCM_REG_USEM_LENGTH_MIS 				 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT					 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - length of the message; [15:6] - message
+   pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE					 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE				 29
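+/* Hypothetical field extractors, not part of the original register file:
+   per the comment above, a TCM XX descriptor entry packs the message
+   length in bits [5:0], the message pointer in bits [15:6] and the next
+   pointer in bits [20:16]. */
+#define TCM_XX_DESCR_LENGTH(e)		((e) & 0x3f)
+#define TCM_XX_DESCR_MSG_PTR(e)		(((e) >> 6) & 0x3ff)
+#define TCM_XX_DESCR_NEXT_PTR(e)	(((e) >> 16) & 0x1f)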
+/* [R 6] Use to read the value of XX protection Free counter. */
+#define TCM_REG_XX_FREE 					 0x50178
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to 19 at start-up. */
+#define TCM_REG_XX_INIT_CRD					 0x50220
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+   protection. */
+#define TCM_REG_XX_MAX_LL_SZ					 0x50044
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+   protection. ~tcm_registers_xx_free.xx_free is read on read. */
+#define TCM_REG_XX_MSG_NUM					 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 				 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+   header pointer. */
+#define TCM_REG_XX_TABLE					 0x50240
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL				 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL				 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION					 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION					 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION					 0x164038
+/* [RW 2] Client in High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT				 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL				 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL				 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL				 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT					 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT					 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT					 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 				 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 				 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS					 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+   outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL					 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR					 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID				 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS					 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR					 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID				 0x164248
+#define TM_REG_LIN0_SCAN_ON					 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME					 0x16403c
+#define TM_REG_LIN0_VNIC_UC					 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR					 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR					 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID				 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR			 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL				 0x164260
+/* [RW 20] The amount of hardware cycles for each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE					 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION				 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK					 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS					 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK					 0x16410c
+/* [R 7] Parity register #0 read */
+#define TM_REG_TM_PRTY_STS					 0x164100
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR					 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0				 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1				 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2				 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3				 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4				 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0					 0x420b8
+#define TSDM_REG_AGG_INT_T_1					 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR				 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0				 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1				 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2				 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3				 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 			 0x4200c
+#define TSDM_REG_ENABLE_IN1					 0x42238
+#define TSDM_REG_ENABLE_IN2					 0x4223c
+#define TSDM_REG_ENABLE_OUT1					 0x42240
+#define TSDM_REG_ENABLE_OUT2					 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL				 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG				 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD					 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 				 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 				 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD					 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD					 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD					 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD					 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD					 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD					 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD					 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD					 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 			 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR				 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY				 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY				 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define TSDM_REG_TIMER_TICK					 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0				 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1				 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 				 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 				 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 				 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS					 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR				 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 				 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0					 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1					 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2					 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+   not be equal to register ~tsem_registers_arb_element0.arb_element0 and
+   ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3					 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 and
+   ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4					 0x180030
+#define TSEM_REG_ENABLE_IN					 0x1800a4
+#define TSEM_REG_ENABLE_OUT					 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY					 0x1a0000
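+/* Illustrative sketch, not part of the original register file: as the
+   comment above states, a SEM_FAST register inside this window is reached
+   by adding its own offset to the fast-memory base.  The sem_fast offset
+   name below is a placeholder, not a real register. */
+#define TSEM_FAST_MEMORY_ADDR(sem_fast_reg_offset) \
+	(TSEM_REG_FAST_MEMORY + (sem_fast_reg_offset))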
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define TSEM_REG_FIC0_DISABLE					 0x180224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define TSEM_REG_FIC1_DISABLE					 0x180234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define TSEM_REG_INT_TABLE					 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0					 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1					 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0					 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1					 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2					 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3					 0x180014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define TSEM_REG_PAS_DISABLE					 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 				 0x181000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define TSEM_REG_PRAM						 0x1c0000
+/* [R 8] Valid sleeping threads indication; one bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID				 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY				 0x1802a0
+/* [RW 8] List of free threads. There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST					 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0				 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1				 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS					 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS					 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS					 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS					 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS					 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS					 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS					 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS					 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS					 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS					 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS					 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS					 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS					 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS					 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS					 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS					 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS					 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS					 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS					 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0				 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1				 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 				 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 				 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0				 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1				 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0				 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1				 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM					 0x180380
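+/* Hedged usage sketch (comment only, not part of the original file):
+   writing a VF id (0-63), or a PF id encoded as 64-67, to the register
+   above resets the corresponding error bit; with the driver's assumed
+   REG_WR() accessor that is roughly:
+
+	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vf_id);	   abs_vf_id in 0..63
+ */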
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100, the RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX						 0xe2000
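+/* Hypothetical address helper, not part of the original register file: per
+   the comment above, the indirect AG-context address carries the register
+   offset within the LCID above bit 7 and the LCID itself in bits [7:0], so
+   REG10 of LCID 100 maps to (10 << 8) | 100 = 0xa64, matching the 12'ha64
+   example. */
+#define UCM_AG_CTX_RBC_ADDR(reg, lcid)	(((reg) << 8) | ((lcid) & 0xff))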
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP					 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN					 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define UCM_REG_CDU_AG_WR_IFEN					 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN					 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN					 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD					 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT					 0xe00c4
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN					 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 				 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT					 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN					 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 				 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT					 0xe00c0
+/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID					 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR					 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID					 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD					 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD					 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+   ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE					 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD0_PR					 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD1_PR					 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q					 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used in order to align to STORM context row size of 128 bits. The offset
+   of these data in the STORM context is always 0. Index _i stands for the
+   connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0					 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1					 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2					 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3					 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4					 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5					 0xe0068
+#define UCM_REG_PHYS_QNUM0_0					 0xe0110
+#define UCM_REG_PHYS_QNUM0_1					 0xe0114
+#define UCM_REG_PHYS_QNUM1_0					 0xe0118
+#define UCM_REG_PHYS_QNUM1_1					 0xe011c
+#define UCM_REG_PHYS_QNUM2_0					 0xe0120
+#define UCM_REG_PHYS_QNUM2_1					 0xe0124
+#define UCM_REG_PHYS_QNUM3_0					 0xe0128
+#define UCM_REG_PHYS_QNUM3_1					 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID					 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS				 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN					 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT					 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD					 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR					 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN					 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT					 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN					 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 				 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT					 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN					 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK					 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS					 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK					 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS					 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR				 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ					 0xe00dc
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 				 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 				 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN					 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN					 0xe000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
+#define UCM_REG_UCM_UQM_USE_Q					 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD					 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT					 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT					 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P					 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S					 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN					 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN					 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 				 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT					 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN					 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 				 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT					 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [14:6] - message
+   pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE					 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE				 27
+/* [R 6] Use to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 					 0xe016c
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Write writes the initial credit value; read returns the current
+   value of the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD					 0xe0224
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+   protection. ~ucm_registers_xx_free.xx_free is read on read. */
+#define UCM_REG_XX_MSG_NUM					 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 				 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+   header pointer. */
+#define UCM_REG_XX_TABLE					 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA				 (0x1<<10)
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE			 (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA			 (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK			 (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN				 (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE			 (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN			 (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA				 (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET			 (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA				 (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG					 0x8
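+/* Hedged usage sketch (comment only, not part of the original file): the
+   UMAC_COMMAND_CONFIG_REG_* values above are single-bit masks within the
+   command-config register, so enabling the MAC in both directions is a
+   read-modify-write of those bits.  REG_RD()/REG_WR() and the umac_base
+   block offset are assumed from the driver, not defined here.
+
+	val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+	       UMAC_COMMAND_CONFIG_REG_RX_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ */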
+/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE
+ * state from LPI state when it receives a packet for transmission. The
+ * decrement unit is 1 micro-second. */
+#define UMAC_REG_EEE_WAKE_TIMER					 0x6c
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0					 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1					 0x10
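+/* Hypothetical packing sketch (comment only, not part of the original
+   file): following the two comments above, UMAC_REG_MAC_ADDR0 carries MAC
+   address bits 47..16 and the low half of UMAC_REG_MAC_ADDR1 carries bits
+   15..0, so a conventional mac[6] array with mac[0] as the most
+   significant byte would be packed roughly as:
+
+	addr0 = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
+	addr1 = (mac[4] << 8) | mac[5];
+ */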
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR						 0x14
+#define UMAC_REG_UMAC_EEE_CTRL					 0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN				 (0x1<<3)
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0				 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1				 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2				 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4				 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5				 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6				 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 				 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 				 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 				 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 				 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 				 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5					 0xc40cc
+#define USDM_REG_AGG_INT_T_6					 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR				 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0				 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1				 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2				 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3				 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 			 0xc400c
+#define USDM_REG_ENABLE_IN1					 0xc4238
+#define USDM_REG_ENABLE_IN2					 0xc423c
+#define USDM_REG_ENABLE_OUT1					 0xc4240
+#define USDM_REG_ENABLE_OUT2					 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL				 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG				 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ				 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD					 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 				 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 				 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD					 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD					 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD					 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD					 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD					 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD					 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD					 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD					 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD					 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 			 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR				 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY				 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY				 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define USDM_REG_TIMER_TICK					 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0				 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1				 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 				 0xc4294
+#define USDM_REG_USDM_INT_STS_1 				 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 				 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS					 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR				 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 				 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0					 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1					 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~usem_registers_arb_element0.arb_element0
+   and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2					 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+   not be equal to register ~usem_registers_arb_element0.arb_element0 and
+   ~usem_registers_arb_element1.arb_element1 and
+   ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3					 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~usem_registers_arb_element0.arb_element0
+   and ~usem_registers_arb_element1.arb_element1 and
+   ~usem_registers_arb_element2.arb_element2 and
+   ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4					 0x300030
+#define USEM_REG_ENABLE_IN					 0x3000a4
+#define USEM_REG_ENABLE_OUT					 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY					 0x320000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define USEM_REG_FIC0_DISABLE					 0x300224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define USEM_REG_FIC1_DISABLE					 0x300234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define USEM_REG_INT_TABLE					 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define USEM_REG_MSG_NUM_FIC0					 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define USEM_REG_MSG_NUM_FIC1					 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define USEM_REG_MSG_NUM_FOC0					 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define USEM_REG_MSG_NUM_FOC1					 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define USEM_REG_MSG_NUM_FOC2					 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define USEM_REG_MSG_NUM_FOC3					 0x300014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define USEM_REG_PAS_DISABLE					 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 				 0x302000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define USEM_REG_PRAM						 0x340000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID				 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY				 0x3002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define USEM_REG_THREADS_LIST					 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS					 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS					 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS					 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS					 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS					 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS					 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS					 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS					 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS					 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS					 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS					 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS					 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS					 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS					 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS					 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS					 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS					 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS					 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS					 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0				 0x300110
+#define USEM_REG_USEM_INT_MASK_1				 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 				 0x300104
+#define USEM_REG_USEM_INT_STS_1 				 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0				 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1				 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0				 0x300124
+#define USEM_REG_USEM_PRTY_STS_1				 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0				 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1				 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM					 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST				 (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST				 (0x1<<1)
+#define VFC_REG_MEMORIES_RST					 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100, the RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX						 0x28000
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q						 0x20134
+/* [RW 2] Per each decision rule the queue index to register to. */
+#define XCM_REG_AUX_CNT_FLG_Q_19				 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP					 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN					 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define XCM_REG_CDU_AG_WR_IFEN					 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN					 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN					 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD					 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT					 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN					 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 				 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT					 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN					 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 				 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT					 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID					 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR					 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID					 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD					 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD					 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0				 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1				 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0				 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1				 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+   ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE					 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Channel group is the
+   complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR					 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Channel group is the
+   complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR					 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN					 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 				 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT					 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN					 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 				 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used in order to align to STORM context row size of 128 bits. The offset
+   of these data in the STORM context is always 0. Index _i stands for the
+   connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0					 0x20060
+#define XCM_REG_N_SM_CTX_LD_1					 0x20064
+#define XCM_REG_N_SM_CTX_LD_2					 0x20068
+#define XCM_REG_N_SM_CTX_LD_3					 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4					 0x20070
+#define XCM_REG_N_SM_CTX_LD_5					 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN					 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS					 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT					 0x200d0
+#define XCM_REG_PHYS_QNUM3_0					 0x20100
+#define XCM_REG_PHYS_QNUM3_1					 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID					 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS				 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT					 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN					 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD					 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT					 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR					 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN					 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN					 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 				 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT					 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q					 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN					 0x2002c
+/* [RC 1] Message length mismatch (relative to last indication) at the usem
+   interface. */
+#define XCM_REG_USEM_LENGTH_MIS 				 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT					 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 				 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 				 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 				 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 				 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00				 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01				 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10				 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11				 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00			 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01			 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10			 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11			 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN					 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK					 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS					 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK					 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS					 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR				 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ					 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 				 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 				 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN					 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN					 0x2000c
+/* [RW 1] If set, the Q index received from the QM is inserted into the
+   event ID. */
+#define XCM_REG_XCM_XQM_USE_Q					 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 				 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD					 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT					 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT					 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P					 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S					 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN					 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN					 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 				 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT					 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [11:6] - message
+   pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE					 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE				 32
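+/* Illustrative sketch (editor's addition, not part of the original header):
+ * decoding one 17-bit descriptor-table entry using the field layout
+ * documented above, for a value 'entry' read back from the table:
+ *
+ *	msg_len  =  entry        & 0x3f;	bits [5:0]  - message length
+ *	msg_ptr  = (entry >> 6)  & 0x3f;	bits [11:6] - message pointer
+ *	next_ptr = (entry >> 12) & 0x1f;	bits [16:12] - next pointer
+ */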
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 					 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+   of the Input Stage XX protection buffer by the XX protection pending
+   messages. Max credit available - 3. Write writes the initial credit value;
+   read returns the current value of the credit counter. Must be initialized
+   to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD					 0x20424
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+   protection. ~xcm_registers_xx_free.xx_free read on read. */
+#define XCM_REG_XX_MSG_NUM					 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 				 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK				 (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN					 (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB				 (0x1<<7)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN				 (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN				 (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON			 (0x1<<1)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN			 (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN			 (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN				 (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN				 (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS				 0x60
+#define XMAC_REG_CTRL						 0
+/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI					 0x2c
+/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO					 0x28
+#define XMAC_REG_EEE_CTRL					 0xd8
+#define XMAC_REG_EEE_TIMERS_HI					 0xe4
+#define XMAC_REG_PAUSE_CTRL					 0x68
+#define XMAC_REG_PFC_CTRL					 0x70
+#define XMAC_REG_PFC_CTRL_HI					 0x74
+#define XMAC_REG_RX_LSS_CTRL					 0x50
+#define XMAC_REG_RX_LSS_STATUS					 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE					 0x40
+#define XMAC_REG_TX_CTRL					 0x20
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE		 (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE		 (0x1<<1)
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+   header pointer. */
+#define XCM_REG_XX_TABLE					 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0				 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1				 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10				 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11				 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12				 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13				 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14				 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2				 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3				 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4				 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5				 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6				 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7				 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8				 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9				 0x16605c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 				 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 				 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR				 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0				 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1				 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2				 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3				 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 			 0x16600c
+#define XSDM_REG_ENABLE_IN1					 0x166238
+#define XSDM_REG_ENABLE_IN2					 0x16623c
+#define XSDM_REG_ENABLE_OUT1					 0x166240
+#define XSDM_REG_ENABLE_OUT2					 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL				 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG				 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD					 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 				 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 				 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD					 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD					 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD					 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD					 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD					 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD					 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD					 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD					 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR				 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN					 0x1664c4
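+/* Illustrative sketch (editor's addition, not part of the original header):
+ * packing a value for the operation-generation register above from the
+ * fields documented in its comment:
+ *
+ *	val = (agg_vect_idx_valid << 16) |	bit 16
+ *	      ((agg_vect_idx & 0xff) << 8) |	bits 15:8
+ *	      ((trig & 0x7) << 5) |		bits 7:5
+ *	      (t124_param & 0x1f);		bits 4:0
+ */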
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY				 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY				 0x166558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define XSDM_REG_TIMER_TICK					 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0				 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1				 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 				 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 				 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 				 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS					 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR				 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 				 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0					 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1					 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   and ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2					 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+   not be equal to register ~xsem_registers_arb_element0.arb_element0 and
+   ~xsem_registers_arb_element1.arb_element1 and
+   ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3					 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   and ~xsem_registers_arb_element1.arb_element1 and
+   ~xsem_registers_arb_element2.arb_element2 and
+   ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4					 0x280030
+#define XSEM_REG_ENABLE_IN					 0x2800a4
+#define XSEM_REG_ENABLE_OUT					 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY					 0x2a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define XSEM_REG_FIC0_DISABLE					 0x280224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define XSEM_REG_FIC1_DISABLE					 0x280234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define XSEM_REG_INT_TABLE					 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0					 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1					 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0					 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1					 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2					 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3					 0x280014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define XSEM_REG_PAS_DISABLE					 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER 				 0x282000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define XSEM_REG_PRAM						 0x2c0000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID				 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY				 0x2802a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define XSEM_REG_THREADS_LIST					 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS					 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS					 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS					 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS					 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS					 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS					 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS					 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS					 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS					 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS					 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS					 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS					 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS					 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS					 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS					 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS					 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS					 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS					 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS					 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VFs; values 64-67 reset error for 4 PFs; values 68-127 are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM					 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0				 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1				 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 				 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 				 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0				 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1				 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0				 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1				 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
+#define MCPR_ACCESS_LOCK_LOCK					 (1L<<31)
+#define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE				 (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT					 (1L<<4)
+#define MCPR_NVM_COMMAND_DONE					 (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST					 (1L<<7)
+#define MCPR_NVM_COMMAND_LAST					 (1L<<8)
+#define MCPR_NVM_COMMAND_WR					 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1				 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1				 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1				 (1L<<1)
+#define BIGMAC_REGISTER_BMAC_CONTROL				 (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL			 (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE				 (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL				 (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS			 (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS				 (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE				 (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64				 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ				 (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL				 (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE				 (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD			 (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR				 (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT				 (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT				 (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL				 (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL			 (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE				 (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL				 (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL				 (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS			 (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT				 (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE				 (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64				 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ				 (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP				 (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL				 (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE				 (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL			 (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR			 (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT				 (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK				 (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP				 (0x24<<3)
+#define EMAC_LED_1000MB_OVERRIDE				 (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE 				 (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE					 (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE				 (1L<<12)
+#define EMAC_LED_OVERRIDE					 (1L<<0)
+#define EMAC_LED_TRAFFIC					 (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS				 (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22				 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45				 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22				 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 			 (1L<<26)
+#define EMAC_MDIO_COMM_DATA					 (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY				 (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL				 (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45				 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT				 (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT			 16
+#define EMAC_MDIO_STATUS_10MB					 (1L<<1)
+#define EMAC_MODE_25G_MODE					 (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX					 (1L<<1)
+#define EMAC_MODE_PORT_GMII					 (2L<<2)
+#define EMAC_MODE_PORT_MII					 (1L<<2)
+#define EMAC_MODE_PORT_MII_10M					 (3L<<2)
+#define EMAC_MODE_RESET 					 (1L<<0)
+#define EMAC_REG_EMAC_LED					 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 				 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 				 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 				 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS				 0xb0
+#define EMAC_REG_EMAC_MODE					 0x0
+#define EMAC_REG_EMAC_RX_MODE					 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE				 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC				 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28				 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT				 23
+#define EMAC_REG_EMAC_TX_MODE					 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC				 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT				 22
+#define EMAC_REG_RX_PFC_MODE					 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES				 (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN				 (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN				 (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM					 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT			 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT		 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD				 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT				 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD				 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT				 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT			 (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN					 (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL				 (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG				 (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS				 (1L<<8)
+#define EMAC_RX_MODE_RESET					 (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA				 (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN				 (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN					 (1L<<4)
+#define EMAC_TX_MODE_RESET					 (1L<<0)
+#define MISC_REGISTERS_GPIO_0					 0
+#define MISC_REGISTERS_GPIO_1					 1
+#define MISC_REGISTERS_GPIO_2					 2
+#define MISC_REGISTERS_GPIO_3					 3
+#define MISC_REGISTERS_GPIO_CLR_POS				 16
+#define MISC_REGISTERS_GPIO_FLOAT				 (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS				 24
+#define MISC_REGISTERS_GPIO_HIGH				 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z				 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 			 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR			 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET			 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 			 16
+#define MISC_REGISTERS_GPIO_LOW 				 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 			 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW				 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT				 4
+#define MISC_REGISTERS_GPIO_SET_POS				 8
+#define MISC_REGISTERS_RESET_REG_1_CLEAR			 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1			 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ			 (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_1_RST_HC			 (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM			 (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_1_SET				 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1			 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC				 (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC			 (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1			 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0			 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE		 (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1			 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE		 (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC			 (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B	 (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE	 (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU	 (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO			 (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE		 (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO			 (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN			 (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET				 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0			 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1			 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC				 (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT			 (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR			 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ	 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN	 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW  (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ	 (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN	 (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD  (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW	 (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET				 0x5a4
+#define MISC_REGISTERS_SPIO_4					 4
+#define MISC_REGISTERS_SPIO_5					 5
+#define MISC_REGISTERS_SPIO_7					 7
+#define MISC_REGISTERS_SPIO_CLR_POS				 16
+#define MISC_REGISTERS_SPIO_FLOAT				 (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS				 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z				 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS			 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 			 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW				 0
+#define MISC_REGISTERS_SPIO_SET_POS				 8
+#define MISC_SPIO_CLR_POS					 16
+#define MISC_SPIO_FLOAT					 (0xffL<<24)
+#define MISC_SPIO_FLOAT_POS					 24
+#define MISC_SPIO_INPUT_HI_Z					 2
+#define MISC_SPIO_INT_OLD_SET_POS				 16
+#define MISC_SPIO_OUTPUT_HIGH					 1
+#define MISC_SPIO_OUTPUT_LOW					 0
+#define MISC_SPIO_SET_POS					 8
+#define MISC_SPIO_SPIO4					 0x10
+#define MISC_SPIO_SPIO5					 0x20
+#define HW_LOCK_MAX_RESOURCE_VALUE				 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB				 13
+#define HW_LOCK_RESOURCE_DRV_FLAGS				 10
+#define HW_LOCK_RESOURCE_GPIO					 1
+#define HW_LOCK_RESOURCE_MDIO					 0
+#define HW_LOCK_RESOURCE_NVRAM					 12
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK				 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9
+#define HW_LOCK_RESOURCE_RECOVERY_REG				 11
+#define HW_LOCK_RESOURCE_RESET					 5
+#define HW_LOCK_RESOURCE_SPIO					 2
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT			 (0x1<<19)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT			 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR			 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT			 (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR			 (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT		 (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR		 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR			 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY		 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY		 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT			 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR		 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT	 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR	 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR		 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5				 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT		 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR		 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT			 (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR			 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT			 (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR			 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR		 (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR			 (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0			(0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1			(0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0	0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK		0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK	0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6	6
+#define RESERVED_GENERAL_ATTENTION_BIT_7	7
+#define RESERVED_GENERAL_ATTENTION_BIT_8	8
+#define RESERVED_GENERAL_ATTENTION_BIT_9	9
+#define RESERVED_GENERAL_ATTENTION_BIT_10	10
+#define RESERVED_GENERAL_ATTENTION_BIT_11	11
+#define RESERVED_GENERAL_ATTENTION_BIT_12	12
+#define RESERVED_GENERAL_ATTENTION_BIT_13	13
+#define RESERVED_GENERAL_ATTENTION_BIT_14	14
+#define RESERVED_GENERAL_ATTENTION_BIT_15	15
+#define RESERVED_GENERAL_ATTENTION_BIT_16	16
+#define RESERVED_GENERAL_ATTENTION_BIT_17	17
+#define RESERVED_GENERAL_ATTENTION_BIT_18	18
+#define RESERVED_GENERAL_ATTENTION_BIT_19	19
+#define RESERVED_GENERAL_ATTENTION_BIT_20	20
+#define RESERVED_GENERAL_ATTENTION_BIT_21	21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT	      RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to group 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0	    RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1	    RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2	    RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3	    RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4	    RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5	    RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6	    RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7	    RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR			23
+#define LATCHED_ATTN_RBCT			24
+#define LATCHED_ATTN_RBCN			25
+#define LATCHED_ATTN_RBCU			26
+#define LATCHED_ATTN_RBCP			27
+#define LATCHED_ATTN_TIMEOUT_GRC		28
+#define LATCHED_ATTN_RSVD_GRC			29
+#define LATCHED_ATTN_ROM_PARITY_MCP		30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP		31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP		32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP		33
+
+#define GENERAL_ATTEN_WORD(atten_name)	       ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+	(1UL << ((94 + atten_name) % 32))
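+/* Illustrative worked example (editor's addition): for
+ * LINK_SYNC_ATTENTION_BIT_FUNC_0 (== RESERVED_GENERAL_ATTENTION_BIT_12),
+ * GENERAL_ATTEN_WORD() gives (94 + 12) / 32 = 3 and GENERAL_ATTEN_OFFSET()
+ * gives 1UL << ((94 + 12) % 32) = 1UL << 10, i.e. word 3, bit 10 of the
+ * general attention space. */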
+/*
+ * This file defines GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml on the regBase attribute.
+ * Use the base with the generated offsets of specific registers; an
+ * illustrative note follows the list of bases below.
+ */
+
+#define GRCBASE_PXPCS		0x000000
+#define GRCBASE_PCICONFIG	0x002000
+#define GRCBASE_PCIREG		0x002400
+#define GRCBASE_EMAC0		0x008000
+#define GRCBASE_EMAC1		0x008400
+#define GRCBASE_DBU		0x008800
+#define GRCBASE_MISC		0x00A000
+#define GRCBASE_DBG		0x00C000
+#define GRCBASE_NIG		0x010000
+#define GRCBASE_XCM		0x020000
+#define GRCBASE_PRS		0x040000
+#define GRCBASE_SRCH		0x040400
+#define GRCBASE_TSDM		0x042000
+#define GRCBASE_TCM		0x050000
+#define GRCBASE_BRB1		0x060000
+#define GRCBASE_MCP		0x080000
+#define GRCBASE_UPB		0x0C1000
+#define GRCBASE_CSDM		0x0C2000
+#define GRCBASE_USDM		0x0C4000
+#define GRCBASE_CCM		0x0D0000
+#define GRCBASE_UCM		0x0E0000
+#define GRCBASE_CDU		0x101000
+#define GRCBASE_DMAE		0x102000
+#define GRCBASE_PXP		0x103000
+#define GRCBASE_CFC		0x104000
+#define GRCBASE_HC		0x108000
+#define GRCBASE_PXP2		0x120000
+#define GRCBASE_PBF		0x140000
+#define GRCBASE_UMAC0		0x160000
+#define GRCBASE_UMAC1		0x160400
+#define GRCBASE_XPB		0x161000
+#define GRCBASE_MSTAT0	    0x162000
+#define GRCBASE_MSTAT1	    0x162800
+#define GRCBASE_XMAC0		0x163000
+#define GRCBASE_XMAC1		0x163800
+#define GRCBASE_TIMERS		0x164000
+#define GRCBASE_XSDM		0x166000
+#define GRCBASE_QM		0x168000
+#define GRCBASE_DQ		0x170000
+#define GRCBASE_TSEM		0x180000
+#define GRCBASE_CSEM		0x200000
+#define GRCBASE_XSEM		0x280000
+#define GRCBASE_USEM		0x300000
+#define GRCBASE_MISC_AEU	GRCBASE_MISC
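+
+/* Illustrative note (editor's addition): the XXX_REG_YYY offsets defined in
+ * this file are absolute GRC offsets, i.e. the block base above plus the
+ * offset of the register within its block. For example:
+ *
+ *	XSEM_REG_ARB_CYCLE_SIZE (0x280034) == GRCBASE_XSEM (0x280000) + 0x34
+ *	XSDM_REG_TIMER_TICK     (0x166000) == GRCBASE_XSDM (0x166000) + 0x00
+ */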
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET					0x2000
+#define PCICFG_VENDOR_ID_OFFSET 			0x00
+#define PCICFG_DEVICE_ID_OFFSET 			0x02
+#define PCICFG_COMMAND_OFFSET				0x04
+#define PCICFG_COMMAND_IO_SPACE 		(1<<0)
+#define PCICFG_COMMAND_MEM_SPACE		(1<<1)
+#define PCICFG_COMMAND_BUS_MASTER		(1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES		(1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES		(1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP		(1<<5)
+#define PCICFG_COMMAND_PERR_ENA 		(1<<6)
+#define PCICFG_COMMAND_STEPPING 		(1<<7)
+#define PCICFG_COMMAND_SERR_ENA 		(1<<8)
+#define PCICFG_COMMAND_FAST_B2B 		(1<<9)
+#define PCICFG_COMMAND_INT_DISABLE		(1<<10)
+#define PCICFG_COMMAND_RESERVED 		(0x1f<<11)
+#define PCICFG_STATUS_OFFSET				0x06
+#define PCICFG_REVISION_ID_OFFSET			0x08
+#define PCICFG_REVESION_ID_MASK			0xff
+#define PCICFG_REVESION_ID_ERROR_VAL		0xff
+#define PCICFG_CACHE_LINE_SIZE				0x0c
+#define PCICFG_LATENCY_TIMER				0x0d
+#define PCICFG_BAR_1_LOW				0x10
+#define PCICFG_BAR_1_HIGH				0x14
+#define PCICFG_BAR_2_LOW				0x18
+#define PCICFG_BAR_2_HIGH				0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET		0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET			0x2e
+#define PCICFG_INT_LINE 				0x3c
+#define PCICFG_INT_PIN					0x3d
+#define PCICFG_PM_CAPABILITY				0x48
+#define PCICFG_PM_CAPABILITY_VERSION		(0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK		(1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED		(1<<20)
+#define PCICFG_PM_CAPABILITY_DSI		(1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT	(0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT 	(1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT 	(1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0		(1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1		(1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2		(1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT	(1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD	(1<<31)
+#define PCICFG_PM_CSR_OFFSET				0x4c
+#define PCICFG_PM_CSR_STATE			(0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE		(1<<8)
+#define PCICFG_PM_CSR_PME_STATUS		(1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET			0x58
+#define PCICFG_MSI_CONTROL_ENABLE		(0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP 		(0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA 		(0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP	(0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE	(0x1<<24)
+#define PCICFG_GRC_ADDRESS				0x78
+#define PCICFG_GRC_DATA				0x80
+#define PCICFG_ME_REGISTER				0x98
+#define PCICFG_MSIX_CAP_ID_OFFSET			0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE		(0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED		(0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK		(0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE 	(0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL				0xb4
+#define PCICFG_DEVICE_STATUS				0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET	(1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET	(1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET	(1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET	(1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET	(1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND		(1<<5)
+#define PCICFG_LINK_CONTROL				0xbc
+
+
+#define BAR_USTRORM_INTMEM				0x400000
+#define BAR_CSTRORM_INTMEM				0x410000
+#define BAR_XSTRORM_INTMEM				0x420000
+#define BAR_TSTRORM_INTMEM				0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM					0x440000
+
+#define BAR_DOORBELL_OFFSET				0x800000
+
+#define BAR_ME_REGISTER 				0x450000
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG				0x408
+#define PCI_CONFIG_2_BAR1_SIZE			(0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED 	(0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K		(1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K		(2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K		(3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K		(4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M		(5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M		(6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M		(7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M		(8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M		(9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M		(10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M		(11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M		(12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M		(13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M		(14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G		(15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA 		(1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY		(1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY		(1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE		(1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE		(0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED	(0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K		(1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K		(2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K		(3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K		(4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K		(5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K		(6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K		(7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K		(8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K		(9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M		(10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M		(11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M		(12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M		(13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M		(14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M		(15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH		(1L<<16)
+#define PCI_CONFIG_2_RESERVED0			(0x7fffL<<17)
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG				0x40c
+#define PCI_CONFIG_3_STICKY_BYTE		(0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME			(1L<<24)
+#define PCI_CONFIG_3_PME_STATUS 		(1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE 		(1L<<26)
+#define PCI_CONFIG_3_PM_STATE			(0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET		(1L<<30)
+#define PCI_CONFIG_3_PCI_POWER			(1L<<31)
+
+#define GRC_BAR2_CONFIG 				0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE			(0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED 	(0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K		(1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K		(2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K		(3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K		(4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M		(5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M		(6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M		(7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M		(8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M		(9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M		(10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M		(11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M		(12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M		(13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M		(14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G		(15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA 		(1L<<4)
+
+#define PCI_PM_DATA_A					0x410
+#define PCI_PM_DATA_B					0x414
+#define PCI_ID_VAL1					0x434
+#define PCI_ID_VAL2					0x438
+#define PCI_ID_VAL3					0x43c
+
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL		    0x61C
+#define GRC_CONFIG_REG_PF_INIT_VF		0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK	0xf
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
+ */
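+/* Illustrative sketch (editor's addition): given the 4-bit field above and
+ * the stated multiple-of-8 assumption, the first VF number of a PF would
+ * presumably be recovered as
+ *
+ *	first_vf = (val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) * 8;
+ *
+ * where 'val' is the value read from GRC_CONFIG_REG_PF_INIT_VF. The
+ * multiplication by 8 is an editor's inference, not stated explicitly in
+ * this header. */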
+
+#define PXPCS_TL_CONTROL_5		    0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN    (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN	   (1 << 28)   /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN   (1 << 27)   /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN    (1 << 26)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR  (1 << 25)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW	   (1 << 24)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN    (1 << 23)   /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN	   (1 << 22)   /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE   (1 << 21)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG  (1 << 20)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1   (1 << 19)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1   (1 << 18)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1   (1 << 17)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1   (1 << 16)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1   (1 << 15)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1  (1 << 14)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1    (1 << 13)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1    (1 << 12)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1    (1 << 11)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1   (1 << 10)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT    (1 << 9)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT    (1 << 8)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC    (1 << 7)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP    (1 << 6)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW    (1 << 5)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL   (1 << 4)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT     (1 << 3)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT     (1 << 2)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL	   (1 << 1)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP    (1 << 0)    /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT	   0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4    (1 << 29)   /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+	(1 << 28) /* Unsupported Request Error Status in function 4, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+	(1 << 27) /* ECRC Error TLP Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+	(1 << 26) /* Malformed TLP Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+	(1 << 25) /* Receiver Overflow Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+	(1 << 24) /* Unexpected Completion Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+	(1 << 23) /* Receive UR Status in function 4. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+	(1 << 22) /* Completer Timeout Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+	(1 << 21) /* Flow Control Protocol Error Status in function 4, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+	(1 << 20) /* Poisoned Error Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3    (1 << 19)   /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+	(1 << 18) /* Unsupported Request Error Status in function 3, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+	(1 << 17) /* ECRC Error TLP Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+	(1 << 16) /* Malformed TLP Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+	(1 << 15) /* Receiver Overflow Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+	(1 << 14) /* Unexpected Completion Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+	(1 << 13) /* Receive UR Status in function 3. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+	(1 << 12) /* Completer Timeout Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+	(1 << 11) /* Flow Control Protocol Error Status in function 3, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+	(1 << 10) /* Poisoned Error Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2    (1 << 9)    /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+	(1 << 8) /* Unsupported Request Error Status for Function 2, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+	(1 << 7) /* ECRC Error TLP Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+	(1 << 6) /* Malformed TLP Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+	(1 << 5) /* Receiver Overflow Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+	(1 << 4) /* Unexpected Completion Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+	(1 << 3) /* Receive UR Status for Function 2. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+	(1 << 2) /* Completer Timeout Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+	(1 << 1) /* Flow Control Protocol Error Status for Function 2, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+	(1 << 0) /* Poisoned Error Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT  0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7    (1 << 29)   /*	 WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+	(1 << 28) /* Unsupported Request Error Status in function 7, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+	(1 << 27) /* ECRC Error TLP Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+	(1 << 26) /* Malformed TLP Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+	(1 << 25) /* Receiver Overflow Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+	(1 << 24) /* Unexpected Completion Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+	(1 << 23) /* Receive UR Status in function 7. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+	(1 << 22) /* Completer Timeout Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+	(1 << 21) /* Flow Control Protocol Error Status in function 7, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+	(1 << 20) /* Poisoned Error Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6    (1 << 19)    /*	  WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+	(1 << 18) /* Unsupported Request Error Status in function 6, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+	(1 << 17) /* ECRC Error TLP Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+	(1 << 16) /* Malformed TLP Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+	(1 << 15) /* Receiver Overflow Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+	(1 << 14) /* Unexpected Completion Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+	(1 << 13) /* Receive UR Status in function 6. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+	(1 << 12) /* Completer Timeout Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+	(1 << 11) /* Flow Control Protocol Error Status in function 6, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+	(1 << 10) /* Poisoned Error Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5    (1 << 9) /*    WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+	(1 << 8) /* Unsupported Request Error Status for Function 5, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+	(1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+	(1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+	(1 << 5) /* Receiver Overflow Status Status for Function 5, if \
+	set, generate pcie_err_attn output when this error is seen.. WC \
+	*/
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+	(1 << 4) /* Unexpected Completion Status Status for Function 5, \
+	if set, generate pcie_err_attn output when this error is seen. WC \
+	*/
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+	(1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+	(1 << 2) /* Completer Timeout Status Status for Function 5, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+	(1 << 1) /* Flow Control Protocol Error Status Status for \
+	Function 5, if set, generate pcie_err_attn output when this error \
+	is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+	(1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen.. WC */
+
+
+#define BAR_USTRORM_INTMEM				0x400000
+#define BAR_CSTRORM_INTMEM				0x410000
+#define BAR_XSTRORM_INTMEM				0x420000
+#define BAR_TSTRORM_INTMEM				0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM					0x440000
+
+#define BAR_DOORBELL_OFFSET				0x800000
+
+#define BAR_ME_REGISTER				0x450000
+#define ME_REG_PF_NUM_SHIFT		0
+#define ME_REG_PF_NUM\
+	(7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID		(1<<8)
+#define ME_REG_VF_NUM_SHIFT		9
+#define ME_REG_VF_NUM_MASK		(0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR			(0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT	16
+#define ME_REG_ABS_PF_NUM\
+	(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
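+
+/* Illustrative sketch (editor's note, not part of the original header): the
+ * ME register fields above are plain mask/shift pairs, so a hypothetical
+ * helper could decode them as follows (function name is an assumption made
+ * only for this example):
+ *
+ *	static inline u8 me_reg_abs_pf_num(u32 me_reg)
+ *	{
+ *		return (me_reg & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT;
+ *	}
+ *
+ * Similarly, a VF context is indicated by (me_reg & ME_REG_VF_VALID) being
+ * set, and its number is (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT.
+ */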
+
+
+#define PXP_VF_ADDR_IGU_START				0
+#define PXP_VF_ADDR_IGU_SIZE				0x3000
+#define PXP_VF_ADDR_IGU_END\
+	((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START			0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+	(PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+	((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START			0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE			(PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+	((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START				0x7c00
+#define PXP_VF_ADDR_DB_SIZE				0x200
+#define PXP_VF_ADDR_DB_END\
+	((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
+#define MDIO_REG_BANK_CL73_IEEEB0	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN	0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN		0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST	0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1	0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1		0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE			0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2		0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M		0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX		0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4		0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR		0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1		0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE		0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK		0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2			0x04
+
+#define MDIO_REG_BANK_RX0				0x80b0
+#define MDIO_RX0_RX_STATUS				0x10
+#define MDIO_RX0_RX_STATUS_SIGDET			0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE			0x1000
+#define MDIO_RX0_RX_EQ_BOOST				0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX1				0x80c0
+#define MDIO_RX1_RX_EQ_BOOST				0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX2				0x80d0
+#define MDIO_RX2_RX_EQ_BOOST				0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX3				0x80e0
+#define MDIO_RX3_RX_EQ_BOOST				0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX_ALL				0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 			0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL	0x10
+
+#define MDIO_REG_BANK_TX0				0x8060
+#define MDIO_TX0_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX1				0x8070
+#define MDIO_TX1_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX2				0x8080
+#define MDIO_TX2_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX3				0x8090
+#define MDIO_TX3_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0			0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL			0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1			0x8010
+#define MDIO_BLOCK1_LANE_CTRL0				0x15
+#define MDIO_BLOCK1_LANE_CTRL1				0x16
+#define MDIO_BLOCK1_LANE_CTRL2				0x17
+#define MDIO_BLOCK1_LANE_PRBS				0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2			0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP			0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE		0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE	0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP		0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE		0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G	0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS	0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS	0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 	0x15
+
+#define MDIO_REG_BANK_GP_STATUS 			0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1				0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE	0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE	0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS		0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS		0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE	0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE	0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE	0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE	0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 	0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M		0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 	0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G		0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 	0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G		0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G		0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG	0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4	0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG	0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G	0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G		0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G		0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G		0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX	0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4	0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR	0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI	0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS	0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI	0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2	0x3900
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS		0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK		0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL		0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN	0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK		0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT		(0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL			0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1			0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 		0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF			0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN		0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT	0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET			0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE			0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2			0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN			0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 		0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1			0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII			0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK			0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX			0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK			0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 		3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G			0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G			0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M			0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M			0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2			0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 		0x0002
+#define MDIO_SERDES_DIGITAL_MISC1				0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK			0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M			0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M			0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M			0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M			0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M			0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL			0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK			0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G			0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G			0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G			0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG			0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4			0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G			0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G			0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G			0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G			0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G			0x0009
+
+#define MDIO_REG_BANK_OVER_1G				0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 				0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK				0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT				5
+#define MDIO_OVER_1G_UP1					0x19
+#define MDIO_OVER_1G_UP1_2_5G						0x0001
+#define MDIO_OVER_1G_UP1_5G						0x0002
+#define MDIO_OVER_1G_UP1_6G						0x0004
+#define MDIO_OVER_1G_UP1_10G						0x0010
+#define MDIO_OVER_1G_UP1_10GH						0x0008
+#define MDIO_OVER_1G_UP1_12G						0x0020
+#define MDIO_OVER_1G_UP1_12_5G						0x0040
+#define MDIO_OVER_1G_UP1_13G						0x0080
+#define MDIO_OVER_1G_UP1_15G						0x0100
+#define MDIO_OVER_1G_UP1_16G						0x0200
+#define MDIO_OVER_1G_UP2					0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK				0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK					0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK				0x03C0
+#define MDIO_OVER_1G_UP3					0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 					0x0001
+#define MDIO_OVER_1G_LP_UP1					0x1C
+#define MDIO_OVER_1G_LP_UP2					0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 			0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK				0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT				7
+#define MDIO_OVER_1G_LP_UP3						0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY			0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS				0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG	0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG	0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE			0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL			0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE			0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN			0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0		0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL				0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL			0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1				0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK			0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37		0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 			0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN				0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN		0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN		0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 			0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 		0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 		0xFFD0
+#define MDIO_AER_BLOCK_AER_REG					0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0		0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL				0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK			0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10			0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100			0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000			0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 			0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN				0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN				0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK				0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET				0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS				0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS				0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE			0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV				0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX			0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX			0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK			0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE			0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC			0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC			0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH			0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 			0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 	0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE	0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK		0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK	0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE	0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH	0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP	0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP	0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ * bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ * The other bits are reserved and should be zero.
+ */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE	0x0001
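+/* For example (illustrative only, value made up), a link partner ability
+ * word of 0xd801 would decode per the layout above as: SGMII mode (bit 0),
+ * link up (bit 15), acknowledge (bit 14), duplex bit set (bit 12) and
+ * speed code 0b10 (bits 11:10).
+ */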
+
+
+#define MDIO_PMA_DEVAD			0x1
+/* ieee */
+#define MDIO_PMA_REG_CTRL		0x0
+#define MDIO_PMA_REG_STATUS		0x1
+#define MDIO_PMA_REG_10G_CTRL2		0x7
+#define MDIO_PMA_REG_TX_DISABLE		0x0009
+#define MDIO_PMA_REG_RX_SD		0xa
+/* bcm */
+#define MDIO_PMA_REG_BCM_CTRL		0x0096
+#define MDIO_PMA_REG_FEC_CTRL		0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER	0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL	0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS	0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN	0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS	0xca09
+#define MDIO_PMA_REG_MISC_CTRL		0xca0a
+#define MDIO_PMA_REG_GEN_CTRL		0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP	0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET		0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG	0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG	0xca13
+#define MDIO_PMA_REG_ROM_VER1		0xca19
+#define MDIO_PMA_REG_ROM_VER2		0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN	0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH	0xca1d
+#define MDIO_PMA_REG_PLL_CTRL		0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 	0xca23
+#define MDIO_PMA_REG_LRM_MODE		0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH	0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 	0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL		0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK	0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE		0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE	0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS	0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 	0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT	0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR	0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF	0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1		0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2		0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR	0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF	0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1		0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2		0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL		0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL		0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP		0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG		0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL		0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV			0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS		0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA			0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL		0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 	0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL	0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1		0xc026
+#define MDIO_PMA_REG_7101_VER2		0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL			0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK			0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK			0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK			0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK			0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK			0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK			0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL			0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK	0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD			0x2
+/* bcm */
+#define MDIO_WIS_REG_LASI_CNTL		0x9002
+#define MDIO_WIS_REG_LASI_STATUS	0x9005
+
+#define MDIO_PCS_DEVAD			0x3
+#define MDIO_PCS_REG_STATUS		0x0020
+#define MDIO_PCS_REG_LASI_STATUS	0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS	0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX	0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD	 (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD			0x4
+#define MDIO_XS_PLL_SEQUENCER		0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1	0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0	0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1	0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2	0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3	0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA	0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE	0x80FA
+
+#define MDIO_AN_DEVAD			0x7
+/* ieee */
+#define MDIO_AN_REG_CTRL		0x0000
+#define MDIO_AN_REG_STATUS		0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE		0x0020
+#define MDIO_AN_REG_ADV_PAUSE		0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE		0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC	0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH		0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK		0x0C00
+#define MDIO_AN_REG_ADV 		0x0011
+#define MDIO_AN_REG_ADV2		0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG		0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2	0x0014
+#define MDIO_AN_REG_MASTER_STATUS	0x0021
+#define MDIO_AN_REG_EEE_ADV		0x003c
+#define MDIO_AN_REG_LP_EEE_ADV		0x003d
+/* bcm */
+#define MDIO_AN_REG_LINK_STATUS 	0x8304
+#define MDIO_AN_REG_CL37_CL73		0x8370
+#define MDIO_AN_REG_CL37_AN		0xffe0
+#define MDIO_AN_REG_CL37_FC_LD		0xffe4
+#define MDIO_AN_REG_CL37_FC_LP		0xffe5
+#define MDIO_AN_REG_1000T_STATUS	0xffea
+
+#define MDIO_AN_REG_8073_2_5G		0x8329
+#define MDIO_AN_REG_8073_BAM		0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL	0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G	0x40
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB		0xffe2
+#define BCM84858_PHY_ID					0x600d
+#define MDIO_AN_REG_848xx_ID_LSB		0xffe3
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL	0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF	0x0008
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW	0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS	0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL		0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW		0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD			0x1e
+#define MDIO_CTL_REG_84823_MEDIA		0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK		0x0018
+	/* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI			0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M		0x0010
+	/* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK		0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L		0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI		0x0040
+	/* When this pin is active high during reset, the 10GBASE-T core is
+	 * powered down; when it is active low, the 10GBASE-T core is powered
+	 * up.
+	 */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN	0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK		0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G			0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG			0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS			0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH		0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ	0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1			0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1			0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN			0x0080
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV			0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE		0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE		0x1f81
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
+#define MDIO_84833_SUPER_ISOLATE		0x8000
+/* This is the mailbox register set used by the 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0			0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1			0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2			0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3			0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4			0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26		0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27		0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28		0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29		0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30		0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31		0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND	(MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS	(MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1	(MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2	(MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3	(MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4	(MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5	(MDIO_848xx_TOP_CFG_SCRATCH_REG31)
+
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP			0x8001
+#define PHY848xx_CMD_GET_EEE_MODE			0x8008
+#define PHY848xx_CMD_SET_EEE_MODE			0x8009
+/* Mailbox status set used by 84833 only */
+#define PHY84833_STATUS_CMD_RECEIVED			0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS			0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS		0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR		0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS		0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT			0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS		0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE		0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE		0xa5a5
+
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED			0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS			0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS		0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR		0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY			0xbbbb
+
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD					0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL			0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000
+#define MDIO_WC_REG_PCS_STATUS2				0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL			0x0096
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW			0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0			0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1			0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2			0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0			0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0			0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0			0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0			0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER			0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET			0x01
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK				0x000e
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET		0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK			0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET		0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK				0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET		0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK			0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER			0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER			0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER			0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G			0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G			0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL			0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
+#define MDIO_WC_REG_XGXS_STATUS3			0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL			0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2			0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3			0x8142
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1			0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1			0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0			0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1			0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2			0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3			0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4			0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP		0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION			0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE		0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET	0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT	    0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR	    0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC	    0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI	    0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G	    0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET	0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET	0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET	0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC			0x81FE
+#define MDIO_WC_REG_DSC_SMC				0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0		0x821e
+#define MDIO_WC_REG_TX_FIR_TAP				0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET		0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK			0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET		0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK		0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET		0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK		0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE		0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP		0x82e2
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL	0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL	0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL	0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL	0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL	0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1		0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2		0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3		0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1		0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1			0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2			0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1			0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1			 0x832c
+#define MDIO_WC_REG_DIGITAL4_MISC3			0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5			0x833e
+#define MDIO_WC_REG_DIGITAL5_MISC6			0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7			0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS		0x834d
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED		0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL		0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL			0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL			0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT			0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1			0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2			0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3			0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD			0x837b
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0			0x8390
+#define MDIO_WC_REG_TX66_CONTROL			0x83b0
+#define MDIO_WC_REG_RX66_CONTROL			0x83c0
+#define MDIO_WC_REG_RX66_SCW0				0x83c2
+#define MDIO_WC_REG_RX66_SCW1				0x83c3
+#define MDIO_WC_REG_RX66_SCW2				0x83c4
+#define MDIO_WC_REG_RX66_SCW3				0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK			0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK			0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK			0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK			0x83c9
+#define MDIO_WC_REG_FX100_CTRL1				0x8400
+#define MDIO_WC_REG_FX100_CTRL3				0x8402
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5		0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6		0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7		0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9		0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10		0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11		0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1			0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2			0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3			0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE		0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE			0x8457
+#define MDIO_WC_REG_MICROBLK_CMD			0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS			0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3			0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER				0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL			0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT		0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET			0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT	0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT	4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2		0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK			0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB				0x3
+#define MDIO_REG_GPHY_ID_54618SE		0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG			0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG			0xe
+#define MDIO_REG_GPHY_EEE_RESOLVED		0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE			0x15
+#define MDIO_REG_GPHY_EXP_ACCESS			0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP		0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF		0x40
+#define MDIO_REG_GPHY_AUX_STATUS			0x19
+#define MDIO_REG_INTR_STATUS				0x1a
+#define MDIO_REG_INTR_MASK				0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS			(0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW				0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1			(0x0d << 10)
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2			(0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA			(0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED		(0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD		(0x1 << 8)
+
+#define IGU_FUNC_BASE			0x0400
+
+#define IGU_ADDR_MSIX			0x0000
+#define IGU_ADDR_INT_ACK		0x0200
+#define IGU_ADDR_PROD_UPD		0x0201
+#define IGU_ADDR_ATTN_BITS_UPD	0x0202
+#define IGU_ADDR_ATTN_BITS_SET	0x0203
+#define IGU_ADDR_ATTN_BITS_CLR	0x0204
+#define IGU_ADDR_COALESCE_NOW	0x0205
+#define IGU_ADDR_SIMD_MASK		0x0206
+#define IGU_ADDR_SIMD_NOMASK	0x0207
+#define IGU_ADDR_MSI_CTL		0x0210
+#define IGU_ADDR_MSI_ADDR_LO	0x0211
+#define IGU_ADDR_MSI_ADDR_HI	0x0212
+#define IGU_ADDR_MSI_DATA		0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup  0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup  1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup  2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup  3
+
+#define COMMAND_REG_INT_ACK	    0x0
+#define COMMAND_REG_PROD_UPD	    0x4
+#define COMMAND_REG_ATTN_BITS_UPD   0x8
+#define COMMAND_REG_ATTN_BITS_SET   0xc
+#define COMMAND_REG_ATTN_BITS_CLR   0x10
+#define COMMAND_REG_COALESCE_NOW    0x14
+#define COMMAND_REG_SIMD_MASK	    0x18
+#define COMMAND_REG_SIMD_NOMASK     0x1c
+
+
+#define IGU_MEM_BASE						0x0000
+
+#define IGU_MEM_MSIX_BASE					0x0000
+#define IGU_MEM_MSIX_UPPER					0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER			0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE				0x0200
+#define IGU_MEM_PBA_MSIX_UPPER				0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD		0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 	0x03ff
+
+#define IGU_CMD_INT_ACK_BASE				0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+	(IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER		0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE			0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+	(IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER	0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER			0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER			0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER			0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER		0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER	0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER	0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05a6
+
+#define IGU_REG_RESERVED_UPPER				0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN	  (0x1<<0)  /* function enable	      */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1<<1)  /* MSI/MSIX enable	      */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1<<2)  /* INT enable	      */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1<<3)  /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4)  /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE	  (0x1<<5)  /* simd all ones mode     */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN	   (0x1<<0)  /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1<<1)  /* MSI/MSIX enable        */
+#define IGU_VF_CONF_PARENT_MASK    (0x3<<2)  /* Parent PF	       */
+#define IGU_VF_CONF_PARENT_SHIFT   2	     /* Parent PF	       */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1<<4)  /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS    5
+#define IGU_BC_NDSB_NUM_SEGS   2
+#define IGU_NORM_DSB_NUM_SEGS  2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD   128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+/* FID encoding: for a VF, [6] = 0 and [5:0] = VF number;
+ * for a PF, [6] = 1, [5:2] = 0 and [1:0] = PF number.
+ */
+#define IGU_FID_ENCODE_IS_PF	    (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT  6
+#define IGU_FID_VF_NUM_MASK	    (0x3f)
+#define IGU_FID_PF_NUM_MASK	    (0x7)
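+
+/* Illustrative sketch (editor's note, not part of the original header):
+ * decoding an FID value with the masks above (the numbers are made up):
+ *
+ *	fid = 0x2a: (fid & IGU_FID_ENCODE_IS_PF) == 0 -> VF,
+ *	            vf_num = fid & IGU_FID_VF_NUM_MASK = 0x2a (VF 42)
+ *	fid = 0x45: (fid & IGU_FID_ENCODE_IS_PF) != 0 -> PF,
+ *	            pf_num = fid & IGU_FID_PF_NUM_MASK = 0x5
+ */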
+
+#define IGU_REG_MAPPING_MEMORY_VALID		(1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK	(0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT	1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK	(0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT	7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/* String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+	(0x80 | (((_type)&0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
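+
+/* Illustrative sketch (editor's note, not part of the original header): for
+ * _cid = 0x12345, _region = CDU_REGION_NUMBER_XCM_AG (2) and _type = 3,
+ * CDU_VALID_DATA(0x12345, 2, 3) packs the fields as
+ * (0x12345 << 8) | (2 << 4) | 3 = 0x1234523; CDU_CRC8() then feeds that
+ * value to calc_crc8() below to build the reserved validation byte.
+ */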
+
+/******************************************************************************
+ * Description:
+ *	   Calculates a CRC-8 over a 32-bit word: polynomial 0-1-2-8
+ *	   (x^8 + x^2 + x + 1). Code was translated from Verilog.
+ * Return:
+ *	   The resulting CRC-8 value.
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+	u8 D[32];
+	u8 NewCRC[8];
+	u8 C[8];
+	u8 crc_res;
+	u8 i;
+
+	/* split the data word into its 32 bits */
+	for (i = 0; i < 32; i++) {
+		D[i] = (u8)(data & 1);
+		data = data >> 1;
+	}
+
+	/* split the crc into 8 bits */
+	for (i = 0; i < 8; i++) {
+		C[i] = crc & 1;
+		crc = crc >> 1;
+	}
+
+	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+		    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+		    C[6] ^ C[7];
+	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+		    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+		    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+		    C[6];
+	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+		    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+		    C[0] ^ C[1] ^ C[4] ^ C[5];
+	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+		    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+		    C[1] ^ C[2] ^ C[5] ^ C[6];
+	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+		    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+		    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+		    C[3] ^ C[4] ^ C[7];
+	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+		    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+		    C[5];
+	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+		    C[6];
+
+	crc_res = 0;
+	for (i = 0; i < 8; i++)
+		crc_res |= (NewCRC[i] << i);
+
+	return crc_res;
+}
+
+
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
new file mode 100644
index 0000000..ff702a7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -0,0 +1,6343 @@
+/* bnx2x_sp.c: Qlogic Everest network driver.
+ *
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and Qlogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI		16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @bp:		driver handle
+ * @o:		pointer to the object
+ * @exe_len:	length
+ * @owner:	pointer to the owner
+ * @validate:	validate function pointer
+ * @remove:	remove function pointer
+ * @optimize:	optimize function pointer
+ * @exec:	execute function pointer
+ * @get:	get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+					struct bnx2x_exe_queue_obj *o,
+					int exe_len,
+					union bnx2x_qable_obj *owner,
+					exe_q_validate validate,
+					exe_q_remove remove,
+					exe_q_optimize optimize,
+					exe_q_execute exec,
+					exe_q_get get)
+{
+	memset(o, 0, sizeof(*o));
+
+	INIT_LIST_HEAD(&o->exe_queue);
+	INIT_LIST_HEAD(&o->pending_comp);
+
+	spin_lock_init(&o->lock);
+
+	o->exe_chunk_len = exe_len;
+	o->owner         = owner;
+
+	/* Owner specific callbacks */
+	o->validate      = validate;
+	o->remove        = remove;
+	o->optimize      = optimize;
+	o->execute       = exec;
+	o->get           = get;
+
+	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
+	   exe_len);
+}
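+
+/* Illustrative usage sketch (editor's note, not part of the original code):
+ * a typical exe queue lifecycle in this file looks roughly like
+ *
+ *	bnx2x_exe_queue_init(bp, &o->exe_queue, len, owner, validate, remove,
+ *			     optimize, exec, get);
+ *	elem = bnx2x_exe_queue_alloc_elem(bp);
+ *	... fill the element's command data and elem->cmd_len ...
+ *	rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+ *	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ *
+ * where bnx2x_exe_queue_add() takes the queue lock itself, while
+ * bnx2x_exe_queue_step() expects the caller to hold o->exe_queue.lock.
+ * The variable names above are placeholders.
+ */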
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+					     struct bnx2x_exeq_elem *elem)
+{
+	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+	kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+	int cnt = 0;
+
+	spin_lock_bh(&o->lock);
+
+	list_for_each_entry(elem, &o->exe_queue, link)
+		cnt++;
+
+	spin_unlock_bh(&o->lock);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp:		driver handle
+ * @o:		queue
+ * @elem:	new command element to add
+ * @restore:	true - do not optimize the command
+ *
+ * If the element is optimized away or is illegal, this function frees it.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+				      struct bnx2x_exe_queue_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      bool restore)
+{
+	int rc;
+
+	spin_lock_bh(&o->lock);
+
+	if (!restore) {
+		/* Try to cancel (optimize out) this element */
+		rc = o->optimize(bp, o->owner, elem);
+		if (rc)
+			goto free_and_exit;
+
+		/* Check if this request is ok */
+		rc = o->validate(bp, o->owner, elem);
+		if (rc) {
+			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
+			goto free_and_exit;
+		}
+	}
+
+	/* If so, add it to the execution queue */
+	list_add_tail(&elem->link, &o->exe_queue);
+
+	spin_unlock_bh(&o->lock);
+
+	return 0;
+
+free_and_exit:
+	bnx2x_exe_queue_free_elem(bp, elem);
+
+	spin_unlock_bh(&o->lock);
+
+	return rc;
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+	struct bnx2x *bp,
+	struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+
+	while (!list_empty(&o->pending_comp)) {
+		elem = list_first_entry(&o->pending_comp,
+					struct bnx2x_exeq_elem, link);
+
+		list_del(&elem->link);
+		bnx2x_exe_queue_free_elem(bp, elem);
+	}
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp:			driver handle
+ * @o:			queue
+ * @ramrod_flags:	flags
+ *
+ * (Should be called while holding the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+				       struct bnx2x_exe_queue_obj *o,
+				       unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem, spacer;
+	int cur_len = 0, rc;
+
+	memset(&spacer, 0, sizeof(spacer));
+
+	/* Next step should not be performed until the current is finished,
+	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+	 * properly clear object internals without sending any command to the FW
+	 * which also implies there won't be any completion to clear the
+	 * 'pending' list.
+	 */
+	if (!list_empty(&o->pending_comp)) {
+		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
+			__bnx2x_exe_queue_reset_pending(bp, o);
+		} else {
+			return 1;
+		}
+	}
+
+	/* Run through the pending commands list and create the next
+	 * execution chunk.
+	 */
+	while (!list_empty(&o->exe_queue)) {
+		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+					link);
+		WARN_ON(!elem->cmd_len);
+
+		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+			cur_len += elem->cmd_len;
+			/* Prevent both lists from being empty when moving an
+			 * element. This allows bnx2x_exe_queue_empty() to be
+			 * called without locking.
+			 */
+			list_add_tail(&spacer.link, &o->pending_comp);
+			mb();
+			list_move_tail(&elem->link, &o->pending_comp);
+			list_del(&spacer.link);
+		} else
+			break;
+	}
+
+	/* Sanity check */
+	if (!cur_len)
+		return 0;
+
+	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+	if (rc < 0)
+		/* In case of an error, return the commands to the queue
+		 * and reset the pending_comp list.
+		 */
+		list_splice_init(&o->pending_comp, &o->exe_queue);
+	else if (!rc)
+		/* A return value of zero means there are no outstanding
+		 * pending completions and we may dismiss the pending list.
+		 */
+		__bnx2x_exe_queue_reset_pending(bp, o);
+
+	return rc;
+}
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+	bool empty = list_empty(&o->exe_queue);
+
+	/* Don't reorder!!! */
+	mb();
+
+	return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+	struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+	return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_atomic();
+	clear_bit(o->state, o->pstate);
+	smp_mb__after_atomic();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_atomic();
+	set_bit(o->state, o->pstate);
+	smp_mb__after_atomic();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp:		device handle
+ * @state:	state which is to be cleared
+ * @state_p:	state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+				   unsigned long *pstate)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
+
+	if (CHIP_REV_IS_EMUL(bp))
+		cnt *= 20;
+
+	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+	might_sleep();
+	while (cnt--) {
+		if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+
+		usleep_range(1000, 2000);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+	BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+	return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Non-blocking implementation; should be called under the execution
+ *           queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	if (o->head_reader) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+		return -EBUSY;
+	}
+
+	DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+	return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute a step which was pended earlier
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ *
+ * @details Should be called under the execution queue lock; notice it might
+ *          release and reclaim that lock during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	int rc;
+	unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+	   ramrod_flags);
+	o->head_exe_request = false;
+	o->saved_ramrod_flags = 0;
+	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+	if ((rc != 0) && (rc != 1)) {
+		BNX2X_ERR("execution of pending commands failed with rc %d\n",
+			  rc);
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	}
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @ramrod_flags:	ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				    unsigned long ramrod_flags)
+{
+	o->head_exe_request = true;
+	o->saved_ramrod_flags = ramrod_flags;
+	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+	   ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. Note that if a
+ *          pending execution exists, it will be performed, possibly releasing
+ *          and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+					    struct bnx2x_vlan_mac_obj *o)
+{
+	/* It's possible a new pending execution was added since this writer
+	 * executed. If so, execute again. [Ad infinitum]
+	 */
+	while (o->head_exe_request) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+		__bnx2x_vlan_mac_h_exec_pending(bp, o);
+	}
+}
+
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ *          release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+					struct bnx2x_vlan_mac_obj *o)
+{
+	/* If we got here, we're holding lock --> no WRITER exists */
+	o->head_reader++;
+	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+	   o->head_reader);
+
+	return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int rc;
+
+	spin_lock_bh(&o->exe_queue.lock);
+	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+	spin_unlock_bh(&o->exe_queue.lock);
+
+	return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. Note that if a
+ *          pending execution exists, it will be performed if this was the
+ *          last reader, possibly releasing and reclaiming the execution
+ *          queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+					  struct bnx2x_vlan_mac_obj *o)
+{
+	if (!o->head_reader) {
+		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#endif
+	} else {
+		o->head_reader--;
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+		   o->head_reader);
+	}
+
+	/* It's possible a new pending execution was added, and that this reader
+	 * was last - if so we need to execute the command.
+	 */
+	if (!o->head_reader && o->head_exe_request) {
+		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+		/* Writer release will do the trick */
+		__bnx2x_vlan_mac_h_write_unlock(bp, o);
+	}
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ *
+ * @details Note that if a pending execution exists, it will be performed if
+ *          this was the last reader. Claims and releases the execution queue
+ *          lock during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o)
+{
+	spin_lock_bh(&o->exe_queue.lock);
+	__bnx2x_vlan_mac_h_read_unlock(bp, o);
+	spin_unlock_bh(&o->exe_queue.lock);
+}
+
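+/* Copy the classification data of up to @n registry elements into the
+ * caller's buffer: @size bytes are copied per element, with consecutive
+ * entries placed (@stride + @size) bytes apart starting at @base.
+ * Returns the number of copied entries multiplied by ETH_ALEN.
+ */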
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+				int n, u8 *base, u8 stride, u8 size)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	u8 *next = base;
+	int counter = 0;
+	int read_lock;
+
+	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+	if (read_lock != 0)
+		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
+
+	/* traverse list */
+	list_for_each_entry(pos, &o->head, link) {
+		if (counter < n) {
+			memcpy(next, &pos->u, size);
+			counter++;
+			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+			   counter, next);
+			next += stride + size;
+		}
+	}
+
+	if (read_lock == 0) {
+		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+		bnx2x_vlan_mac_h_read_unlock(bp, o);
+	}
+
+	return counter * ETH_ALEN;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o,
+			       union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
+
+	if (!is_valid_ether_addr(data->mac.mac))
+		return -EINVAL;
+
+	/* Check if a requested MAC already exists */
+	list_for_each_entry(pos, &o->head, link)
+		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x *bp,
+				struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+				  ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return -EEXIST;
+
+	return 0;
+}
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_mac_del(struct bnx2x *bp,
+			    struct bnx2x_vlan_mac_obj *o,
+			    union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
+
+	list_for_each_entry(pos, &o->head, link)
+		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_del(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return pos;
+
+	return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *src_o,
+			     struct bnx2x_vlan_mac_obj *dst_o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	int rc;
+
+	/* Check if we can delete the requested configuration from the first
+	 * object.
+	 */
+	pos = src_o->check_del(bp, src_o, data);
+
+	/*  check if configuration can be added */
+	rc = dst_o->check_add(bp, dst_o, data);
+
+	/* If this classification cannot be added (it is already set)
+	 * or cannot be deleted - return an error.
+	 */
+	if (rc || !pos)
+		return false;
+
+	return true;
+}
+
+static bool bnx2x_check_move_always_err(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *src_o,
+	struct bnx2x_vlan_mac_obj *dst_o,
+	union bnx2x_classification_ramrod_data *data)
+{
+	return false;
+}
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+	return rx_tx_flag;
+}
+
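+/* Program (or clear) a MAC entry in the NIG LLH CAM of the current port.
+ * Only relevant in switch-independent (SI) and AFEX multi-function modes;
+ * otherwise the function returns without touching the hardware.
+ */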
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+				 bool add, unsigned char *dev_addr, int index)
+{
+	u32 wb_data[2];
+	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+			 NIG_REG_LLH0_FUNC_MEM;
+
+	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
+		return;
+
+	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
+		return;
+
+	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+			 (add ? "ADD" : "DELETE"), index);
+
+	if (add) {
+		/* LLH_FUNC_MEM is a u64 WB register */
+		reg_offset += 8*index;
+
+		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+			      (dev_addr[4] <<  8) |  dev_addr[5]);
+		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
+
+		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+	}
+
+	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue for which we want to configure this rule
+ * @add:	if true the command is an ADD command, DEL otherwise
+ * @opcode:	CLASSIFY_RULE_OPCODE_XXX
+ * @hdr:	pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+	struct eth_classify_cmd_header *hdr)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	hdr->client_id = raw->cl_id;
+	hdr->func_id = raw->func_id;
+
+	/* Rx or/and Tx (internal switching) configuration ? */
+	hdr->cmd_general_data |=
+		bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+	if (add)
+		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+	hdr->cmd_general_data |=
+		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid:	connection id
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @hdr:	pointer to header to setup
+ * @rule_cnt:
+ *
+ * Currently we always configure one rule, and the echo field is set to
+ * contain a CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+				struct eth_classify_header *hdr, int rule_cnt)
+{
+	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+				(type << BNX2X_SWCID_SHIFT));
+	hdr->rule_cnt = (u8)rule_cnt;
+}
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 struct bnx2x_exeq_elem *elem, int rule_idx,
+				 int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
+	 * relevant. In addition, current implementation is tuned for a
+	 * single ETH MAC.
+	 *
+	 * When configuration of multiple unicast ETH MACs per PF in
+	 * switch-independent mode is required (NetQ, multiple netdev MACs,
+	 * etc.), consider better utilisation of the 8 per-function MAC
+	 * entries in the LLH register. There are also the
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the
+	 * total number of CAM entries to 16.
+	 *
+	 * Currently we won't configure NIG for MACs other than a primary ETH
+	 * MAC and iSCSI L2 MAC.
+	 *
+	 * If this MAC is moving from one Queue to another, no need to change
+	 * NIG configuration.
+	 */
+	if (cmd != BNX2X_VLAN_MAC_MOVE) {
+		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac,
+					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
+		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac,
+					     BNX2X_LLH_CAM_ETH_LINE);
+	}
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Setup a command header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+				      &rule_entry->mac.header);
+
+	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
+	   (add ? "add" : "delete"), mac, raw->cl_id);
+
+	/* Set a MAC itself */
+	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+			      &rule_entry->mac.mac_mid,
+			      &rule_entry->mac.mac_lsb, mac);
+	rule_entry->mac.inner_mac =
+		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_MAC,
+					      &rule_entry->mac.header);
+
+		/* Set a MAC itself */
+		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+				      &rule_entry->mac.mac_mid,
+				      &rule_entry->mac.mac_lsb, mac);
+		rule_entry->mac.inner_mac =
+			cpu_to_le16(elem->cmd_data.vlan_mac.
+						u.mac.is_inner_mac);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: move this to a higher level in order to prevent writing the
+	 * header multiple times.
+	 */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @cam_offset:	offset in cam memory
+ * @hdr:	pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+	struct mac_configuration_hdr *hdr)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	hdr->length = 1;
+	hdr->offset = (u8)cam_offset;
+	hdr->client_id = cpu_to_le16(0xff);
+	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+				(type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	u32 cl_bit_vec = (1 << r->cl_id);
+
+	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+	cfg_entry->pf_id = r->func_id;
+	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+	if (add) {
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+		SET_FLAG(cfg_entry->flags,
+			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+		/* Set a MAC in a ramrod data */
+		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+				      &cfg_entry->middle_mac_addr,
+				      &cfg_entry->lsb_mac_addr, mac);
+	} else
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+					 &config->hdr);
+	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+					 cfg_entry);
+
+	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
+			 (add ? "setting" : "clearing"),
+			 mac, raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/* 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
+				     ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+				      &rule_entry->vlan.header);
+
+	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+			 vlan);
+
+	/* Set a VLAN itself */
+	rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_VLAN,
+					      &rule_entry->vlan.header);
+
+		/* Set a VLAN itself */
+		rule_entry->vlan.vlan = cpu_to_le16(vlan);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: move this to a higher level in order to prevent writing the
+	 * header multiple times.
+	 */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
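+/* Fill a single VLAN-MAC pair classification rule in the ramrod data buffer
+ * (E2 and newer). For a MOVE command a second rule is added that configures
+ * the pair on the destination queue.
+ */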
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+	u16 inner_mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+	inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+	rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		struct bnx2x_vlan_mac_obj *target_obj;
+
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		target_obj = elem->cmd_data.vlan_mac.target_obj;
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set a VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+		rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	}
+
+	/* Set the ramrod data header */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset:	cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/* 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp:		device handle
+ * @p:		command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of previously
+ * configured elements.
+ *
+ * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags is
+ * taken into account.
+ *
+ * The cookie pointed to by @ppos should be given back in the next call to make
+ * the function handle the next element. If *ppos is set to NULL, the iterator
+ * is restarted. If *ppos == NULL on return, the last element has been handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p,
+			   struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+	/* If list is empty - there is nothing to do here */
+	if (list_empty(&o->head)) {
+		*ppos = NULL;
+		return 0;
+	}
+
+	/* make a step... */
+	if (*ppos == NULL)
+		*ppos = list_first_entry(&o->head,
+					 struct bnx2x_vlan_mac_registry_elem,
+					 link);
+	else
+		*ppos = list_next_entry(*ppos, link);
+
+	pos = *ppos;
+
+	/* If it's the last step - return NULL */
+	if (list_is_last(&pos->link, &o->head))
+		*ppos = NULL;
+
+	/* Prepare a 'user_req' */
+	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+	/* Set the command */
+	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+	/* Set vlan_mac_flags */
+	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+	/* Set a restore bit */
+	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, p);
+}
+
+/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element with a specific criteria and NULL if such an element
+ * hasn't been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			    sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd ==
+		     elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	int rc;
+
+	/* Check the registry */
+	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
+	if (rc) {
+		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
+		return rc;
+	}
+
+	/* Check if there is a pending ADD command for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
+	 */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+		return -EEXIST;
+	}
+
+	/* TODO: Check the pending MOVE from other objects where this
+	 * object is a destination object.
+	 */
+
+	/* Consume the credit if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->get_credit(o)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check
+ * @elem:	element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification cannot be deleted (doesn't exist)
+	 * - return -EEXIST.
+	 */
+	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
+	if (!pos) {
+		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
+		return -EEXIST;
+	}
+
+	/* Check if there are pending DEL or MOVE commands for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check for MOVE commands */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+	if (exeq->get(exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending MOVE command already\n");
+		return -EINVAL;
+	}
+
+	/* Check for DEL commands */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+		return -EEXIST;
+	}
+
+	/* Return the credit to the credit pool if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->put_credit(o))) {
+		BNX2X_ERR("Failed to return a credit\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check (source)
+ * @elem:	element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+					       union bnx2x_qable_obj *qo,
+					       struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+	struct bnx2x_exeq_elem query_elem;
+	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+	/* Check if we can perform this operation based on the current registry
+	 * state.
+	 */
+	if (!src_o->check_move(bp, src_o, dest_o,
+			       &elem->cmd_data.vlan_mac.u)) {
+		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
+		return -EINVAL;
+	}
+
+	/* Check if there is an already pending DEL or MOVE command for the
+	 * source object or ADD command for a destination object. Return an
+	 * error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check DEL on source */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+	if (src_exeq->get(src_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
+		return -EINVAL;
+	}
+
+	/* Check MOVE on source */
+	if (src_exeq->get(src_exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+		return -EEXIST;
+	}
+
+	/* Check ADD on destination */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+	if (dest_exeq->get(dest_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
+		return -EINVAL;
+	}
+
+	/* Consume the credit if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    dest_o->get_credit(dest_o)))
+		return -EINVAL;
+
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    src_o->put_credit(src_o))) {
+		/* return the credit taken from dest... */
+		dest_o->put_credit(dest_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+	case BNX2X_VLAN_MAC_DEL:
+		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+	case BNX2X_VLAN_MAC_MOVE:
+		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+	default:
+		return -EINVAL;
+	}
+}
+
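+/* Undo the credit accounting of a command that is removed from the execution
+ * queue before it ran: ADD/MOVE return the CAM credit they consumed, while
+ * DEL takes back the credit it had released.
+ */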
+static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
+				  union bnx2x_qable_obj *qo,
+				  struct bnx2x_exeq_elem *elem)
+{
+	int rc = 0;
+
+	/* If consumption wasn't required, nothing to do */
+	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
+		return 0;
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+	case BNX2X_VLAN_MAC_MOVE:
+		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rc != true)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int cnt = 5000, rc;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	while (cnt--) {
+		/* Wait for the current command to complete */
+		rc = raw->wait_comp(bp, raw);
+		if (rc)
+			return rc;
+
+		/* Wait until there are no pending commands */
+		if (!bnx2x_exe_queue_empty(exeq))
+			usleep_range(1000, 2000);
+		else
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
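+/* Try to take the vlan_mac head writer lock and perform the next execution
+ * chunk. If the writer lock cannot be taken, the step is pended on the
+ * object and 1 is returned, exactly as if a ramrod were already pending.
+ */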
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+					 struct bnx2x_vlan_mac_obj *o,
+					 unsigned long *ramrod_flags)
+{
+	int rc = 0;
+
+	spin_lock_bh(&o->exe_queue.lock);
+
+	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+	if (rc != 0) {
+		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+		/* The calling function should not differentiate between this
+		 * case and the case in which there is already a pending ramrod.
+		 */
+		rc = 1;
+	} else {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+	}
+	spin_unlock_bh(&o->exe_queue.lock);
+
+	return rc;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @cqe:		completion element
+ * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution chunk
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o,
+				   union event_ring_elem *cqe,
+				   unsigned long *ramrod_flags)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc;
+
+	/* Clearing the pending list & raw state should be made
+	 * atomically (as execution flow assumes they represent the same).
+	 */
+	spin_lock_bh(&o->exe_queue.lock);
+
+	/* Reset pending list */
+	__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+	/* Clear pending */
+	r->clear_pending(r);
+
+	spin_unlock_bh(&o->exe_queue.lock);
+
+	/* If ramrod failed this is most likely a SW bug */
+	if (cqe->message.error)
+		return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+	if (test_bit(RAMROD_CONT, ramrod_flags)) {
+		rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	/* If there is more work to do return PENDING */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem query, *pos;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+	memcpy(&query, elem, sizeof(query));
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+		break;
+	default:
+		/* Don't handle anything other than ADD or DEL */
+		return 0;
+	}
+
+	/* If we found the appropriate element - delete it */
+	pos = exeq->get(exeq, &query);
+	if (pos) {
+
+		/* Return the credit of the optimized command */
+		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+			if ((query.cmd_data.vlan_mac.cmd ==
+			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
+				return -EINVAL;
+			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
+				return -EINVAL;
+			}
+		}
+
+		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+			   "ADD" : "DEL");
+
+		list_del(&pos->link);
+		bnx2x_exe_queue_free_elem(bp, pos);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp:	  device handle
+ * @o:		vlan_mac object
+ * @elem:	execution queue element holding the command data
+ * @restore:	true if this is a restore flow
+ * @re:		location in which to return the registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o,
+	struct bnx2x_exeq_elem *elem,
+	bool restore,
+	struct bnx2x_vlan_mac_registry_elem **re)
+{
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+	/* Allocate a new registry element if needed. */
+	if (!restore &&
+	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+		if (!reg_elem)
+			return -ENOMEM;
+
+		/* Get a new CAM offset */
+		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/* This shall never happen, because we have checked the
+			 * CAM availability in the 'validate'.
+			 */
+			WARN_ON(1);
+			kfree(reg_elem);
+			return -EINVAL;
+		}
+
+		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set a VLAN-MAC data */
+		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+			  sizeof(reg_elem->u));
+
+		/* Copy the flags (needed for DEL and RESTORE flows) */
+		reg_elem->vlan_mac_flags =
+			elem->cmd_data.vlan_mac.vlan_mac_flags;
+	} else /* DEL, RESTORE */
+		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
+
+	*re = reg_elem;
+	return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp:			device handle
+ * @qo:			qable object (its vlan_mac member is used)
+ * @exe_chunk:		list of commands to execute in this chunk
+ * @ramrod_flags:	execution flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+				  union bnx2x_qable_obj *qo,
+				  struct list_head *exe_chunk,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc, idx = 0;
+	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+	enum bnx2x_vlan_mac_cmd cmd;
+
+	/* If DRIVER_ONLY execution is requested, cleanup a registry
+	 * and exit. Otherwise send a ramrod to FW.
+	 */
+	if (!drv_only) {
+		WARN_ON(r->check_pending(r));
+
+		/* Set pending */
+		r->set_pending(r);
+
+		/* Fill the ramrod data */
+		list_for_each_entry(elem, exe_chunk, link) {
+			cmd = elem->cmd_data.vlan_mac.cmd;
+			/* We will add to the target object in MOVE command, so
+			 * change the object for a CAM search.
+			 */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				cam_obj = elem->cmd_data.vlan_mac.target_obj;
+			else
+				cam_obj = o;
+
+			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+							      elem, restore,
+							      &reg_elem);
+			if (rc)
+				goto error_exit;
+
+			WARN_ON(!reg_elem);
+
+			/* Push a new entry into the registry */
+			if (!restore &&
+			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+			    (cmd == BNX2X_VLAN_MAC_MOVE)))
+				list_add(&reg_elem->link, &cam_obj->head);
+
+			/* Configure a single command in a ramrod data buffer */
+			o->set_one_rule(bp, o, elem, idx,
+					reg_elem->cam_offset);
+
+			/* MOVE command consumes 2 entries in the ramrod data */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				idx += 2;
+			else
+				idx++;
+		}
+
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
+		 * and updating of the SPQ producer which involves a memory
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
+		 */
+
+		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+				   U64_HI(r->rdata_mapping),
+				   U64_LO(r->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			goto error_exit;
+	}
+
+	/* Now, when we are done with the ramrod - clean up the registry */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
+			reg_elem = o->check_del(bp, o,
+						&elem->cmd_data.vlan_mac.u);
+
+			WARN_ON(!reg_elem);
+
+			o->put_cam_offset(o, reg_elem->cam_offset);
+			list_del(&reg_elem->link);
+			kfree(reg_elem);
+		}
+	}
+
+	if (!drv_only)
+		return 1;
+	else
+		return 0;
+
+error_exit:
+	r->clear_pending(r);
+
+	/* Cleanup a registry in case of a failure */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+
+		if (cmd == BNX2X_VLAN_MAC_MOVE)
+			cam_obj = elem->cmd_data.vlan_mac.target_obj;
+		else
+			cam_obj = o;
+
+		/* Delete all newly added above entries */
+		if (!restore &&
+		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
+			reg_elem = o->check_del(bp, cam_obj,
+						&elem->cmd_data.vlan_mac.u);
+			if (reg_elem) {
+				list_del(&reg_elem->link);
+				kfree(reg_elem);
+			}
+		}
+	}
+
+	return rc;
+}
+
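+/* Allocate an execution queue element for the user request, set its command
+ * length (a MOVE occupies two rule slots, every other command one) and add
+ * it to the object's pending execution queue.
+ */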
+static inline int bnx2x_vlan_mac_push_new_cmd(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	/* Allocate the execution queue element */
+	elem = bnx2x_exe_queue_alloc_elem(bp);
+	if (!elem)
+		return -ENOMEM;
+
+	/* Set the command 'length' */
+	switch (p->user_req.cmd) {
+	case BNX2X_VLAN_MAC_MOVE:
+		elem->cmd_len = 2;
+		break;
+	default:
+		elem->cmd_len = 1;
+	}
+
+	/* Fill the object specific info */
+	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+	/* Try to add a new command to the pending list */
+	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp:	  device handle
+ * @p:	  command parameters
+ *
+ */
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	int rc = 0;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	unsigned long *ramrod_flags = &p->ramrod_flags;
+	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	/* Add new elements to the execution list for commands that
+	 * require it.
+	 */
+	if (!cont) {
+		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	/* If nothing will be executed further in this iteration we want to
+	 * return PENDING if there are pending commands
+	 */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		rc = 1;
+
+	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
+		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
+		raw->clear_pending(raw);
+	}
+
+	/* Execute commands if required */
+	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+						   &p->ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+	 * then the user wants to wait until the last command is done.
+	 */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		/* Wait for at most as many iterations as the current exe_queue
+		 * length, plus one for the currently pending command.
+		 */
+		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+		       max_iterations--) {
+
+			/* Wait for the current command to complete */
+			rc = raw->wait_comp(bp, raw);
+			if (rc)
+				return rc;
+
+			/* Make a next step */
+			rc = __bnx2x_vlan_mac_execute_step(bp,
+							   p->vlan_mac_obj,
+							   &p->ramrod_flags);
+			if (rc < 0)
+				return rc;
+		}
+
+		return 0;
+	}
+
+	return rc;
+}
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @vlan_mac_flags:	vlan_mac_flags spec of the elements to delete
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are no
+ * more elements left, a positive value if the last operation has completed
+ * successfully and there are more previously configured elements, and a
+ * negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  unsigned long *vlan_mac_flags,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+	struct bnx2x_vlan_mac_ramrod_params p;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+	unsigned long flags;
+	int read_lock;
+	int rc = 0;
+
+	/* Clear pending commands first */
+
+	spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
+			rc = exeq->remove(bp, exeq->owner, exeq_pos);
+			if (rc) {
+				BNX2X_ERR("Failed to remove command\n");
+				spin_unlock_bh(&exeq->lock);
+				return rc;
+			}
+			list_del(&exeq_pos->link);
+			bnx2x_exe_queue_free_elem(bp, exeq_pos);
+		}
+	}
+
+	spin_unlock_bh(&exeq->lock);
+
+	/* Prepare a command request */
+	memset(&p, 0, sizeof(p));
+	p.vlan_mac_obj = o;
+	p.ramrod_flags = *ramrod_flags;
+	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/* Add all but the last VLAN-MAC to the execution queue without
+	 * actually executing anything.
+	 */
+	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+	if (read_lock != 0)
+		return read_lock;
+
+	list_for_each_entry(pos, &o->head, link) {
+		flags = pos->vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
+			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+			rc = bnx2x_config_vlan_mac(bp, &p);
+			if (rc < 0) {
+				BNX2X_ERR("Failed to add a new DEL command\n");
+				bnx2x_vlan_mac_h_read_unlock(bp, o);
+				return rc;
+			}
+		}
+	}
+
+	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+	bnx2x_vlan_mac_h_read_unlock(bp, o);
+
+	p.ramrod_flags = *ramrod_flags;
+	__set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, &p);
+}
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+	unsigned long *pstate, bnx2x_obj_type type)
+{
+	raw->func_id = func_id;
+	raw->cid = cid;
+	raw->cl_id = cl_id;
+	raw->rdata = rdata;
+	raw->rdata_mapping = rdata_mapping;
+	raw->state = state;
+	raw->pstate = pstate;
+	raw->obj_type = type;
+	raw->check_pending = bnx2x_raw_check_pending;
+	raw->clear_pending = bnx2x_raw_clear_pending;
+	raw->set_pending = bnx2x_raw_set_pending;
+	raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+	int state, unsigned long *pstate, bnx2x_obj_type type,
+	struct bnx2x_credit_pool_obj *macs_pool,
+	struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	INIT_LIST_HEAD(&o->head);
+	o->head_reader = 0;
+	o->head_exe_request = false;
+	o->saved_ramrod_flags = 0;
+
+	o->macs_pool = macs_pool;
+	o->vlans_pool = vlans_pool;
+
+	o->delete_all = bnx2x_vlan_mac_del_all;
+	o->restore = bnx2x_vlan_mac_restore;
+	o->complete = bnx2x_complete_vlan_mac;
+	o->wait = bnx2x_wait_vlan_mac;
+
+	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+			   state, pstate, type);
+}
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, NULL);
+
+	/* CAM credit pool handling */
+	mac_obj->get_credit = bnx2x_get_credit_mac;
+	mac_obj->put_credit = bnx2x_put_credit_mac;
+	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1x(bp)) {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move_always_err;
+		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	} else {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move;
+		mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+		mac_obj->get_n_elements    = bnx2x_get_n_elements;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	}
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type, NULL,
+				   vlans_pool);
+
+	vlan_obj->get_credit = bnx2x_get_credit_vlan;
+	vlan_obj->put_credit = bnx2x_put_credit_vlan;
+	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Do not support chips other than E2 and newer\n");
+		BUG();
+	} else {
+		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
+		vlan_obj->check_del         = bnx2x_check_vlan_del;
+		vlan_obj->check_add         = bnx2x_check_vlan_add;
+		vlan_obj->check_move        = bnx2x_check_move;
+		vlan_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+		vlan_obj->get_n_elements    = bnx2x_get_n_elements;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan);
+	}
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/* CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+			struct tstorm_eth_mac_filter_config *mac_filters,
+			u16 pf_id)
+{
+	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+				 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* update the bp MAC filter structure */
+	u32 mask = (1 << p->cl_id);
+
+	struct tstorm_eth_mac_filter_config *mac_filters =
+		(struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+	u8 drop_all_ucast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	/* In E1x we only take the Rx accept flags into account since Tx
+	 * switching isn't enabled.
+	 */
+	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+		/* accept (all) bcast */
+		accp_all_bcast = 1;
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+		/* accept unmatched unicasts */
+		unmatched_unicast = 1;
+
+	mac_filters->ucast_drop_all = drop_all_ucast ?
+		mac_filters->ucast_drop_all | mask :
+		mac_filters->ucast_drop_all & ~mask;
+
+	mac_filters->mcast_drop_all = drop_all_mcast ?
+		mac_filters->mcast_drop_all | mask :
+		mac_filters->mcast_drop_all & ~mask;
+
+	mac_filters->ucast_accept_all = accp_all_ucast ?
+		mac_filters->ucast_accept_all | mask :
+		mac_filters->ucast_accept_all & ~mask;
+
+	mac_filters->mcast_accept_all = accp_all_mcast ?
+		mac_filters->mcast_accept_all | mask :
+		mac_filters->mcast_accept_all & ~mask;
+
+	mac_filters->bcast_accept_all = accp_all_bcast ?
+		mac_filters->bcast_accept_all | mask :
+		mac_filters->bcast_accept_all & ~mask;
+
+	mac_filters->unmatched_unicast = unmatched_unicast ?
+		mac_filters->unmatched_unicast | mask :
+		mac_filters->unmatched_unicast & ~mask;
+
+	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
+			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+	   mac_filters->bcast_accept_all);
+
+	/* write the MAC filter structure*/
+	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+	/* The operation is completed */
+	clear_bit(p->state, p->pstate);
+	smp_mb__after_atomic();
+
+	return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+				struct eth_classify_header *hdr,
+				u8 rule_cnt)
+{
+	hdr->echo = cpu_to_le32(cid);
+	hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+				unsigned long *accept_flags,
+				struct eth_filter_rules_cmd *cmd,
+				bool clear_accept_all)
+{
+	u16 state;
+
+	/* start with 'drop-all' */
+	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+	}
+
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+	}
+
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
+	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+	if (clear_accept_all) {
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	cmd->state = cpu_to_le16(state);
+}
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+				struct bnx2x_rx_mode_ramrod_params *p)
+{
+	struct eth_filter_rules_ramrod_data *data = p->rdata;
+	int rc;
+	u8 rule_idx = 0;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* Setup ramrod data */
+
+	/* Tx (internal switching) */
+	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_TX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
+	}
+
+	/* Rx */
+	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_RX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
+	}
+
+	/* If FCoE Queue configuration has been requested, configure the Rx and
+	 * internal switching modes for this queue in separate rules.
+	 *
+	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
+	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+	 */
+	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+		/*  Tx (internal switching) */
+		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_TX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+						       &(data->rules[rule_idx]),
+						       true);
+			rule_idx++;
+		}
+
+		/* Rx */
+		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_RX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+						       &(data->rules[rule_idx]),
+						       true);
+			rule_idx++;
+		}
+	}
+
+	/* Set the ramrod header (most importantly - number of rules to
+	 * configure).
+	 */
+	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
+			 data->header.rule_cnt, p->rx_accept_flags,
+			 p->tx_accept_flags);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+			   U64_HI(p->rdata_mapping),
+			   U64_LO(p->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+	if (rc)
+		return rc;
+
+	/* Ramrod completion is pending */
+	return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+				      struct bnx2x_rx_mode_ramrod_params *p)
+{
+	return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+				    struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* Do nothing */
+	return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	int rc;
+
+	/* Configure the new classification in the chip */
+	rc = p->rx_mode_obj->config_rx_mode(bp, p);
+	if (rc < 0)
+		return rc;
+
+	/* Wait for a ramrod completion if was requested */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		rc = p->rx_mode_obj->wait_comp(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o)
+{
+	if (CHIP_IS_E1x(bp)) {
+		o->wait_comp      = bnx2x_empty_rx_mode_wait;
+		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+	} else {
+		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+		o->config_rx_mode = bnx2x_set_rx_mode_e2;
+	}
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
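+
+/* For illustration: the "bin" of a MAC is simply the top byte of its CRC32C,
+ * i.e. a value in the range 0..255. A minimal sketch of the same mapping,
+ * assuming a crc32c() helper with the usual (crc, data, len) signature:
+ *
+ *	u8 bin = (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
+ *
+ * Two different multicast MACs may therefore share a bin, which is why this
+ * is an approximate-match (hash) scheme rather than an exact-match one.
+ */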
+
+struct bnx2x_mcast_mac_elem {
+	struct list_head link;
+	u8 mac[ETH_ALEN];
+	u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+	struct list_head link;
+	int type; /* BNX2X_MCAST_CMD_X */
+	union {
+		struct list_head macs_head;
+		u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with aprox match */
+	} data;
+
+	bool done; /* set to true when the command has been handled. In
+		    * practice it is only used in the 57712 flow, where one
+		    * pending command may be handled in several operations.
+		    * Since on other chips every command is completed in a
+		    * single ramrod, there is no need to use this field there.
+		    */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+			    struct bnx2x_mcast_obj *o)
+{
+	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+			o->raw.wait_comp(bp, &o->raw))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+				   struct bnx2x_mcast_obj *o,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   enum bnx2x_mcast_cmd cmd)
+{
+	int total_sz;
+	struct bnx2x_pending_mcast_cmd *new_cmd;
+	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+	struct bnx2x_mcast_list_elem *pos;
+	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+			     p->mcast_list_len : 0);
+
+	/* If the command is empty ("handle pending commands only"), return */
+	if (!p->mcast_list_len)
+		return 0;
+
+	total_sz = sizeof(*new_cmd) +
+		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+	if (!new_cmd)
+		return -ENOMEM;
+
+	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+	   cmd, macs_list_len);
+
+	INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+	new_cmd->type = cmd;
+	new_cmd->done = false;
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		cur_mac = (struct bnx2x_mcast_mac_elem *)
+			  ((u8 *)new_cmd + sizeof(*new_cmd));
+
+		/* Push the MACs of the current command into the pending command
+		 * MACs list: FIFO
+		 */
+		list_for_each_entry(pos, &p->mcast_list, link) {
+			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+			cur_mac++;
+		}
+
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		new_cmd->data.macs_num = p->mcast_list_len;
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		new_cmd->data.next_bin = 0;
+		break;
+
+	default:
+		kfree(new_cmd);
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Push the new pending command to the tail of the pending list: FIFO */
+	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+	o->set_sched(o);
+
+	return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:
+ * @last:	index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+		if (o->registry.aprox_match.vec[i])
+			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+						       vec, cur_bit)) {
+					return cur_bit;
+				}
+			}
+		inner_start = 0;
+	}
+
+	/* None found */
+	return -1;
+}
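+
+/* Worked example: the approximate-match registry is a bit vector split into
+ * 64-bit elements. With last == 70 the scan above starts at element
+ * 70 / 64 == 1, inner bit 70 % 64 == 6, and walks upwards until it finds a
+ * set bit; if bin 100 is the only bin set at or above 70, the function
+ * returns 100. If no such bin is set it returns -1.
+ */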
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o:
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+	if (cur_bit >= 0)
+		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+	return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+	u8 func_id = r->func_id;
+	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+	int bin;
+
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+	/* Get a bin and update the bins' vector */
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		/* If there are no more bins to clear
+		 * (bnx2x_mcast_clear_first_bin() returns -1) we end up
+		 * "clearing" a dummy (0xff) bin.
+		 * See bnx2x_mcast_validate_e2() for an explanation of when
+		 * this may happen.
+		 */
+		bin = bnx2x_mcast_clear_first_bin(o);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		bin = cfg_data->bin;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return;
+	}
+
+	DP(BNX2X_MSG_SP, "%s bin %d\n",
+			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+			 "Setting"  : "Clearing"), bin);
+
+	data->rules[idx].bin_id    = (u8)bin;
+	data->rules[idx].func_id   = func_id;
+	data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp:		device handle
+ * @o:
+ * @start_bin:	index in the registry to start from (including)
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+	int *rdata_idx)
+{
+	int cur_bin, cnt = *rdata_idx;
+	union bnx2x_mcast_config_data cfg_data = {NULL};
+
+	/* go through the registry and configure the bins from it */
+	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+		cfg_data.bin = (u8)cur_bin;
+		o->set_one_rule(bp, o, cnt, &cfg_data,
+				BNX2X_MCAST_CMD_RESTORE);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*rdata_idx = cnt;
+
+	return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+	int cnt = *line_idx;
+	union bnx2x_mcast_config_data cfg_data = {NULL};
+
+	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+				 link) {
+
+		cfg_data.mac = &pmac_pos->mac[0];
+		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+		   pmac_pos->mac);
+
+		list_del(&pmac_pos->link);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* if no more MACs to configure - we are done */
+	if (list_empty(&cmd_pos->data.macs_head))
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	int cnt = *line_idx;
+
+	while (cmd_pos->data.macs_num) {
+		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+		cnt++;
+
+		cmd_pos->data.macs_num--;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
+		   cmd_pos->data.macs_num, cnt);
+
+		/* Break if we reached the maximum
+		 * number of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* If we cleared all bins - we are done */
+	if (!cmd_pos->data.macs_num)
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+						line_idx);
+
+	if (cmd_pos->data.next_bin < 0)
+		/* If o->set_restore returned -1 we are done */
+		cmd_pos->done = true;
+	else
+		/* Start from the next bin next time */
+		cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+	int cnt = 0;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+				 link) {
+		switch (cmd_pos->type) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+							   &cnt);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+			return -EINVAL;
+		}
+
+		/* If the command has been completed - remove it from the list
+		 * and free the memory
+		 */
+		if (cmd_pos->done) {
+			list_del(&cmd_pos->link);
+			kfree(cmd_pos);
+		}
+
+		/* Break if we reached the maximum number of rules */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	union bnx2x_mcast_config_data cfg_data = {NULL};
+	int cnt = *line_idx;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		cfg_data.mac = mlist_pos->mac;
+		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+		   mlist_pos->mac);
+	}
+
+	*line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	int cnt = *line_idx, i;
+
+	for (i = 0; i < p->mcast_list_len; i++) {
+		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+				 p->mcast_list_len - i - 1);
+	}
+
+	*line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd -
+ *
+ * @bp:		device handle
+ * @p:
+ * @cmd:
+ * @start_cnt:	first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p,
+			enum bnx2x_mcast_cmd cmd,
+			int start_cnt)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int cnt = start_cnt;
+
+	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* The current command has been handled */
+	p->mcast_list_len = 0;
+
+	return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		/* Here we set the approximate amount of work to do; the real
+		 * amount may be smaller, since some MACs in postponed ADD
+		 * command(s) scheduled before this command may fall into
+		 * the same bin, so the actual number of bins set in the
+		 * registry would be less than we estimated here. See
+		 * bnx2x_mcast_set_one_rule_e2() for further details.
+		 */
+		p->mcast_list_len = reg_sz;
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Here we assume that all new MACs will fall into new bins.
+		 * However we will correct the real registry size after we
+		 * handle all pending commands.
+		 */
+		o->set_registry_size(o, reg_sz + p->mcast_list_len);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Increase the total number of MACs pending to be configured */
+	o->total_pending_num += p->mcast_list_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_bins)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_bins);
+	o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
+ *
+ * @bp:		device handle
+ * @p:
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+					(BNX2X_FILTER_MCAST_PENDING <<
+					 BNX2X_SWCID_SHIFT));
+	data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp:		device handle
+ * @o:
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution time is proportional to the number
+ * of set bins.
+ *
+ * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	int i, cnt = 0;
+	u64 elem;
+
+	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+		elem = o->registry.aprox_match.vec[i];
+		for (; elem; cnt++)
+			elem &= elem - 1;
+	}
+
+	o->set_registry_size(o, cnt);
+
+	return 0;
+}
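+
+/* Worked example of the Kernighan loop above: for elem = 0b10110 the
+ * iterations are
+ *	0b10110 -> 0b10100 -> 0b10000 -> 0b00000,
+ * i.e. exactly three steps, one per set bit, so cnt grows by the number of
+ * set bins in the element regardless of where those bits are located.
+ */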
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+	int cnt = 0, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there was enough room in ramrod
+	 * data for all pending commands and for the current
+	 * command. Otherwise the current command would have been added
+	 * to the pending commands and p->mcast_list_len would have been
+	 * zeroed.
+	 */
+	if (p->mcast_list_len > 0)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+	/* We've pulled out some MACs - update the total number of
+	 * outstanding.
+	 */
+	o->total_pending_num -= cnt;
+
+	/* send a ramrod */
+	WARN_ON(o->total_pending_num < 0);
+	WARN_ON(cnt > o->max_cmd_len);
+
+	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+	/* Update a registry size if there are no more pending operations.
+	 *
+	 * We don't want to change the value of the registry size if there are
+	 * pending operations because we want it to always be equal to the
+	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+	 * set bins after the last requested operation in order to properly
+	 * evaluate the size of the next DEL/RESTORE operation.
+	 *
+	 * Note that we update the registry itself during command(s) handling
+	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+	 * with a limited amount of update commands (per MAC/bin) and we don't
+	 * know in this scope what the actual state of bins configuration is
+	 * going to be after this ramrod.
+	 */
+	if (!o->total_pending_num)
+		bnx2x_mcast_refresh_registry_e2(bp, o);
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
+		 * and updating of the SPQ producer which involves a memory
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
+		 */
+
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+				   raw->cid, U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+				    struct bnx2x_mcast_ramrod_params *p,
+				    enum bnx2x_mcast_cmd cmd)
+{
+	/* Mark that there is work to do */
+	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		p->mcast_list_len = 1;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+				       struct bnx2x_mcast_ramrod_params *p,
+				       int old_num_bins)
+{
+	/* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
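+
+/* Worked example: the macro above treats 'filter' as an array of 32-bit
+ * words, so bit 37 lands in filter[37 >> 5] == filter[1] at bit position
+ * 37 & 0x1f == 5, i.e. it expands to
+ *	filter[1] |= (1 << 5);
+ */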
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+					   struct bnx2x_mcast_obj *o,
+					   struct bnx2x_mcast_ramrod_params *p,
+					   u32 *mc_filter)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	int bit;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
+		   mlist_pos->mac, bit);
+
+		/* bookkeeping... */
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+				  bit);
+	}
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	u32 *mc_filter)
+{
+	int bit;
+
+	for (bit = bnx2x_mcast_get_next_bin(o, 0);
+	     bit >= 0;
+	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+	}
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so we don't
+ * need any tricks to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+				 struct bnx2x_mcast_ramrod_params *p,
+				 enum bnx2x_mcast_cmd cmd)
+{
+	int i;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* If CLEAR_ONLY has been requested - clear the registry
+	 * and clear a pending bit.
+	 */
+	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		u32 mc_filter[MC_HASH_SIZE] = {0};
+
+		/* Set the multicast filter bits before writing it into
+		 * the internal memory.
+		 */
+		switch (cmd) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			DP(BNX2X_MSG_SP,
+			   "Invalidating multicast MACs configuration\n");
+
+			/* clear the registry */
+			memset(o->registry.aprox_match.vec, 0,
+			       sizeof(o->registry.aprox_match.vec));
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd);
+			return -EINVAL;
+		}
+
+		/* Set the mcast filter in the internal memory */
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+	} else
+		/* clear the registry */
+		memset(o->registry.aprox_match.vec, 0,
+		       sizeof(o->registry.aprox_match.vec));
+
+	/* We are done */
+	r->clear_pending(r);
+
+	return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		p->mcast_list_len = reg_sz;
+		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+		   cmd, p->mcast_list_len);
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Multicast MACs on 57710 are configured as unicast MACs and
+		 * there is only a limited number of CAM entries for that
+		 * matter.
+		 */
+		if (p->mcast_list_len > o->max_cmd_len) {
+			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
+				  o->max_cmd_len);
+			return -EINVAL;
+		}
+		/* Every configured MAC should be cleared if a DEL command is
+		 * called. Only the last ADD command is relevant, since
+		 * every ADD command overrides the previous configuration.
+		 */
+		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+		if (p->mcast_list_len > 0)
+			o->set_registry_size(o, p->mcast_list_len);
+
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num += o->max_cmd_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_macs)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet and we are
+	 * here, it means that it's meant to be dropped and we have to
+	 * update the number of outstanding MACs accordingly.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	/* copy mac */
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+				      &data->config_table[idx].middle_mac_addr,
+				      &data->config_table[idx].lsb_mac_addr,
+				      cfg_data->mac);
+
+		data->config_table[idx].vlan_id = 0;
+		data->config_table[idx].pf_id = r->func_id;
+		data->config_table[idx].clients_bit_vector =
+			cpu_to_le32(1 << r->cl_id);
+
+		SET_FLAG(data->config_table[idx].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+	}
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
+ *
+ * @bp:		device handle
+ * @p:
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+	data->hdr.offset = offset;
+	data->hdr.client_id = cpu_to_le16(0xff);
+	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+				     (BNX2X_FILTER_MCAST_PENDING <<
+				      BNX2X_SWCID_SHIFT));
+	data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp:		device handle
+ * @o:
+ * @start_idx:	index in the registry to start from
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * The restore command for 57710 is, like all other commands, always a
+ * standalone command - start_idx and rdata_idx will always be 0. This
+ * function will always succeed.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
+	int *rdata_idx)
+{
+	struct bnx2x_mcast_mac_elem *elem;
+	int i = 0;
+	union bnx2x_mcast_config_data cfg_data = {NULL};
+
+	/* go through the registry and configure the MACs from it. */
+	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+		cfg_data.mac = &elem->mac[0];
+		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+		i++;
+
+		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+		   cfg_data.mac);
+	}
+
+	*rdata_idx = i;
+
+	return -1;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos;
+	struct bnx2x_mcast_mac_elem *pmac_pos;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	union bnx2x_mcast_config_data cfg_data = {NULL};
+	int cnt = 0;
+
+	/* If nothing to be done - return */
+	if (list_empty(&o->pending_cmds_head))
+		return 0;
+
+	/* Handle the first command */
+	cmd_pos = list_first_entry(&o->pending_cmds_head,
+				   struct bnx2x_pending_mcast_cmd, link);
+
+	switch (cmd_pos->type) {
+	case BNX2X_MCAST_CMD_ADD:
+		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+			cfg_data.mac = &pmac_pos->mac[0];
+			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+			cnt++;
+
+			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+			   pmac_pos->mac);
+		}
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		cnt = cmd_pos->data.macs_num;
+		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+		return -EINVAL;
+	}
+
+	list_del(&cmd_pos->link);
+	kfree(cmd_pos);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - the inverse of bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi:
+ * @fw_mid:
+ * @fw_lo:
+ * @mac:
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+					 __le16 *fw_lo, u8 *mac)
+{
+	mac[1] = ((u8 *)fw_hi)[0];
+	mac[0] = ((u8 *)fw_hi)[1];
+	mac[3] = ((u8 *)fw_mid)[0];
+	mac[2] = ((u8 *)fw_mid)[1];
+	mac[5] = ((u8 *)fw_lo)[0];
+	mac[4] = ((u8 *)fw_lo)[1];
+}
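+
+/* Worked example, assuming bnx2x_set_fw_mac_addr() mirrors this layout
+ * exactly: for the MAC 00:11:22:33:44:55 the firmware words hold the bytes
+ * pair-wise swapped - fw_hi = {0x11, 0x00}, fw_mid = {0x33, 0x22},
+ * fw_lo = {0x55, 0x44} - so reading them back here yields
+ * mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} again.
+ */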
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 -
+ *
+ * @bp:		device handle
+ * @cnt:
+ *
+ * Check the first entry's flag in the ramrod data to see whether it was a
+ * DELETE or an ADD command and update the registry accordingly: if ADD -
+ * allocate memory and add the entries to the registry (list), if DELETE -
+ * clear the registry and free the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct bnx2x_mcast_mac_elem *elem;
+	struct mac_configuration_cmd *data =
+			(struct mac_configuration_cmd *)(raw->rdata);
+
+	/* If first entry contains a SET bit - the command was ADD,
+	 * otherwise - DEL_ALL
+	 */
+	if (GET_FLAG(data->config_table[0].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+		int i, len = data->hdr.length;
+
+		/* Break if it was a RESTORE command */
+		if (!list_empty(&o->registry.exact_match.macs))
+			return 0;
+
+		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
+		if (!elem) {
+			BNX2X_ERR("Failed to allocate registry memory\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < len; i++, elem++) {
+			bnx2x_get_fw_mac_addr(
+				&data->config_table[i].msb_mac_addr,
+				&data->config_table[i].middle_mac_addr,
+				&data->config_table[i].lsb_mac_addr,
+				elem->mac);
+			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
+			   elem->mac);
+			list_add_tail(&elem->link,
+				      &o->registry.exact_match.macs);
+		}
+	} else {
+		elem = list_first_entry(&o->registry.exact_match.macs,
+					struct bnx2x_mcast_mac_elem, link);
+		DP(BNX2X_MSG_SP, "Deleting a registry\n");
+		kfree(elem);
+		INIT_LIST_HEAD(&o->registry.exact_match.macs);
+	}
+
+	return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	int cnt = 0, i, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* First set all entries as invalid */
+	for (i = 0; i < o->max_cmd_len; i++)
+		SET_FLAG(data->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+
+	/* Handle pending commands first */
+	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there were no pending commands */
+	if (!cnt)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+	/* For 57710 every command has o->max_cmd_len length to ensure that
+	 * commands are done one at a time.
+	 */
+	o->total_pending_num -= o->max_cmd_len;
+
+	/* send a ramrod */
+
+	WARN_ON(cnt > o->max_cmd_len);
+
+	/* Set ramrod header (in particular, a number of entries to update) */
+	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* Update the registry: we need the registry contents to always be up
+	 * to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we send one command at a time,
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+	if (rc)
+		return rc;
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
+		 * and updating of the SPQ producer which involves a memory
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
+		 */
+
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+				   U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       enum bnx2x_mcast_cmd cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc = 0, old_reg_size;
+
+	/* This is needed to recover the number of currently configured mcast
+	 * MACs in case of failure.
+	 */
+	old_reg_size = o->get_registry_size(o);
+
+	/* Do some calculations and checks */
+	rc = o->validate(bp, p, cmd);
+	if (rc)
+		return rc;
+
+	/* Return if there is no work to do */
+	if ((!p->mcast_list_len) && (!o->check_sched(o)))
+		return 0;
+
+	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
+	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
+
+	/* Enqueue the current command to the pending list if we can't complete
+	 * it in the current iteration
+	 */
+	if (r->check_pending(r) ||
+	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+		if (rc < 0)
+			goto error_exit1;
+
+		/* As long as the current command is in a command list we
+		 * don't need to handle it separately.
+		 */
+		p->mcast_list_len = 0;
+	}
+
+	if (!r->check_pending(r)) {
+
+		/* Set 'pending' state */
+		r->set_pending(r);
+
+		/* Configure the new classification in the chip */
+		rc = o->config_mcast(bp, p, cmd);
+		if (rc < 0)
+			goto error_exit2;
+
+		/* Wait for a ramrod completion if was requested */
+		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+			rc = o->wait_comp(bp, o);
+	}
+
+	return rc;
+
+error_exit2:
+	r->clear_pending(r);
+
+error_exit1:
+	o->revert(bp, p, old_reg_size);
+
+	return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_atomic();
+	clear_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_atomic();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_atomic();
+	set_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_atomic();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+	return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+	return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+	memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+			   rdata, rdata_mapping, state, pstate, type);
+
+	mcast_obj->engine_id = engine_id;
+
+	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+	mcast_obj->check_sched = bnx2x_mcast_check_sched;
+	mcast_obj->set_sched = bnx2x_mcast_set_sched;
+	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+	if (CHIP_IS_E1(bp)) {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e1;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+
+		if (CHIP_REV_IS_SLOW(bp))
+			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+		else
+			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_exact;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_exact;
+
+		/* 57710 is the only chip that uses the exact match for mcast
+		 * at the moment.
+		 */
+		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
+		mcast_obj->enqueue_cmd   = NULL;
+		mcast_obj->hdl_restore   = NULL;
+		mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+		/* 57711 doesn't send a ramrod, so it has unlimited credit
+		 * for one command.
+		 */
+		mcast_obj->max_cmd_len       = -1;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = NULL;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	} else {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e2;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+		/* TODO: There should be a proper HSI define for this number!!!
+		 */
+		mcast_obj->max_cmd_len       = 16;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
+		mcast_obj->validate          = bnx2x_mcast_validate_e2;
+		mcast_obj->revert            = bnx2x_mcast_revert_e2;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	}
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to add to v...
+ * @u:	...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c + a >= u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c + a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
+
+/**
+ * atomic_dec_ifmoe - decrement if the result is greater than or equal to a
+ * given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to subtract from v...
+ * @u:	...if (v - a) is greater than or equal to u.
+ *
+ * returns true if (v - a) was greater than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c - a < u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c - a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
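+
+/* Usage sketch: the credit pool below uses these helpers as lock-free
+ * bounded counters. With pool_sz == 8 and credit currently 3:
+ *	__atomic_dec_ifmoe(&credit, 2, 0) succeeds and leaves credit == 1;
+ *	__atomic_dec_ifmoe(&credit, 2, 0) then fails, since 1 - 2 < 0;
+ *	__atomic_add_ifless(&credit, 8, 9) fails, since 1 + 8 >= pool_sz + 1;
+ *	__atomic_add_ifless(&credit, 7, 9) succeeds and refills the pool to 8.
+ * The credit therefore never drops below zero or grows above the pool size.
+ */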
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+	smp_mb();
+
+	return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+	smp_mb();
+
+	return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+	int cur_credit;
+
+	smp_mb();
+	cur_credit = atomic_read(&o->credit);
+
+	return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+					  int cnt)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	int idx, vec, i;
+
+	*offset = -1;
+
+	/* Find "internal cam-offset" then add to base for this object... */
+	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+		/* Skip the current vector if there are no free entries in it */
+		if (!o->pool_mirror[vec])
+			continue;
+
+		/* If we've got here we are going to find a free entry */
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+				/* Got one!! */
+				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+				*offset = o->base_pool_offset + idx;
+				return true;
+			}
+	}
+
+	return false;
+}
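+
+/* Worked example: if base_pool_offset is 32 and the lowest free bit in
+ * pool_mirror is bit 5, the loop above clears that bit and returns
+ * *offset == 32 + 5 == 37, i.e. the absolute CAM entry now reserved for the
+ * caller; bnx2x_credit_pool_put_entry() later sets the bit again to free it.
+ */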
+
+static bool bnx2x_credit_pool_put_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	if (offset < o->base_pool_offset)
+		return false;
+
+	offset -= o->base_pool_offset;
+
+	if (offset >= o->pool_sz)
+		return false;
+
+	/* Return the entry to the pool */
+	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+	return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	*offset = -1;
+	return true;
+}
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:
+ * @base:	Base entry in the CAM to use.
+ * @credit:	pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit)
+{
+	/* Zero the object first */
+	memset(p, 0, sizeof(*p));
+
+	/* Set the table to all 1s */
+	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+	/* Init a pool as full */
+	atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+	p->base_pool_offset = base;
+
+	/* Commit the change */
+	smp_mb();
+
+	p->check = bnx2x_credit_pool_check;
+
+	/* if pool credit is negative - disable the checks */
+	if (credit >= 0) {
+		p->put      = bnx2x_credit_pool_put;
+		p->get      = bnx2x_credit_pool_get;
+		p->put_entry = bnx2x_credit_pool_put_entry;
+		p->get_entry = bnx2x_credit_pool_get_entry;
+	} else {
+		p->put      = bnx2x_credit_pool_always_true;
+		p->get      = bnx2x_credit_pool_always_true;
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+
+	/* If base is negative - disable entries handling */
+	if (base < 0) {
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+}
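+
+/* Usage sketch: the callers below pick the arguments per chip. For example,
+ * a per-function MAC pool of 16 CAM entries starting at entry func_id * 16
+ * (hypothetical numbers) would be set up as
+ *	bnx2x_init_credit_pool(p, func_id * 16, 16);
+ * while bnx2x_init_credit_pool(p, -1, credit) keeps credit accounting but
+ * skips CAM entry handling, and bnx2x_init_credit_pool(p, 0, -1) makes every
+ * pool operation succeed unconditionally.
+ */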
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+	int cam_sz;
+
+	if (CHIP_IS_E1(bp)) {
+		/* In E1, Multicast is saved in cam... */
+		if (!CHIP_REV_IS_SLOW(bp))
+			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+		else
+			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	} else {
+
+		/* CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+			/* No need for CAM entries handling for 57712 and
+			 * newer.
+			 */
+			bnx2x_init_credit_pool(p, -1, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+	}
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p,
+				 u8 func_id,
+				 u8 func_num)
+{
+	if (CHIP_IS_E1x(bp)) {
+		/* There is no VLAN credit in HW on 57710 and 57711; only
+		 * MAC / MAC-VLAN can be set.
+		 */
+		bnx2x_init_credit_pool(p, 0, -1);
+	} else {
+		/* CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
+		} else
+			/* this should never happen! Block VLAN operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+	}
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp:		driver handle
+ * @p:		pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *p)
+{
+	int i;
+
+	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+	DP(BNX2X_MSG_SP, "0x0000: ");
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+		/* Print 4 bytes in a line */
+		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+		    (((i + 1) & 0x3) == 0)) {
+			DP_CONT(BNX2X_MSG_SP, "\n");
+			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+		}
+	}
+
+	DP_CONT(BNX2X_MSG_SP, "\n");
+}
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp:		device handle
+ * @p:		rss configuration
+ *
+ * Sends an RSS UPDATE ramrod for that purpose.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+			   struct bnx2x_config_rss_params *p)
+{
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_rss_update_ramrod_data *data =
+		(struct eth_rss_update_ramrod_data *)(r->rdata);
+	u16 caps = 0;
+	u8 rss_mode = 0;
+	int rc;
+
+	memset(data, 0, sizeof(*data));
+
+	DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+	/* Set an echo field */
+	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+				 (r->state << BNX2X_SWCID_SHIFT));
+
+	/* RSS mode */
+	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_DISABLED;
+	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_REGULAR;
+
+	data->rss_mode = rss_mode;
+
+	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+	/* RSS capabilities */
+	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
+
+	/* RSS keys */
+	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+		u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+		const u8 *src = (const u8 *)p->rss_key;
+		int i;
+
+		/* Apparently, bnx2x reads this array in reverse order
+		 * We need to byte swap rss_key to comply with Toeplitz specs.
+		 */
+		for (i = 0; i < sizeof(data->rss_key); i++)
+			*--dst = *src++;
+
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+	}
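+
+	/* For illustration: the reversed copy above means a key
+	 * k[0]..k[N-1] supplied by the stack ends up in data->rss_key as
+	 * k[N-1]..k[0], where N == sizeof(data->rss_key); this is the byte
+	 * order the chip expects for the Toeplitz hash.
+	 */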
+
+	data->capabilities = cpu_to_le16(caps);
+
+	/* Hashing mask */
+	data->rss_result_mask = p->rss_result_mask;
+
+	/* RSS engine ID */
+	data->rss_engine_id = o->engine_id;
+
+	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+	/* Indirection table */
+	memcpy(data->indirection_table, p->ind_table,
+		  T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Remember the last configuration */
+	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Print the indirection table */
+	if (netif_msg_ifup(bp))
+		bnx2x_debug_print_ind_table(bp, p);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+			   U64_HI(r->rdata_mapping),
+			   U64_LO(r->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+
+	if (rc < 0)
+		return rc;
+
+	return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table)
+{
+	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p)
+{
+	int rc;
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Do nothing if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+		   p->ramrod_flags);
+		return 0;
+	}
+
+	r->set_pending(r);
+
+	rc = o->config_rss(bp, p);
+	if (rc < 0) {
+		r->clear_pending(r);
+		return rc;
+	}
+
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+		rc = r->wait_comp(bp, r);
+
+	return rc;
+}
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type)
+{
+	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+			   rdata_mapping, state, pstate, type);
+
+	rss_obj->engine_id  = engine_id;
+	rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * returns 0 in case of successfully completed transition, negative error
+ * code in case of failure, positive (EBUSY) value if there is a completion
+ * that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	int rc, pending_bit;
+	unsigned long *pending = &o->pending;
+
+	/* Check that the requested transition is legal */
+	rc = o->check_transition(bp, o, params);
+	if (rc) {
+		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
+		return -EINVAL;
+	}
+
+	/* Set "pending" bit */
+	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
+	pending_bit = o->set_pending(o, params);
+	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+		o->complete_cmd(bp, o, pending_bit);
+	else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+		if (rc) {
+			o->next_state = BNX2X_Q_STATE_MAX;
+			clear_bit(pending_bit, pending);
+			smp_mb__after_atomic();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, pending_bit);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(pending_bit, pending);
+}
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+				   struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+	/* ACTIVATE and DEACTIVATE commands are implemented on top of
+	 * UPDATE command.
+	 */
+	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
+		bit = BNX2X_Q_CMD_UPDATE;
+	else
+		bit = cmd;
+
+	set_bit(bit, &obj->pending);
+	return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+				 struct bnx2x_queue_sp_obj *o,
+				 enum bnx2x_queue_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp:		device handle
+ * @o:
+ * @cmd:
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				enum bnx2x_queue_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
+			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	if (o->next_tx_only >= o->max_cos)
+		/* >= because tx-only must always be smaller than max_cos since
+		 * the primary connection supports COS 0
+		 */
+		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
+			   o->next_tx_only, o->max_cos);
+
+	DP(BNX2X_MSG_SP,
+	   "Completing command %d for queue %d, setting state to %d\n",
+	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+	if (o->next_tx_only)  /* print num tx-only if any exist */
+		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
+		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+	o->state = o->next_state;
+	o->num_tx_only = o->next_tx_only;
+	o->next_state = BNX2X_Q_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_atomic();
+
+	return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+	/* Rx data */
+
+	/* IPv6 TPA supported for E2 and above only */
+	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_general_setup_params *params,
+				struct client_init_general_data *gen_data,
+				unsigned long *flags)
+{
+	gen_data->client_id = o->cl_id;
+
+	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+		gen_data->statistics_counter_id =
+					params->stat_id;
+		gen_data->statistics_en_flg = 1;
+		gen_data->statistics_zero_flg =
+			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+	} else
+		gen_data->statistics_counter_id =
+					DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+	gen_data->sp_client_id = params->spcl_id;
+	gen_data->mtu = cpu_to_le16(params->mtu);
+	gen_data->func_id = o->func_id;
+
+	gen_data->cos = params->cos;
+
+	gen_data->traffic_type =
+		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+	gen_data->fp_hsi_ver = params->fp_hsi;
+
+	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
+	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_txq_setup_params *params,
+				struct client_init_tx_data *tx_data,
+				unsigned long *flags)
+{
+	tx_data->enforce_security_flg =
+		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+	tx_data->default_vlan =
+		cpu_to_le16(params->default_vlan);
+	tx_data->default_vlan_flg =
+		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+	tx_data->tx_switching_flg =
+		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+	tx_data->anti_spoofing_flg =
+		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+	tx_data->force_default_pri_flg =
+		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+	tx_data->refuse_outband_vlan_flg =
+		test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
+	tx_data->tunnel_lso_inc_ip_id =
+		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+	tx_data->tunnel_non_lso_pcsum_location =
+		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+							    CSUM_ON_BD;
+
+	tx_data->tx_status_block_id = params->fw_sb_id;
+	tx_data->tx_sb_index_number = params->sb_cq_index;
+	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+	tx_data->tx_bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->dscr_map));
+	tx_data->tx_bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->dscr_map));
+
+	/* Don't configure any Tx switching mode during queue SETUP */
+	tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+				struct rxq_pause_params *params,
+				struct client_init_rx_data *rx_data)
+{
+	/* flow control data */
+	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_rxq_setup_params *params,
+				struct client_init_rx_data *rx_data,
+				unsigned long *flags)
+{
+	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
+				CLIENT_INIT_RX_DATA_TPA_MODE;
+	rx_data->vmqueue_mode_en_flg = 0;
+
+	rx_data->cache_line_alignment_log_size =
+		params->cache_line_log;
+	rx_data->enable_dynamic_hc =
+		test_bit(BNX2X_Q_FLG_DHC, flags);
+	rx_data->max_sges_for_packet = params->max_sges_pkt;
+	rx_data->client_qzone_id = params->cl_qzone_id;
+	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
+
+	/* Always start in DROP_ALL mode */
+	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+	/* We don't set drop flags */
+	rx_data->drop_ip_cs_err_flg = 0;
+	rx_data->drop_tcp_cs_err_flg = 0;
+	rx_data->drop_ttl0_flg = 0;
+	rx_data->drop_udp_cs_err_flg = 0;
+	rx_data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_VLAN, flags);
+	rx_data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_OV, flags);
+	rx_data->status_block_id = params->fw_sb_id;
+	rx_data->rx_sb_index_number = params->sb_cq_index;
+	rx_data->max_tpa_queues = params->max_tpa_queues;
+	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
+	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
+	rx_data->bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->dscr_map));
+	rx_data->bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->dscr_map));
+	rx_data->sge_page_base.lo =
+		cpu_to_le32(U64_LO(params->sge_map));
+	rx_data->sge_page_base.hi =
+		cpu_to_le32(U64_HI(params->sge_map));
+	rx_data->cqe_page_base.lo =
+		cpu_to_le32(U64_LO(params->rcq_map));
+	rx_data->cqe_page_base.hi =
+		cpu_to_le32(U64_HI(params->rcq_map));
+	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
+
+	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
+		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
+		rx_data->is_approx_mcast = 1;
+	}
+
+	rx_data->rss_engine_id = params->rss_engine_id;
+
+	/* silent vlan removal */
+	rx_data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
+	rx_data->silent_vlan_value =
+		cpu_to_le16(params->silent_removal_value);
+	rx_data->silent_vlan_mask =
+		cpu_to_le16(params->silent_removal_mask);
+}
+
+/* initialize the general, tx and rx parts of a queue object */
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+				       &cmd_params->params.setup.gen_params,
+				       &data->general,
+				       &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+				  &cmd_params->params.setup.txq_params,
+				  &data->tx,
+				  &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
+				  &cmd_params->params.setup.rxq_params,
+				  &data->rx,
+				  &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
+				     &cmd_params->params.setup.pause_params,
+				     &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct tx_queue_init_ramrod_data *data)
+{
+	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+				       &cmd_params->params.tx_only.gen_params,
+				       &data->general,
+				       &cmd_params->params.tx_only.flags);
+
+	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+				  &cmd_params->params.tx_only.txq_params,
+				  &data->tx,
+				  &cmd_params->params.tx_only.flags);
+
+	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
+			 cmd_params->q_obj->cids[0],
+			 data->tx.tx_bd_page_base.lo,
+			 data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters (INIT command)
+ *
+ * HW/FW initial Queue configuration:
+ *      - HC: Rx and Tx
+ *      - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+			       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct bnx2x_queue_init_params *init = &params->params.init;
+	u16 hc_usec;
+	u8 cos;
+
+	/* Tx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
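+		/* Convert the requested HC rate (events/sec) into a
+		 * coalescing period in usec, e.g. hc_rate = 50000 -> 20 usec.
+		 */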
+		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+			init->tx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+			hc_usec);
+	}
+
+	/* Rx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+			init->rx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+			hc_usec);
+	}
+
+	/* Set CDU context validation values */
+	for (cos = 0; cos < o->max_cos; cos++) {
+		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
+				 o->cids[cos], cos);
+		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
+		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+	}
+
+	/* As no ramrod is sent, complete the command immediately  */
+	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+	mmiowb();
+	smp_mb();
+
+	return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+				  struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct tx_queue_init_ramrod_data *rdata =
+		(struct tx_queue_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+		&params->params.tx_only;
+	u8 cid_index = tx_only_params->cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
+			 tx_only_params->gen_params.cos,
+			 tx_only_params->gen_params.spcl_id);
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
+			 o->cids[cid_index], rdata->general.client_id,
+			 rdata->general.sp_client_id, rdata->general.cos);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+				     struct bnx2x_queue_sp_obj *obj,
+				     struct bnx2x_queue_update_params *params,
+				     struct client_update_ramrod_data *data)
+{
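+	/* Note: most fields below come in pairs - a value flag and a matching
+	 * *_change_flg; the firmware is expected to apply only the fields
+	 * whose change flag is set, so untouched attributes may stay zero.
+	 */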
+	/* Client ID of the client to update */
+	data->client_id = obj->cl_id;
+
+	/* Function ID of the client to update */
+	data->func_id = obj->func_id;
+
+	/* Default VLAN value */
+	data->default_vlan = cpu_to_le16(params->def_vlan);
+
+	/* Inner VLAN stripping */
+	data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+	data->inner_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Outer VLAN stripping */
+	data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+	data->outer_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Drop packets that have source MAC that doesn't belong to this
+	 * Queue.
+	 */
+	data->anti_spoofing_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+	data->anti_spoofing_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+	/* Activate/Deactivate */
+	data->activate_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+	data->activate_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+	/* Enable default VLAN */
+	data->default_vlan_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+	data->default_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			 &params->update_flags);
+
+	/* silent vlan removal */
+	data->silent_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			 &params->update_flags);
+	data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+	/* tx switching */
+	data->tx_switching_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+	data->tx_switching_change_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+			 &params->update_flags);
+
+	/* PTP */
+	data->handle_ptp_pkts_flg =
+		test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
+	data->handle_ptp_pkts_change_flg =
+		test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+				      struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_update_ramrod_data *rdata =
+		(struct client_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_queue_update_params *update_params =
+		&params->params.update;
+	u8 cid_index = update_params->cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+			     o->cids[cid_index], U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters (DEACTIVATE command)
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters (ACTIVATE command)
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *obj,
+				struct bnx2x_queue_update_tpa_params *params,
+				struct tpa_update_ramrod_data *data)
+{
+	data->client_id = obj->cl_id;
+	data->complete_on_both_clients = params->complete_on_both_clients;
+	data->dont_verify_rings_pause_thr_flg =
+		params->dont_verify_thr;
+	data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+	data->max_sges_for_packet = params->max_sges_pkt;
+	data->max_tpa_queues = params->max_tpa_queues;
+	data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+	data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+	data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+	data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+	data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+	data->tpa_mode = params->tpa_mode;
+	data->update_ipv4 = params->update_ipv4;
+	data->update_ipv6 = params->update_ipv6;
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct tpa_update_ramrod_data *rdata =
+		(struct tpa_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_queue_update_tpa_params *update_tpa_params =
+		&params->params.update_tpa;
+	u16 type;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+	/* Add the function id inside the type, so that the sp post function
+	 * doesn't automatically add the PF func-id; this is required
+	 * for operations done by PFs on behalf of their VFs.
+	 */
+	type = ETH_CONNECTION_TYPE |
+		((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), type);
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+				       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	u8 cid_idx = params->params.cfc_del.cid_index;
+
+	if (cid_idx >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_idx);
+		return -EINVAL;
+	}
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
+			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	u8 cid_index = params->params.terminate.cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
+			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+				     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_INIT:
+		return bnx2x_q_init(bp, params);
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+		return bnx2x_q_send_setup_tx_only(bp, params);
+	case BNX2X_Q_CMD_DEACTIVATE:
+		return bnx2x_q_send_deactivate(bp, params);
+	case BNX2X_Q_CMD_ACTIVATE:
+		return bnx2x_q_send_activate(bp, params);
+	case BNX2X_Q_CMD_UPDATE:
+		return bnx2x_q_send_update(bp, params);
+	case BNX2X_Q_CMD_UPDATE_TPA:
+		return bnx2x_q_send_update_tpa(bp, params);
+	case BNX2X_Q_CMD_HALT:
+		return bnx2x_q_send_halt(bp, params);
+	case BNX2X_Q_CMD_CFC_DEL:
+		return bnx2x_q_send_cfc_del(bp, params);
+	case BNX2X_Q_CMD_TERMINATE:
+		return bnx2x_q_send_terminate(bp, params);
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_q_send_empty(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e1x(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+				   struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e2(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @params:	queue state parameters, including the requested command
+ *
+ * Applies to a regular (i.e. not Forwarding) Queue.
+ * It both checks if the requested command is legal in the current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+				      struct bnx2x_queue_sp_obj *o,
+				      struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+	enum bnx2x_queue_cmd cmd = params->cmd;
+	struct bnx2x_queue_update_params *update_params =
+		 &params->params.update;
+	u8 next_tx_only = o->num_tx_only;
+
+	/* Forget all pending for completion commands if a driver only state
+	 * transition has been requested.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		o->pending = 0;
+		o->next_state = BNX2X_Q_STATE_MAX;
+	}
+
+	/* Don't allow a next state transition if we are in the middle of
+	 * the previous one.
+	 */
+	if (o->pending) {
+		BNX2X_ERR("Blocking transition since pending was %lx\n",
+			  o->pending);
+		return -EBUSY;
+	}
+
+	switch (state) {
+	case BNX2X_Q_STATE_RESET:
+		if (cmd == BNX2X_Q_CMD_INIT)
+			next_state = BNX2X_Q_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_Q_STATE_INITIALIZED:
+		if (cmd == BNX2X_Q_CMD_SETUP) {
+			if (test_bit(BNX2X_Q_FLG_ACTIVE,
+				     &params->params.setup.flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_ACTIVE:
+		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+			next_tx_only = 1;
+		}
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_ACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_MULTI_COS:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+			next_tx_only = o->num_tx_only + 1;
+		}
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_MULTI_COS;
+		}
+
+		break;
+	case BNX2X_Q_STATE_MCOS_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+			next_tx_only = o->num_tx_only - 1;
+			if (next_tx_only == 0)
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_MULTI_COS;
+		}
+
+		break;
+	case BNX2X_Q_STATE_INACTIVE:
+		if (cmd == BNX2X_Q_CMD_ACTIVATE)
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 * state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				     &update_params->update_flags)){
+				if (o->num_tx_only == 0)
+					next_state = BNX2X_Q_STATE_ACTIVE;
+				else /* tx only queues exist for this queue */
+					next_state = BNX2X_Q_STATE_MULTI_COS;
+			} else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_STOPPED:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_TERMINATED;
+
+		break;
+	case BNX2X_Q_STATE_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL)
+			next_state = BNX2X_Q_STATE_RESET;
+
+		break;
+	default:
+		BNX2X_ERR("Illegal state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_Q_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		o->next_tx_only = next_tx_only;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+	return -EINVAL;
+}
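+
+/* Summary of the main transitions checked above (the UPDATE/EMPTY/UPDATE_TPA
+ * self-loops are omitted):
+ *
+ *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
+ *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
+ *	MCOS_TERMINATED --CFC_DEL--> ACTIVE (last tx-only) or MULTI_COS
+ *	ACTIVE --DEACTIVATE--> INACTIVE --ACTIVATE--> ACTIVE (via UPDATE)
+ *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
+ *	TERMINATED --CFC_DEL--> RESET
+ */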
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj,
+			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+			  void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+	obj->max_cos = cid_cnt;
+	obj->cl_id = cl_id;
+	obj->func_id = func_id;
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+	obj->type = type;
+	obj->next_state = BNX2X_Q_STATE_MAX;
+
+	if (CHIP_IS_E1x(bp))
+		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+	else
+		obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+	obj->check_transition = bnx2x_queue_chk_transition;
+
+	obj->complete_cmd = bnx2x_queue_comp_cmd;
+	obj->wait_comp = bnx2x_queue_wait_comp;
+	obj->set_pending = bnx2x_queue_set_pending;
+}
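+
+/* Illustrative only: a typical initialization for a single-CoS L2 queue
+ * might look roughly like this (the cid/rdata variable names are
+ * placeholders for the caller's own resources):
+ *
+ *	u32 cids[1] = { queue_cid };
+ *
+ *	bnx2x_init_queue_obj(bp, &q_obj, cl_id, cids, 1, func_id,
+ *			     rdata_virt, rdata_phys,
+ *			     BIT(BNX2X_Q_TYPE_HAS_RX) | BIT(BNX2X_Q_TYPE_HAS_TX));
+ *
+ * After this, the first command accepted is typically BNX2X_Q_CMD_INIT
+ * (the memset leaves the object in its initial state).
+ */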
+
+/* return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+			       struct bnx2x_queue_sp_obj *obj)
+{
+	switch (obj->state) {
+	case BNX2X_Q_STATE_ACTIVE:
+	case BNX2X_Q_STATE_MULTI_COS:
+		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+	case BNX2X_Q_STATE_RESET:
+	case BNX2X_Q_STATE_INITIALIZED:
+	case BNX2X_Q_STATE_MCOS_TERMINATED:
+	case BNX2X_Q_STATE_INACTIVE:
+	case BNX2X_Q_STATE_STOPPED:
+	case BNX2X_Q_STATE_TERMINATED:
+	case BNX2X_Q_STATE_FLRED:
+		return BNX2X_Q_LOGICAL_STATE_STOPPED;
+	default:
+		return -EINVAL;
+	}
+}
+
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o)
+{
+	/* in the middle of a transaction - return INVALID state */
+	if (o->pending)
+		return BNX2X_F_STATE_MAX;
+
+	/* Ensure the ordering of reading of o->pending and o->state:
+	 * o->pending should be read first.
+	 */
+	rmb();
+
+	return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				enum bnx2x_func_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command being completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+					       struct bnx2x_func_sp_obj *o,
+					       enum bnx2x_func_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
+			  cmd, BP_FUNC(bp), o->state,
+			  cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP,
+	   "Completing command %d for func %d, setting state to %d\n",
+	   cmd, BP_FUNC(bp), o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_F_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_atomic();
+
+	return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command whose completion has arrived
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_sp_obj *o,
+			       enum bnx2x_func_cmd cmd)
+{
+	/* Complete the state machine part first, check if it's a
+	 * legal completion.
+	 */
+	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+	return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - check the Function state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @params:	function state parameters, including the requested command
+ *
+ * It both checks if the requested command is legal in the current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+				     struct bnx2x_func_sp_obj *o,
+				     struct bnx2x_func_state_params *params)
+{
+	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+	enum bnx2x_func_cmd cmd = params->cmd;
+
+	/* Forget all pending for completion commands if a driver only state
+	 * transition has been requested.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		o->pending = 0;
+		o->next_state = BNX2X_F_STATE_MAX;
+	}
+
+	/* Don't allow a next state transition if we are in the middle of
+	 * the previous one.
+	 */
+	if (o->pending)
+		return -EBUSY;
+
+	switch (state) {
+	case BNX2X_F_STATE_RESET:
+		if (cmd == BNX2X_F_CMD_HW_INIT)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_F_STATE_INITIALIZED:
+		if (cmd == BNX2X_F_CMD_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if (cmd == BNX2X_F_CMD_HW_RESET)
+			next_state = BNX2X_F_STATE_RESET;
+
+		break;
+	case BNX2X_F_STATE_STARTED:
+		if (cmd == BNX2X_F_CMD_STOP)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+		/* afex ramrods can be sent only in started mode, and only
+		 * if not pending for function_stop ramrod completion;
+		 * for these events the next state remains STARTED.
+		 */
+		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		/* Switch_update ramrod can be sent in either started or
+		 * tx_stopped state, and it doesn't change the state.
+		 */
+		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if (cmd == BNX2X_F_CMD_TX_STOP)
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		break;
+	case BNX2X_F_STATE_TX_STOPPED:
+		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		else if (cmd == BNX2X_F_CMD_TX_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_F_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+			 state, cmd);
+
+	return -EINVAL;
+}
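+
+/* Summary of the function state machine checked above (the afex,
+ * SWITCH_UPDATE and SET_TIMESYNC commands, where legal, keep the
+ * current state):
+ *
+ *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
+ *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
+ *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
+ */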
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_port(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn_chip(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+				      const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+			      struct bnx2x_func_state_params *params)
+{
+	u32 load_code = params->params.hw_init.load_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+	int rc = 0;
+
+	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
+			 BP_ABS_FUNC(bp), load_code);
+
+	/* Prepare buffers for unzipping the FW */
+	rc = drv->gunzip_init(bp);
+	if (rc)
+		return rc;
+
+	/* Prepare FW */
+	rc = drv->init_fw(bp);
+	if (rc) {
+		BNX2X_ERR("Error loading firmware\n");
+		goto init_err;
+	}
+
+	/* Handle the beginning of the COMMON_XXX phases separately... */
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		rc = bnx2x_func_init_cmn_chip(bp, drv);
+		if (rc)
+			goto init_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		rc = bnx2x_func_init_cmn(bp, drv);
+		if (rc)
+			goto init_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		rc = bnx2x_func_init_port(bp, drv);
+		if (rc)
+			goto init_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		rc = bnx2x_func_init_func(bp, drv);
+		if (rc)
+			goto init_err;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		rc = -EINVAL;
+	}
+
+init_err:
+	drv->gunzip_end(bp);
+
+	/* In case of success, complete the command immediately: no ramrods
+	 * have been sent.
+	 */
+	if (!rc)
+		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+	return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ *                 !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port() before reset_func(), as the last thing
+ * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
+ * makes any further DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_port(bp);
+	bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific HW reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	bnx2x_func_reset_port(bp, drv);
+	drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+				      struct bnx2x_func_state_params *params)
+{
+	u32 reset_phase = params->params.hw_reset.reset_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
+			 reset_phase);
+
+	switch (reset_phase) {
+	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+		bnx2x_func_reset_cmn(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_PORT:
+		bnx2x_func_reset_port(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+		bnx2x_func_reset_func(bp, drv);
+		break;
+	default:
+		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+			   reset_phase);
+		break;
+	}
+
+	/* Complete the command immediately: no ramrods have been sent. */
+	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+	return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_start_data *rdata =
+		(struct function_start_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_start_params *start_params = &params->params.start;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->function_mode	= (u8)start_params->mf_mode;
+	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
+	rdata->path_id		= BP_PATH(bp);
+	rdata->network_cos_mode	= start_params->network_cos_mode;
+
+	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
+	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
+	rdata->inner_clss_l2gre	= start_params->inner_clss_l2gre;
+	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+	rdata->inner_clss_vxlan	= start_params->inner_clss_vxlan;
+	rdata->inner_rss	= start_params->inner_rss;
+
+	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
+	if (start_params->class_fail_ethtype) {
+		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
+		rdata->sd_accept_mf_clss_fail_ethtype =
+			cpu_to_le16(start_params->class_fail_ethtype);
+	}
+
+	rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
+	rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
+	if (start_params->sd_vlan_eth_type)
+		rdata->sd_vlan_eth_type =
+			cpu_to_le16(start_params->sd_vlan_eth_type);
+	else
+		rdata->sd_vlan_eth_type =
+			cpu_to_le16(0x8100);
+
+	rdata->no_added_tags = start_params->no_added_tags;
+
+	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+	if (rdata->c2s_pri_tt_valid) {
+		memcpy(rdata->c2s_pri_trans_table.val,
+		       start_params->c2s_pri,
+		       MAX_VLAN_PRIORITIES);
+		rdata->c2s_pri_default = start_params->c2s_pri_default;
+	}
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_switch_update_params *switch_update_params =
+		&params->params.switch_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->tx_switch_suspend_change_flg = 1;
+		rdata->tx_switch_suspend =
+			test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+				 &switch_update_params->changes);
+	}
+
+	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->sd_vlan_tag_change_flg = 1;
+		rdata->sd_vlan_tag =
+			cpu_to_le16(switch_update_params->vlan);
+	}
+
+	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->sd_vlan_eth_type_change_flg = 1;
+		rdata->sd_vlan_eth_type =
+			cpu_to_le16(switch_update_params->vlan_eth_type);
+	}
+
+	if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->sd_vlan_force_pri_change_flg = 1;
+		if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+			     &switch_update_params->changes))
+			rdata->sd_vlan_force_pri_flg = 1;
+		rdata->sd_vlan_force_pri_flg =
+			switch_update_params->vlan_force_prio;
+	}
+
+	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->update_tunn_cfg_flg = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+			     &switch_update_params->changes))
+			rdata->inner_clss_l2gre = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+			     &switch_update_params->changes))
+			rdata->inner_clss_vxlan = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+			     &switch_update_params->changes))
+			rdata->inner_clss_l2geneve = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+			     &switch_update_params->changes))
+			rdata->inner_rss = 1;
+		rdata->vxlan_dst_port =
+			cpu_to_le16(switch_update_params->vxlan_dst_port);
+		rdata->geneve_dst_port =
+			cpu_to_le16(switch_update_params->geneve_dst_port);
+	}
+
+	rdata->echo = SWITCH_UPDATE;
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->afex_rdata;
+	dma_addr_t data_mapping = o->afex_rdata_mapping;
+	struct bnx2x_func_afex_update_params *afex_update_params =
+		&params->params.afex_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_id_change_flg = 1;
+	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
+	rdata->afex_default_vlan_change_flg = 1;
+	rdata->afex_default_vlan =
+		cpu_to_le16(afex_update_params->afex_default_vlan);
+	rdata->allowed_priorities_change_flg = 1;
+	rdata->allowed_priorities = afex_update_params->allowed_priorities;
+	rdata->echo = AFEX_UPDATE;
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	DP(BNX2X_MSG_SP,
+	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
+	   rdata->vif_id,
+	   rdata->afex_default_vlan, rdata->allowed_priorities);
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static
+inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct afex_vif_list_ramrod_data *rdata =
+		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
+	struct bnx2x_func_afex_viflists_params *afex_vif_params =
+		&params->params.afex_viflists;
+	u64 *p_rdata = (u64 *)rdata;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+	rdata->func_bit_map          = afex_vif_params->func_bit_map;
+	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+	rdata->func_to_clear         = afex_vif_params->func_to_clear;
+
+	/* send in echo type of sub command */
+	rdata->echo = afex_vif_params->afex_vif_list_command;
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
+	   rdata->afex_vif_list_command, rdata->vif_list_index,
+	   rdata->func_bit_map, rdata->func_to_clear);
+
+	/* this ramrod sends data directly and not through DMA mapping */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
+			     U64_HI(*p_rdata), U64_LO(*p_rdata),
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct flow_control_configuration *rdata =
+		(struct flow_control_configuration *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_tx_start_params *tx_start_params =
+		&params->params.tx_start;
+	int i;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	rdata->dcb_enabled = tx_start_params->dcb_enabled;
+	rdata->dcb_version = tx_start_params->dcb_version;
+	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+		rdata->traffic_type_to_priority_cos[i] =
+			tx_start_params->traffic_type_to_priority_cos[i];
+
+	for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+		rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline
+int bnx2x_func_send_set_timesync(struct bnx2x *bp,
+				 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct set_timesync_ramrod_data *rdata =
+		(struct set_timesync_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&params->params.set_timesync;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
+	rdata->offset_cmd = set_timesync_params->offset_cmd;
+	rdata->add_sub_drift_adjust_value =
+		set_timesync_params->add_sub_drift_adjust_value;
+	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
+	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
+	rdata->offset_delta.lo =
+		cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
+	rdata->offset_delta.hi =
+		cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
+
+	DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
+	   rdata->drift_adjust_cmd, rdata->offset_cmd,
+	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
+	   rdata->drift_adjust_period, rdata->offset_delta.lo,
+	   rdata->offset_delta.hi);
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_F_CMD_HW_INIT:
+		return bnx2x_func_hw_init(bp, params);
+	case BNX2X_F_CMD_START:
+		return bnx2x_func_send_start(bp, params);
+	case BNX2X_F_CMD_STOP:
+		return bnx2x_func_send_stop(bp, params);
+	case BNX2X_F_CMD_HW_RESET:
+		return bnx2x_func_hw_reset(bp, params);
+	case BNX2X_F_CMD_AFEX_UPDATE:
+		return bnx2x_func_send_afex_update(bp, params);
+	case BNX2X_F_CMD_AFEX_VIFLISTS:
+		return bnx2x_func_send_afex_viflists(bp, params);
+	case BNX2X_F_CMD_TX_STOP:
+		return bnx2x_func_send_tx_stop(bp, params);
+	case BNX2X_F_CMD_TX_START:
+		return bnx2x_func_send_tx_start(bp, params);
+	case BNX2X_F_CMD_SWITCH_UPDATE:
+		return bnx2x_func_send_switch_update(bp, params);
+	case BNX2X_F_CMD_SET_TIMESYNC:
+		return bnx2x_func_send_set_timesync(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	mutex_init(&obj->one_pending_mutex);
+
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+	obj->afex_rdata = afex_rdata;
+	obj->afex_rdata_mapping = afex_rdata_mapping;
+	obj->send_cmd = bnx2x_func_send_cmd;
+	obj->check_transition = bnx2x_func_chk_transition;
+	obj->complete_cmd = bnx2x_func_comp_cmd;
+	obj->wait_comp = bnx2x_func_wait_comp;
+
+	obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * Returns 0 in case of a successfully completed transition,
+ *         a negative error code in case of failure, or a positive
+ *         (EBUSY) value if a completion is still pending
+ *         (possible only if RAMROD_COMP_WAIT is not set in
+ *         params->ramrod_flags for asynchronous commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	int rc, cnt = 300;
+	enum bnx2x_func_cmd cmd = params->cmd;
+	unsigned long *pending = &o->pending;
+
+	mutex_lock(&o->one_pending_mutex);
+
+	/* Check that the requested transition is legal */
+	rc = o->check_transition(bp, o, params);
+	if ((rc == -EBUSY) &&
+	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+		while ((rc == -EBUSY) && (--cnt > 0)) {
+			mutex_unlock(&o->one_pending_mutex);
+			msleep(10);
+			mutex_lock(&o->one_pending_mutex);
+			rc = o->check_transition(bp, o, params);
+		}
+		if (rc == -EBUSY) {
+			mutex_unlock(&o->one_pending_mutex);
+			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+			return rc;
+		}
+	} else if (rc) {
+		mutex_unlock(&o->one_pending_mutex);
+		return rc;
+	}
+
+	/* Set "pending" bit */
+	set_bit(cmd, pending);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		bnx2x_func_state_change_comp(bp, o, cmd);
+		mutex_unlock(&o->one_pending_mutex);
+	} else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+
+		mutex_unlock(&o->one_pending_mutex);
+
+		if (rc) {
+			o->next_state = BNX2X_F_STATE_MAX;
+			clear_bit(cmd, pending);
+			smp_mb__after_atomic();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, cmd);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(cmd, pending);
+}
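+
+/* Illustrative only: bringing the function HW up is typically a two-step
+ * sequence driven through this interface (load_code comes from the MCP):
+ *
+ *	struct bnx2x_func_state_params func_params = {0};
+ *
+ *	func_params.f_obj = &bp->func_obj;
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *
+ *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+ *	func_params.params.hw_init.load_phase = load_code;
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ *
+ *	func_params.cmd = BNX2X_F_CMD_START;
+ *	... fill func_params.params.start ...
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */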
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
new file mode 100644
index 0000000..4048fc5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1531 @@
+/* bnx2x_sp.h: Qlogic Everest network driver.
+ *
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and Qlogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+	RAMROD_TX,
+	RAMROD_RX,
+	/* Wait until all pending commands complete */
+	RAMROD_COMP_WAIT,
+	/* Don't send a ramrod, only update a registry */
+	RAMROD_DRV_CLR_ONLY,
+	/* Configure HW according to the current object state */
+	RAMROD_RESTORE,
+	/* Execute the next command now */
+	RAMROD_EXEC,
+	/* Don't add a new command and continue execution of postponed
+	 * commands. If not set, a new command will be added to the
+	 * pending commands list.
+	 */
+	RAMROD_CONT,
+	/* If there is another pending ramrod, wait until it finishes and
+	 * retry submitting this one. This flag can be set only in a sleepable
+	 * context, and should not be set from the context that completes the
+	 * ramrods, as a deadlock will occur.
+	 */
+	RAMROD_RETRY,
+};
+
+typedef enum {
+	BNX2X_OBJ_TYPE_RX,
+	BNX2X_OBJ_TYPE_TX,
+	BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Public slow path states */
+enum {
+	BNX2X_FILTER_MAC_PENDING,
+	BNX2X_FILTER_VLAN_PENDING,
+	BNX2X_FILTER_VLAN_MAC_PENDING,
+	BNX2X_FILTER_RX_MODE_PENDING,
+	BNX2X_FILTER_RX_MODE_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+	BNX2X_FILTER_FCOE_ETH_START_SCHED,
+	BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+	BNX2X_FILTER_MCAST_PENDING,
+	BNX2X_FILTER_MCAST_SCHED,
+	BNX2X_FILTER_RSS_CONF_PENDING,
+	BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+	BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
+};
+
+struct bnx2x_raw_obj {
+	u8		func_id;
+
+	/* Queue params */
+	u8		cl_id;
+	u32		cid;
+
+	/* Ramrod data buffer params */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/* Ramrod state params */
+	int		state;   /* "ramrod is pending" state bit */
+	unsigned long	*pstate; /* pointer to state buffer */
+
+	bnx2x_obj_type	obj_type;
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_raw_obj *o);
+
+	bool (*check_pending)(struct bnx2x_raw_obj *o);
+	void (*clear_pending)(struct bnx2x_raw_obj *o);
+	void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+	u8 is_inner_mac;
+};
+
+struct bnx2x_vlan_ramrod_data {
+	u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+	u8 is_inner_mac;
+	u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+	struct bnx2x_mac_ramrod_data mac;
+	struct bnx2x_vlan_ramrod_data vlan;
+	struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+	BNX2X_VLAN_MAC_ADD,
+	BNX2X_VLAN_MAC_DEL,
+	BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+	/* Requested command: BNX2X_VLAN_MAC_XX */
+	enum bnx2x_vlan_mac_cmd cmd;
+	/* Used to contain the vlan_mac_flags bits related to the data,
+	 * taken from the ramrod parameters.
+	 */
+	unsigned long vlan_mac_flags;
+
+	/* Needed for MOVE command */
+	struct bnx2x_vlan_mac_obj *target_obj;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+	struct bnx2x_vlan_mac_data vlan_mac;
+
+	struct {
+		/* TODO */
+	} mcast;
+};
+
+struct bnx2x_exeq_elem {
+	struct list_head		link;
+
+	/* Length of this element in the exe_chunk. */
+	int				cmd_len;
+
+	union bnx2x_exe_queue_cmd_data	cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+	union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+
+typedef int (*exe_q_remove)(struct bnx2x *bp,
+			    union bnx2x_qable_obj *o,
+			    struct bnx2x_exeq_elem *elem);
+
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+			     union bnx2x_qable_obj *o,
+			     struct list_head *exe_chunk,
+			     unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+			(*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+				     struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+	/* Commands pending for execution. */
+	struct list_head	exe_queue;
+
+	/* Commands pending for completion. */
+	struct list_head	pending_comp;
+
+	spinlock_t		lock;
+
+	/* Maximum length of commands' list for one execution */
+	int			exe_chunk_len;
+
+	union bnx2x_qable_obj	*owner;
+
+	/****** Virtual functions ******/
+	/**
+	 * Called before command execution for commands that are really
+	 * going to be executed (after 'optimize').
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_validate		validate;
+
+	/**
+	 * Called before removing pending commands, cleaning allocated
+	 * resources (e.g., credits from validate)
+	 */
+	exe_q_remove		remove;
+
+	/**
+	 * This will try to cancel the current pending commands list
+	 * considering the new command.
+	 *
+	 * Returns the number of optimized commands or a negative error code
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_optimize		optimize;
+
+	/**
+	 * Run the next commands chunk (owner specific).
+	 */
+	exe_q_execute		execute;
+
+	/**
+	 * Return the exe_queue element containing the specific command
+	 * if any. Otherwise return NULL.
+	 */
+	exe_q_get		get;
+};
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+	struct list_head	link;
+
+	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
+	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
+	 * same CAM for these chips.
+	 */
+	int			cam_offset;
+
+	/* Needed for DEL and RESTORE flows */
+	unsigned long		vlan_mac_flags;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+	BNX2X_UC_LIST_MAC,
+	BNX2X_ETH_MAC,
+	BNX2X_ISCSI_ETH_MAC,
+	BNX2X_NETQ_ETH_MAC,
+	BNX2X_DONT_CONSUME_CAM_CREDIT,
+	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
+				 1 << BNX2X_ETH_MAC | \
+				 1 << BNX2X_ISCSI_ETH_MAC | \
+				 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
+
+struct bnx2x_vlan_mac_ramrod_params {
+	/* Object to run the command from */
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* General command flags: COMP_WAIT, etc. */
+	unsigned long ramrod_flags;
+
+	/* Command specific configuration request */
+	struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+	struct bnx2x_raw_obj raw;
+
+	/* Bookkeeping list: will prevent the addition of already existing
+	 * entries.
+	 */
+	struct list_head		head;
+	/* Implement a simple reader/writer lock on the head list.
+	 * All these fields should only be accessed under the exe_queue lock.
+	 */
+	u8		head_reader; /* Num. of readers accessing head list */
+	bool		head_exe_request; /* Pending execution request. */
+	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */
+
+	/* TODO: Add its initialization in the init functions */
+	struct bnx2x_exe_queue_obj	exe_queue;
+
+	/* MACs credit pool */
+	struct bnx2x_credit_pool_obj	*macs_pool;
+
+	/* VLANs credit pool */
+	struct bnx2x_credit_pool_obj	*vlans_pool;
+
+	/* RAMROD command to be used */
+	int				ramrod_cmd;
+
+	/* Copy the first n elements into a preallocated buffer.
+	 *
+	 * @param n number of elements to get
+	 * @param buf buffer preallocated by caller into which elements
+	 *            will be copied. Note elements are 4-byte aligned
+	 *            so buffer size must be able to accommodate the
+	 *            aligned elements.
+	 *
+	 * @return number of copied bytes
+	 */
+	int (*get_n_elements)(struct bnx2x *bp,
+			      struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+			      u8 stride, u8 size);
+
+	/**
+	 * Checks if ADD-ramrod with the given params may be performed.
+	 *
+	 * @return zero if the element may be added
+	 */
+
+	int (*check_add)(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *o,
+			 union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if DEL-ramrod with the given params may be performed.
+	 *
+	 * @return pointer to the registry element to delete, NULL otherwise
+	 */
+	struct bnx2x_vlan_mac_registry_elem *
+		(*check_del)(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if a MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return true if the element may be moved
+	 */
+	bool (*check_move)(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_obj *src_o,
+			   struct bnx2x_vlan_mac_obj *dst_o,
+			   union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 *  Update the relevant credit object(s) (consume/return
+	 *  correspondingly).
+	 */
+	bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+	bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+	/**
+	 * Configures one rule in the ramrod data buffer.
+	 */
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     struct bnx2x_exeq_elem *elem, int rule_idx,
+			     int cam_offset);
+
+	/**
+	 * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes there are no commands
+	 * pending for execution. Will schedule all currently
+	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+	 * specification for deletion and will use the given
+	 * ramrod_flags for the last DEL operation.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param ramrod_flags RAMROD_XX flags
+	 *
+	 * @return 0 if the last operation has completed successfully
+	 *         and there are no more elements left, positive value
+	 *         if there are pending for completion commands,
+	 *         negative value in case of failure.
+	 */
+	int (*delete_all)(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_obj *o,
+			  unsigned long *vlan_mac_flags,
+			  unsigned long *ramrod_flags);
+
+	/**
+	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+	 * configured elements list.
+	 *
+	 * @param bp
+	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
+	 *          ramrod_flags is only taken into an account)
+	 * @param ppos a pointer to the cookie that should be given back in the
+	 *        next call to make function handle the next element. If
+	 *        *ppos is set to NULL it will restart the iterator.
+	 *        If returned *ppos == NULL this means that the last
+	 *        element has been handled.
+	 *
+	 * @return int
+	 */
+	int (*restore)(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_ramrod_params *p,
+		       struct bnx2x_vlan_mac_registry_elem **ppos);
+
+	/**
+	 * Should be called on a completion arrival.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param cqe Completion element we are handling
+	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+	 *		       pending commands will be executed.
+	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+	 *		       may also be set if needed.
+	 *
+	 * @return 0 if there are neither pending nor waiting for
+	 *         completion commands. Positive value if there are
+	 *         pending for execution or for completion commands.
+	 *         Negative value in case of an error (including an
+	 *         error in the cqe).
+	 */
+	int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+			union event_ring_elem *cqe,
+			unsigned long *ramrod_flags);
+
+	/**
+	 * Wait for completion of all commands. Don't schedule new ones,
+	 * just wait. It assumes that the completion code will schedule
+	 * for new commands.
+	 */
+	int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
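+
+/* Illustrative usage sketch: walking the registry with the ->restore()
+ * iterator documented above.  'p' is assumed to be a prepared
+ * bnx2x_vlan_mac_ramrod_params with RAMROD_COMP_WAIT set in ramrod_flags.
+ *
+ *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ *	int rc;
+ *
+ *	do {
+ *		rc = p.vlan_mac_obj->restore(bp, &p, &pos);
+ *	} while (!rc && pos);
+ */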
+
+enum {
+	BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
+	BNX2X_LLH_CAM_ETH_LINE,
+	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+	BNX2X_RX_MODE_FCOE_ETH,
+	BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+	BNX2X_ACCEPT_UNICAST,
+	BNX2X_ACCEPT_MULTICAST,
+	BNX2X_ACCEPT_ALL_UNICAST,
+	BNX2X_ACCEPT_ALL_MULTICAST,
+	BNX2X_ACCEPT_BROADCAST,
+	BNX2X_ACCEPT_UNMATCHED,
+	BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+	struct bnx2x_rx_mode_obj *rx_mode_obj;
+	unsigned long *pstate;
+	int state;
+	u8 cl_id;
+	u32 cid;
+	u8 func_id;
+	unsigned long ramrod_flags;
+	unsigned long rx_mode_flags;
+
+	/* rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to
+	 * a tstorm_eth_mac_filter_config (e1x).
+	 */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/* Rx mode settings */
+	unsigned long rx_accept_flags;
+
+	/* internal switching settings */
+	unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+	int (*config_rx_mode)(struct bnx2x *bp,
+			      struct bnx2x_rx_mode_ramrod_params *p);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+	struct list_head link;
+	u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+	u8 *mac;
+	u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+	struct bnx2x_mcast_obj *mcast_obj;
+
+	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+	unsigned long ramrod_flags;
+
+	struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+	/** TODO:
+	 *      - rename it to macs_num.
+	 *      - Add a new command type for handling pending commands
+	 *        (remove "zero semantics").
+	 *
+	 *  Length of mcast_list. If zero and ADD_CONT command - post
+	 *  pending commands.
+	 */
+	int mcast_list_len;
+};
+
+enum bnx2x_mcast_cmd {
+	BNX2X_MCAST_CMD_ADD,
+	BNX2X_MCAST_CMD_CONT,
+	BNX2X_MCAST_CMD_DEL,
+	BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+	struct bnx2x_raw_obj raw;
+
+	union {
+		struct {
+		#define BNX2X_MCAST_BINS_NUM	256
+		#define BNX2X_MCAST_VEC_SZ	(BNX2X_MCAST_BINS_NUM / 64)
+			u64 vec[BNX2X_MCAST_VEC_SZ];
+
+			/** Number of BINs to clear. Should be updated
+			 *  immediately when a command arrives in order to
+			 *  properly create DEL commands.
+			 */
+			int num_bins_set;
+		} aprox_match;
+
+		struct {
+			struct list_head macs;
+			int num_macs_set;
+		} exact_match;
+	} registry;
+
+	/* Pending commands */
+	struct list_head pending_cmds_head;
+
+	/* A state that is set in raw.pstate, when there are pending commands */
+	int sched_state;
+
+	/* Maximal number of mcast MACs configured in one command */
+	int max_cmd_len;
+
+	/* Total number of currently pending MACs to configure: both
+	 * in the pending commands list and in the current command.
+	 */
+	int total_pending_num;
+
+	u8 engine_id;
+
+	/**
+	 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+	 */
+	int (*config_mcast)(struct bnx2x *bp,
+			    struct bnx2x_mcast_ramrod_params *p,
+			    enum bnx2x_mcast_cmd cmd);
+
+	/**
+	 * Fills the ramrod data during the RESTORE flow.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param start_idx Registry index to start from
+	 * @param rdata_idx Index in the ramrod data to start from
+	 *
+	 * @return -1 if we handled the whole registry or index of the last
+	 *         handled registry element.
+	 */
+	int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   int start_bin, int *rdata_idx);
+
+	int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   struct bnx2x_mcast_ramrod_params *p,
+			   enum bnx2x_mcast_cmd cmd);
+
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_mcast_obj *o, int idx,
+			     union bnx2x_mcast_config_data *cfg_data,
+			     enum bnx2x_mcast_cmd cmd);
+
+	/** Checks if there are more mcast MACs to be set or a previous
+	 *  command is still pending.
+	 */
+	bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Set/Clear/Check SCHEDULED state of the object
+	 */
+	void (*set_sched)(struct bnx2x_mcast_obj *o);
+	void (*clear_sched)(struct bnx2x_mcast_obj *o);
+	bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+	/* Wait until all pending commands complete */
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Handle the internal object counters needed for proper
+	 * commands handling. Checks that the provided parameters are
+	 * feasible.
+	 */
+	int (*validate)(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p,
+			enum bnx2x_mcast_cmd cmd);
+
+	/**
+	 * Restore the values of internal counters in case of a failure.
+	 */
+	void (*revert)(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int old_num_bins);
+
+	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+	/* Current amount of credit in the pool */
+	atomic_t	credit;
+
+	/* Maximum allowed credit. put() will check against it. */
+	int		pool_sz;
+
+	/* Allocate a pool table statically.
+	 *
+	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
+	 *
+	 * The set bit in the table will mean that the entry is available.
+	 */
+#define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
+	u64		pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+	int		base_pool_offset;
+
+	/**
+	 * Get the next free pool entry.
+	 *
+	 * @return true if there was a free entry in the pool
+	 */
+	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+	/**
+	 * Return the entry back to the pool.
+	 *
+	 * @return true if entry is legal and has been successfully
+	 *         returned to the pool.
+	 */
+	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+	/**
+	 * Get the requested amount of credit from the pool.
+	 *
+	 * @param cnt Amount of requested credit
+	 * @return true if the operation is successful
+	 */
+	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Returns the credit to the pool.
+	 *
+	 * @param cnt Amount of credit to return
+	 * @return true if the operation is successful
+	 */
+	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Reads the current amount of credit.
+	 */
+	int (*check)(struct bnx2x_credit_pool_obj *o);
+};
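+
+/* Illustrative usage sketch: consuming and later returning one unit of
+ * credit through the pool callbacks.  'pool' is an assumption for a pointer
+ * to an initialized bnx2x_credit_pool_obj.
+ *
+ *	if (!pool->get(pool, 1))
+ *		return false;		(no credit left)
+ *	...
+ *	pool->put(pool, 1);		(hand the credit back)
+ */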
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV4_UDP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+	BNX2X_RSS_IPV6_UDP,
+
+	BNX2X_RSS_IPV4_VXLAN,
+	BNX2X_RSS_IPV6_VXLAN,
+	BNX2X_RSS_TUNN_INNER_HDRS,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long	rss_flags;
+
+	/* Number of hash bits to take into account */
+	u8		rss_result_mask;
+
+	/* Indirection table */
+	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32		rss_key[10];
+
+	/* valid only iff BNX2X_RSS_UPDATE_TOE is set */
+	u16		toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj	raw;
+
+	/* RSS engine to use */
+	u8			engine_id;
+
+	/* Last configured indirection table */
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* flags for enabling 4-tuple hash on UDP */
+	u8			udp_rss_v4;
+	u8			udp_rss_v6;
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+	BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+	BNX2X_Q_UPDATE_TX_SWITCHING,
+	BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+	BNX2X_Q_UPDATE_PTP_PKTS,
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_MULTI_COS,
+	BNX2X_Q_STATE_MCOS_TERMINATED,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_logical_state {
+	BNX2X_Q_LOGICAL_STATE_ACTIVE,
+	BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_SETUP_TX_ONLY,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_TPA_IPV6,
+	BNX2X_Q_FLG_TPA_GRO,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM,
+	BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+	BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
+	BNX2X_Q_FLG_PCSUM_ON_PKT,
+	BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
+};
+
+/* Queue type options: queue type may be a combination of below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX			0
+#define BNX2X_MULTI_TX_COS_E1X			3 /* QM only */
+#define BNX2X_MULTI_TX_COS_E2_E3A0		2
+#define BNX2X_MULTI_TX_COS_E3B0			3
+#define BNX2X_MULTI_TX_COS			3 /* Maximum possible */
+
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+ * timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+	/* maximum number of cos supported by hardware */
+	u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16		def_vlan;
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+	/* index within the tx_only cids of this queue object */
+	u8		cid_index;
+};
+
+struct bnx2x_queue_update_tpa_params {
+	dma_addr_t sge_map;
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 max_tpa_queues;
+	u8 max_sges_pkt;
+	u8 complete_on_both_clients;
+	u8 dont_verify_thr;
+	u8 tpa_mode;
+	u8 _pad;
+
+	u16 sge_buff_sz;
+	u16 max_agg_sz;
+
+	u16 sge_pause_thr_low;
+	u16 sge_pause_thr_high;
+};
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8		stat_id;
+
+	u8		spcl_id;
+	u16		mtu;
+	u8		cos;
+
+	u8		fp_hsi;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		drop_flags;
+	u16		buf_sz;
+	u8		fw_sb_id;
+	u8		cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u8		max_sges_pkt;
+	u8		max_tpa_queues;
+	u8		rss_engine_id;
+
+	/* valid iff BNX2X_Q_FLG_MCAST */
+	u8		mcast_engine_id;
+
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u8		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
+	u16		traffic_type;
+	/* equals the leading rss client id, used for TX classification */
+	u8		tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16		default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_txq_setup_params txq_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct rxq_pause_params pause_params;
+	unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+	struct bnx2x_general_setup_params	gen_params;
+	struct bnx2x_txq_setup_params		txq_params;
+	unsigned long				flags;
+	/* index within the tx_only cids of this queue object */
+	u8					cid_index;
+};
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params	update;
+		struct bnx2x_queue_update_tpa_params    update_tpa;
+		struct bnx2x_queue_setup_params		setup;
+		struct bnx2x_queue_init_params		init;
+		struct bnx2x_queue_setup_tx_only_params	tx_only;
+		struct bnx2x_queue_terminate_params	terminate;
+		struct bnx2x_queue_cfc_del_params	cfc_del;
+	} params;
+};
+
+struct bnx2x_viflist_params {
+	u8 echo_res;
+	u8 func_bit_map_res;
+};
+
+struct bnx2x_queue_sp_obj {
+	u32		cids[BNX2X_MULTI_TX_COS];
+	u8		cl_id;
+	u8		func_id;
+
+	/* Number of traffic classes supported by the queue.
+	 * The primary connection of the queue supports the first traffic
+	 * class. Any further traffic class is supported by a tx-only
+	 * connection.
+	 *
+	 * Therefore max_cos is also the number of valid entries in the cids
+	 * array.
+	 */
+	u8 max_cos;
+	u8 num_tx_only, next_tx_only;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long	type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements "one
+	 * pending" paradigm but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long	pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+	BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+	BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+	BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+};
+
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_TX_STOPPED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_AFEX_UPDATE,
+	BNX2X_F_CMD_AFEX_VIFLISTS,
+	BNX2X_F_CMD_TX_STOP,
+	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_SWITCH_UPDATE,
+	BNX2X_F_CMD_SET_TIMESYNC,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+
+	/* UDP dest port for VXLAN */
+	u16 vxlan_dst_port;
+
+	/* UDP dest port for Geneve */
+	u16 geneve_dst_port;
+
+	/* Enable inner Rx classifications for L2GRE packets */
+	u8 inner_clss_l2gre;
+
+	/* Enable inner Rx classifications for L2-Geneve packets */
+	u8 inner_clss_l2geneve;
+
+	/* Enable inner Rx classification for vxlan packets */
+	u8 inner_clss_vxlan;
+
+	/* Enable RSS according to inner header */
+	u8 inner_rss;
+
+	/* Allows accepting packets that fail MF classification, possibly
+	 * only those matching a given ethertype
+	 */
+	u8 class_fail;
+	u16 class_fail_ethtype;
+
+	/* Override priority of output packets */
+	u8 sd_vlan_force_pri;
+	u8 sd_vlan_force_pri_val;
+
+	/* Replace vlan's ethertype */
+	u16 sd_vlan_eth_type;
+
+	/* Prevent inner vlans from being added by FW */
+	u8 no_added_tags;
+
+	/* Inner-to-Outer vlan priority mapping */
+	u8 c2s_pri[MAX_VLAN_PRIORITIES];
+	u8 c2s_pri_default;
+	u8 c2s_pri_valid;
+};
+
+struct bnx2x_func_switch_update_params {
+	unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
+	u16 vlan;
+	u16 vlan_eth_type;
+	u8 vlan_force_prio;
+	u16 vxlan_dst_port;
+	u16 geneve_dst_port;
+};
+
+struct bnx2x_func_afex_update_params {
+	u16 vif_id;
+	u16 afex_default_vlan;
+	u8 allowed_priorities;
+};
+
+struct bnx2x_func_afex_viflists_params {
+	u16 vif_list_index;
+	u8 func_bit_map;
+	u8 afex_vif_list_command;
+	u8 func_to_clear;
+};
+
+struct bnx2x_func_tx_start_params {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
+};
+
+struct bnx2x_func_set_timesync_params {
+	/* Reset, set or keep the current drift value */
+	u8 drift_adjust_cmd;
+
+	/* Dec, inc or keep the current offset */
+	u8 offset_cmd;
+
+	/* Drift value direction */
+	u8 add_sub_drift_adjust_value;
+
+	/* Drift, period and offset values to be used according to the commands
+	 * above.
+	 */
+	u8 drift_adjust_value;
+	u32 drift_adjust_period;
+	u64 offset_delta;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+		struct bnx2x_func_switch_update_params switch_update;
+		struct bnx2x_func_afex_update_params afex_update;
+		struct bnx2x_func_afex_viflists_params afex_viflists;
+		struct bnx2x_func_tx_start_params tx_start;
+		struct bnx2x_func_set_timesync_params set_timesync;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *      - Common Chip
+	 *      - Common (per Path)
+	 *      - Port
+	 *      - Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state	state, next_state;
+
+	/* BNX2X_F_CMD_XX bits. This object implements "one
+	 * pending" paradigm but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long		pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void			*rdata;
+	dma_addr_t		rdata_mapping;
+
+	/* Buffer to use as AFEX ramrod data and its mapping.
+	 * This can't be the same rdata as above because AFEX ramrod requests
+	 * can arrive at the object in parallel with other ramrod requests.
+	 */
+	void			*afex_rdata;
+	dma_addr_t		afex_rdata_mapping;
+
+	/* This mutex ensures that when the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit
+	 */
+	struct mutex		one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops	*drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+			  u8 cid_cnt, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+			       struct bnx2x_queue_sp_obj *obj);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+					struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+				struct bnx2x_vlan_mac_obj *o);
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p);
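+
+/* Illustrative usage sketch: adding a unicast MAC through a classification
+ * object and waiting for completion.  'mac_obj' and 'addr' are assumptions
+ * local to this example.
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p = {NULL};
+ *
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
+ *	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ */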
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @p: Command parameters
+ *
+ * Return: 0 - if the operation was successful and there are no pending completions,
+ *         positive number - if there are pending completions,
+ *         negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
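+
+/* Illustrative usage sketch: a minimal rx_mode request accepting unicast and
+ * broadcast frames on both the Rx and internal Tx switching paths.  The
+ * addressing fields (cl_id, cid, func_id, rdata, pstate, state) are omitted
+ * here and must be filled by a real caller; 'bp->rx_mode_obj' is an
+ * assumption about the owning structure.
+ *
+ *	struct bnx2x_rx_mode_ramrod_params p = {NULL};
+ *
+ *	p.rx_mode_obj = &bp->rx_mode_obj;
+ *	__set_bit(BNX2X_ACCEPT_UNICAST, &p.rx_accept_flags);
+ *	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.rx_accept_flags);
+ *	__set_bit(BNX2X_ACCEPT_UNICAST, &p.tx_accept_flags);
+ *	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.tx_accept_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *
+ *	rc = bnx2x_config_rx_mode(bp, &p);
+ */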
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending or if the number of MACs to
+ * configure exceeds the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * Return: 0 if the operation was successful and there are no pending completions,
+ *         negative if there were errors, positive if there are pending
+ *         completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       enum bnx2x_mcast_cmd cmd);
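+
+/* Illustrative usage sketch: adding a single multicast MAC and waiting for
+ * the ramrod to complete.  'mac' and 'bp->mcast_obj' are assumptions local
+ * to this example.
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ *	struct bnx2x_mcast_list_elem elem = { .mac = mac };
+ *
+ *	rparam.mcast_obj = &bp->mcast_obj;
+ *	INIT_LIST_HEAD(&rparam.mcast_list);
+ *	list_add_tail(&elem.link, &rparam.mcast_list);
+ *	rparam.mcast_list_len = 1;
+ *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
+ *
+ *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ */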
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit);
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
+ *
+ * Return: 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
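+
+/* Illustrative usage sketch: enabling regular RSS over TCP/IPv4 and TCP/IPv6
+ * with a caller-supplied key and indirection table.  'rss_obj' and 'key' are
+ * assumptions local to this example; rss_result_mask is left out and must be
+ * chosen by a real caller.
+ *
+ *	struct bnx2x_config_rss_params p = {NULL};
+ *
+ *	p.rss_obj = rss_obj;
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4_TCP, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6_TCP, &p.rss_flags);
+ *	memcpy(p.ind_table, rss_obj->ind_table, sizeof(p.ind_table));
+ *	memcpy(p.rss_key, key, sizeof(p.rss_key));
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *
+ *	rc = bnx2x_config_rss(bp, &p);
+ */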
+
+/**
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
+ *
+ * @ind_table: buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#define PF_MAC_CREDIT_E2(bp, func_num)					\
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)					 \
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 0000000..9d02734
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3112 @@
+/* bnx2x_sriov.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+			    struct bnx2x_virtf **vf,
+			    struct pf_vf_bulletin_content **bulletin,
+			    bool test_queue);
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+					 u16 pf_id)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+					u8 enable)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+	int idx;
+
+	for_each_vf(bp, idx)
+		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+			break;
+	return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				u8 igu_sb_id, u8 segment, u16 index, u8 op,
+				u8 update)
+{
+	/* acking a VF sb through the PF - use the GRC */
+	u32 ctl;
+	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+	u32 func_encode = vf->abs_vfid;
+	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+	struct igu_regular cmd_data = {0};
+
+	cmd_data.sb_id_and_flags =
+			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
+			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
+	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
+	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+	   cmd_data.sb_id_and_flags, igu_addr_data);
+	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+	mmiowb();
+	barrier();
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+	   ctl, igu_addr_ctl);
+	REG_WR(bp, igu_addr_ctl, ctl);
+	mmiowb();
+	barrier();
+}
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       bool print_err)
+{
+	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+		if (print_err)
+			BNX2X_ERR("Slowpath objects not yet initialized!\n");
+		else
+			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+		return false;
+	}
+	return true;
+}
+
+/* VFOP operations states */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_queue_init_params *init_params,
+			      struct bnx2x_queue_setup_params *setup_params,
+			      u16 q_idx, u16 sb_idx)
+{
+	DP(BNX2X_MSG_IOV,
+	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+	   vf->abs_vfid,
+	   q_idx,
+	   sb_idx,
+	   init_params->tx.sb_cq_index,
+	   init_params->tx.hc_rate,
+	   setup_params->flags,
+	   setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct bnx2x_queue_init_params *init_params,
+			    struct bnx2x_queue_setup_params *setup_params,
+			    u16 q_idx, u16 sb_idx)
+{
+	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+	   vf->abs_vfid,
+	   q_idx,
+	   sb_idx,
+	   init_params->rx.sb_cq_index,
+	   init_params->rx.hc_rate,
+	   setup_params->gen_params.mtu,
+	   rxq_params->buf_sz,
+	   rxq_params->sge_buf_sz,
+	   rxq_params->max_sges_pkt,
+	   rxq_params->tpa_agg_sz,
+	   setup_params->flags,
+	   rxq_params->drop_flags,
+	   rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+			   struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q,
+			   struct bnx2x_vf_queue_construct_params *p,
+			   unsigned long q_type)
+{
+	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+	/* INIT */
+
+	/* Enable host coalescing in the transition to INIT state */
+	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+	/* FW SB ID */
+	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+	/* context */
+	init_p->cxts[0] = q->cxt;
+
+	/* SETUP */
+
+	/* Setup-op general parameters */
+	setup_p->gen_params.spcl_id = vf->sp_cl_id;
+	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+	setup_p->gen_params.fp_hsi = vf->fp_hsi;
+
+	/* Setup-op flags:
+	 * collect statistics, zero statistics, local-switching, security,
+	 * OV for Flex10, RSS and MCAST for leading
+	 */
+	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+	/* for VFs, enable tx switching, bd coherency, and mac address
+	 * anti-spoofing
+	 */
+	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+	/* Setup-op rx parameters */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+	}
+
+	/* Setup-op tx parameters */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+	}
+}
+
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+				 struct bnx2x_virtf *vf, int qid,
+				 struct bnx2x_vf_queue_construct_params *qctor)
+{
+	struct bnx2x_queue_state_params *q_params;
+	int rc = 0;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+	/* Prepare ramrod information */
+	q_params = &qctor->qstate;
+	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
+
+	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+		goto out;
+	}
+
+	/* Run Queue 'construction' ramrods */
+	q_params->cmd = BNX2X_Q_CMD_INIT;
+	rc = bnx2x_queue_state_change(bp, q_params);
+	if (rc)
+		goto out;
+
+	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+	       sizeof(struct bnx2x_queue_setup_params));
+	q_params->cmd = BNX2X_Q_CMD_SETUP;
+	rc = bnx2x_queue_state_change(bp, q_params);
+	if (rc)
+		goto out;
+
+	/* enable interrupts */
+	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+	return rc;
+}
+
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  int qid)
+{
+	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+				       BNX2X_Q_CMD_TERMINATE,
+				       BNX2X_Q_CMD_CFC_DEL};
+	struct bnx2x_queue_state_params q_params;
+	int rc, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* Prepare ramrod information */
+	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
+		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+		goto out;
+	}
+
+	/* Run Queue 'destruction' ramrods */
+	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+		q_params.cmd = cmds[i];
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+			return rc;
+		}
+	}
+out:
+	/* Clean Context */
+	if (bnx2x_vfq(vf, qid, cxt)) {
+		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
+	}
+
+	return 0;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+	if (vf) {
+		/* the first igu entry belonging to VFs of this PF */
+		if (!BP_VFDB(bp)->first_vf_igu_entry)
+			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+		/* the first igu entry belonging to this VF */
+		if (!vf_sb_count(vf))
+			vf->igu_base_id = igu_sb_id;
+
+		++vf_sb_count(vf);
+		++vf->sb_count;
+	}
+	BP_VFDB(bp)->vf_sbs_pool++;
+}
+
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+					struct bnx2x_vlan_mac_obj *obj,
+					atomic_t *counter)
+{
+	struct list_head *pos;
+	int read_lock;
+	int cnt = 0;
+
+	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+	if (read_lock)
+		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+
+	list_for_each(pos, &obj->head)
+		cnt++;
+
+	if (!read_lock)
+		bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
+	atomic_set(counter, cnt);
+}
+
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				   int qid, bool drv_only, int type)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod;
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
+
+	/* Prepare ramrod params */
+	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+	} else if (type == BNX2X_VF_FILTER_MAC) {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+	} else {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+	}
+	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+	/* Start deleting */
+	rc = ramrod.vlan_mac_obj->delete_all(bp,
+					     ramrod.vlan_mac_obj,
+					     &ramrod.user_req.vlan_mac_flags,
+					     &ramrod.ramrod_flags);
+	if (rc) {
+		BNX2X_ERR("Failed to delete all %s\n",
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+				    struct bnx2x_virtf *vf, int qid,
+				    struct bnx2x_vf_mac_vlan_filter *filter,
+				    bool drv_only)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod;
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
+
+	/* Prepare ramrod params */
+	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+		ramrod.user_req.u.vlan.vlan = filter->vid;
+		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+		ramrod.user_req.u.vlan.vlan = filter->vid;
+	} else {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+	}
+	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+					    BNX2X_VLAN_MAC_DEL;
+
+	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+	/* Add/Remove the filter */
+	rc = bnx2x_config_vlan_mac(bp, &ramrod);
+	if (rc && rc != -EEXIST) {
+		BNX2X_ERR("Failed to %s %s\n",
+			  filter->add ? "add" : "delete",
+			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+				"VLAN-MAC" :
+			  (filter->type == BNX2X_VF_FILTER_MAC) ?
+				"MAC" : "VLAN");
+		return rc;
+	}
+
+	return 0;
+}
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  struct bnx2x_vf_mac_vlan_filters *filters,
+				  int qid, bool drv_only)
+{
+	int rc = 0, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
+
+	/* Prepare ramrod params */
+	for (i = 0; i < filters->count; i++) {
+		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+					      &filters->filters[i], drv_only);
+		if (rc)
+			break;
+	}
+
+	/* Rollback if needed */
+	if (i != filters->count) {
+		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+			  i, filters->count);
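+		/* Undo the filters already applied by inverting each
+		 * add/delete operation.
+		 */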
+		while (--i >= 0) {
+			filters->filters[i].add = !filters->filters[i].add;
+			bnx2x_vf_mac_vlan_config(bp, vf, qid,
+						 &filters->filters[i],
+						 drv_only);
+		}
+	}
+
+	/* It's our responsibility to free the filters */
+	kfree(filters);
+
+	return rc;
+}
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+			 struct bnx2x_vf_queue_construct_params *qctor)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+	if (rc)
+		goto op_err;
+
+	/* Schedule the configuration of any pending vlan filters */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+			       BNX2X_MSG_IOV);
+	return 0;
+op_err:
+	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+	return rc;
+}
+
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			       int qid)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+	/* If needed, clean the filtering data base */
+	if ((qid == LEADING_IDX) &&
+	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN_MAC);
+		if (rc)
+			goto op_err;
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN);
+		if (rc)
+			goto op_err;
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_MAC);
+		if (rc)
+			goto op_err;
+	}
+
+	/* Terminate queue */
+	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+		struct bnx2x_queue_state_params qstate;
+
+		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+		rc = bnx2x_queue_state_change(bp, &qstate);
+		if (rc)
+			goto op_err;
+	}
+
+	return 0;
+op_err:
+	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+	return rc;
+}
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
+{
+	struct bnx2x_mcast_list_elem *mc = NULL;
+	struct bnx2x_mcast_ramrod_params mcast;
+	int rc, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* Prepare Multicast command */
+	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+	mcast.mcast_obj = &vf->mcast_obj;
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+	if (mc_num) {
+		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+			     GFP_KERNEL);
+		if (!mc) {
+			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* clear existing mcasts */
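+	/* The DEL command uses the length of the previously configured list;
+	 * record the new length for the next invocation.
+	 */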
+	mcast.mcast_list_len = vf->mcast_list_len;
+	vf->mcast_list_len = mc_num;
+	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+	if (rc) {
+		BNX2X_ERR("Failed to remove multicasts\n");
+		kfree(mc);
+		return rc;
+	}
+
+	/* update mcast list on the ramrod params */
+	if (mc_num) {
+		INIT_LIST_HEAD(&mcast.mcast_list);
+		for (i = 0; i < mc_num; i++) {
+			mc[i].mac = mcasts[i];
+			list_add_tail(&mc[i].link,
+				      &mcast.mcast_list);
+		}
+
+		/* add new mcasts */
+		mcast.mcast_list_len = mc_num;
+		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+		if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+		kfree(mc);
+	}
+
+	return rc;
+}
+
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+				  struct bnx2x_virtf *vf,
+				  unsigned long accept_flags)
+{
+	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+	memset(ramrod, 0, sizeof(*ramrod));
+	ramrod->cid = vfq->cid;
+	ramrod->cl_id = vfq_cl_id(vf, vfq);
+	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+	ramrod->rx_accept_flags = accept_flags;
+	ramrod->tx_accept_flags = accept_flags;
+	ramrod->pstate = &vf->filter_state;
+	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		    int qid, unsigned long accept_flags)
+{
+	struct bnx2x_rx_mode_ramrod_params ramrod;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+	return bnx2x_config_rx_mode(bp, &ramrod);
+}
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+	/* Remove all classification configuration for leading queue */
+	if (qid == LEADING_IDX) {
+		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+		if (rc)
+			goto op_err;
+
+		/* Remove filtering if feasible */
+		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_VLAN_MAC);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_VLAN);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_MAC);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+			if (rc)
+				goto op_err;
+		}
+	}
+
+	/* Destroy queue */
+	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+	if (rc)
+		goto op_err;
+	return rc;
+op_err:
+	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+		  vf->abs_vfid, qid, rc);
+	return rc;
+}
+
+/* VF enable primitives
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines.
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+	u32 was_err_reg = 0;
+
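+	/* Each WAS_ERROR_VF_*_CLR register covers 32 VFs; pick the register
+	 * by group and clear this VF's bit within it.
+	 */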
+	switch (was_err_group) {
+	case 0:
+	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+	    break;
+	case 1:
+	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+	    break;
+	case 2:
+	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+	    break;
+	case 3:
+	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+	    break;
+	}
+	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
+
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int i;
+	u32 val;
+
+	/* Set VF masks and configuration - pretend */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+	val &= ~IGU_VF_CONF_PARENT_MASK;
+	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
+	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+	DP(BNX2X_MSG_IOV,
+	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+	   vf->abs_vfid, val);
+
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* iterate over all queues, clear sb consumer */
+	for (i = 0; i < vf_sb_count(vf); i++) {
+		u8 igu_sb_id = vf_igu_sb(vf, i);
+
+		/* zero prod memory */
+		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+		/* clear sb state machine */
+		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+				       false /* VF */);
+
+		/* disable + update */
+		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+				    IGU_INT_DISABLE, 1);
+	}
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+	/* set the VF-PF association in the FW */
+	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+	/* clear vf errors */
+	bnx2x_vf_semi_clear_err(bp, abs_vfid);
+	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+	/* internal vf-enable - pretend */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+	bnx2x_vf_enable_internal(bp, true);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	/* Reset vf in IGU - interrupts are still disabled */
+	bnx2x_vf_igu_reset(bp, vf);
+
+	/* pretend to enable the vf with the PBF */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+	struct pci_dev *dev;
+	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+	if (!vf)
+		return false;
+
+	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+	if (dev)
+		return bnx2x_is_pcie_pending(dev);
+	return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+	/* Verify no pending pci transactions */
+	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	return 0;
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct vf_pf_resc_request *resc = &vf->alloc_resc;
+
+	/* will be set only during VF-ACQUIRE */
+	resc->num_rxqs = 0;
+	resc->num_txqs = 0;
+
+	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
+
+	/* no real limitation */
+	resc->num_mc_filters = 0;
+
+	/* num_sbs already set */
+	resc->num_sbs = vf->sb_count;
+}
+
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	/* reset the state variables */
+	bnx2x_iov_static_resc(bp, vf);
+	vf->state = VF_FREE;
+}
+
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	/* DQ usage counter */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+					"DQ VF usage counter timed out",
+					poll_cnt);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* FW cleanup command - poll for the results */
+	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+				   poll_cnt))
+		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+	/* verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* the cleanup operations are valid if and only if the VF
+	 * was first acquired.
+	 */
+	for (i = 0; i < vf_rxq_count(vf); i++) {
+		rc = bnx2x_vf_queue_flr(bp, vf, i);
+		if (rc)
+			goto out;
+	}
+
+	/* remove multicasts */
+	bnx2x_vf_mcast(bp, vf, NULL, 0, true);
+
+	/* dispatch final cleanup and wait for HW queues to flush */
+	bnx2x_vf_flr_clnup_hw(bp, vf);
+
+	/* release VF resources */
+	bnx2x_vf_free_resc(bp, vf);
+
+	/* re-open the mailbox */
+	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+	return;
+out:
+	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+		  vf->abs_vfid, i, rc);
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
+{
+	struct bnx2x_virtf *vf;
+	int i;
+
+	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+		/* VF should be RESET & in FLR cleanup states */
+		if (bnx2x_vf(bp, i, state) != VF_RESET ||
+		    !bnx2x_vf(bp, i, flr_clnup_stage))
+			continue;
+
+		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+		   i, BNX2X_NR_VIRTFN(bp));
+
+		vf = BP_VF(bp, i);
+
+		/* lock the vf pf channel */
+		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+		/* invoke the VF FLR SM */
+		bnx2x_vf_flr(bp, vf);
+
+		/* mark the VF to be ACKED and continue */
+		vf->flr_clnup_stage = false;
+		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+	}
+
+	/* Acknowledge the handled VFs.
+	 * We acknowledge all the VFs for which an FLR was requested, even
+	 * those we never opened, since the MCP will interrupt us immediately
+	 * again if we only ack some of the bits, resulting in an endless
+	 * loop. This can happen, for example, in KVM where an 'all ones'
+	 * FLR request is sometimes given by the hypervisor.
+	 */
+	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+			  bp->vfdb->flrd_vfs[i]);
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+	/* clear the acked bits - it would be better if the MCP implemented
+	 * write-to-clear semantics
+	 */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+	int i;
+
+	/* Read FLR'd VFs */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+	DP(BNX2X_MSG_MCP,
+	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+	for_each_vf(bp, i) {
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+		u32 reset = 0;
+
+		if (vf->abs_vfid < 32)
+			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+		else
+			reset = bp->vfdb->flrd_vfs[1] &
+				(1 << (vf->abs_vfid - 32));
+
+		if (reset) {
+			/* set as reset and ready for cleanup */
+			vf->state = VF_RESET;
+			vf->flr_clnup_stage = true;
+
+			DP(BNX2X_MSG_IOV,
+			   "Initiating Final cleanup for VF %d\n",
+			   vf->abs_vfid);
+		}
+	}
+
+	/* do the FLR cleanup for all marked VFs*/
+	bnx2x_vf_flr_clnup(bp);
+}
+
+/* IOV global initialization routines  */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* Set the DQ such that the CID reflect the abs_vfid */
+	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
+	 * to the PF L2 queues
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+	/* The VF window size is the log2 of the max number of CIDs per VF */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+	 * the PF doorbell size although the two are independent.
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
+
+	/* No security checks for now -
+	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
+	 * CID range 0 - 0x1ffff
+	 */
+	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+	/* set the VF doorbell threshold. This threshold represents the amount
+	 * of doorbells allowed in the main DORQ fifo for a specific VF.
+	 */
+	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
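+	/* The VF's routing ID is the PF devfn plus the SR-IOV offset and
+	 * stride; bits above the low 8 select the bus number.
+	 */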
+	return dev->bus->number + ((dev->devfn + iov->offset +
+				    iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int i, n;
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
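+	/* The VF BARs are 64-bit, so they occupy resource entries in pairs;
+	 * each VF gets an equal slice of every BAR.
+	 */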
+	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+		size /= iov->total;
+		vf->bars[n].bar = start + size * vf->abs_vfid;
+		vf->bars[n].size = size;
+	}
+}
+
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+	return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static int
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+	int sb_id;
+	u32 val;
+	u8 fid, current_pf = 0;
+
+	/* IGU in normal mode - read CAM */
+	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+			continue;
+		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
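+		/* A PF entry marks the owning PF for the VF entries that
+		 * follow it, so only record VF SBs while our PF is current.
+		 */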
+		if (fid & IGU_FID_ENCODE_IS_PF)
+			current_pf = fid & IGU_FID_PF_NUM_MASK;
+		else if (current_pf == BP_FUNC(bp))
+			bnx2x_vf_set_igu_info(bp, sb_id,
+					      (fid & IGU_FID_VF_NUM_MASK));
+		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+	}
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+	return BP_VFDB(bp)->vf_sbs_pool;
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+	if (bp->vfdb) {
+		kfree(bp->vfdb->vfqs);
+		kfree(bp->vfdb->vfs);
+		kfree(bp->vfdb);
+	}
+	bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+	int pos;
+	struct pci_dev *dev = bp->pdev;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		BNX2X_ERR("failed to find SRIOV capability in device\n");
+		return -ENODEV;
+	}
+
+	iov->pos = pos;
+	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+	return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+	u32 val;
+
+	/* read the SRIOV capability structure
+	 * The fields can be read via configuration read or
+	 * directly from the device (starting at offset PCICFG_OFFSET)
+	 */
+	if (bnx2x_sriov_pci_cfg_info(bp, iov))
+		return -ENODEV;
+
+	/* get the number of SRIOV bars */
+	iov->nres = 0;
+
+	/* read the first_vfid */
+	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
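+	/* The register value is in units of 8 VFs; convert it to an absolute
+	 * first-VF id and make it relative to this path.
+	 */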
+	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+
+	DP(BNX2X_MSG_IOV,
+	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+	   BP_FUNC(bp),
+	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+	return 0;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+		       int num_vfs_param)
+{
+	int err, i;
+	struct bnx2x_sriov *iov;
+	struct pci_dev *dev = bp->pdev;
+
+	bp->vfdb = NULL;
+
+	/* verify is pf */
+	if (IS_VF(bp))
+		return 0;
+
+	/* verify sriov capability is present in configuration space */
+	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+		return 0;
+
+	/* verify chip revision */
+	if (CHIP_IS_E1x(bp))
+		return 0;
+
+	/* check if SRIOV support is turned off */
+	if (!num_vfs_param)
+		return 0;
+
+	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+		return 0;
+	}
+
+	/* SRIOV can be enabled only with MSIX */
+	if (int_mode_param == BNX2X_INT_MODE_MSI ||
+	    int_mode_param == BNX2X_INT_MODE_INTX) {
+		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+		return 0;
+	}
+
+	err = -EIO;
+	/* verify ari is enabled */
+	if (!bnx2x_ari_enabled(bp->pdev)) {
+		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+		return 0;
+	}
+
+	/* verify igu is in normal mode */
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
+		return 0;
+	}
+
+	/* allocate the vfs database */
+	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+	if (!bp->vfdb) {
+		BNX2X_ERR("failed to allocate vf database\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	/* get the sriov info - Linux already collected all the pertinent
+	 * information; however, the sriov structure is for the private use
+	 * of the pci module. Also, we want this information regardless
+	 * of the hypervisor.
+	 */
+	iov = &(bp->vfdb->sriov);
+	err = bnx2x_sriov_info(bp, iov);
+	if (err)
+		goto failed;
+
+	/* SR-IOV capability was enabled but there are no VFs */
+	if (iov->total == 0)
+		goto failed;
+
+	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+	   num_vfs_param, iov->nr_virtfn);
+
+	/* allocate the vf array */
+	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+	if (!bp->vfdb->vfs) {
+		BNX2X_ERR("failed to allocate vf array\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+	for_each_vf(bp, i) {
+		bnx2x_vf(bp, i, index) = i;
+		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+		bnx2x_vf(bp, i, state) = VF_FREE;
+		mutex_init(&bnx2x_vf(bp, i, op_mutex));
+		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+	}
+
+	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+	if (!bnx2x_get_vf_igu_cam_info(bp)) {
+		BNX2X_ERR("No entries in IGU CAM for vfs\n");
+		err = -EINVAL;
+		goto failed;
+	}
+
+	/* allocate the queue arrays for all VFs */
+	bp->vfdb->vfqs = kzalloc(
+		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+		GFP_KERNEL);
+
+	if (!bp->vfdb->vfqs) {
+		BNX2X_ERR("failed to allocate vf queue array\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	/* Prepare the VFs event synchronization mechanism */
+	mutex_init(&bp->vfdb->event_mutex);
+
+	mutex_init(&bp->vfdb->bulletin_mutex);
+
+	if (SHMEM2_HAS(bp, sriov_switch_mode))
+		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
+	return 0;
+failed:
+	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+	__bnx2x_iov_free_vfdb(bp);
+	return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+	int vf_idx;
+
+	/* if SRIOV is not enabled there's nothing to do */
+	if (!IS_SRIOV(bp))
+		return;
+
+	bnx2x_disable_sriov(bp);
+
+	/* disable access to all VFs */
+	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+		bnx2x_pretend_func(bp,
+				   HW_VF_HANDLE(bp,
+						bp->vfdb->sriov.first_vf_in_pf +
+						vf_idx));
+		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+		bnx2x_vf_enable_internal(bp, 0);
+		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+	}
+
+	/* free vf database */
+	__bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+	int i;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* free vfs hw contexts */
+	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+		struct hw_dma *cxt = &bp->vfdb->context[i];
+		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+	}
+
+	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+		       BP_VFDB(bp)->sp_dma.mapping,
+		       BP_VFDB(bp)->sp_dma.size);
+
+	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+		       BP_VF_MBX_DMA(bp)->mapping,
+		       BP_VF_MBX_DMA(bp)->size);
+
+	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+		       BP_VF_BULLETIN_DMA(bp)->mapping,
+		       BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+	size_t tot_size;
+	int i, rc = 0;
+
+	if (!IS_SRIOV(bp))
+		return rc;
+
+	/* allocate vfs hw contexts */
+	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
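+	/* Spread the total context size across CDU ILT pages; the last page
+	 * in use may be only partially filled.
+	 */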
+	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+		if (cxt->size) {
+			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+			if (!cxt->addr)
+				goto alloc_mem_err;
+		} else {
+			cxt->addr = NULL;
+			cxt->mapping = 0;
+		}
+		tot_size -= cxt->size;
+	}
+
+	/* allocate vfs ramrods dma memory - client_init and set_mac */
+	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+						   tot_size);
+	if (!BP_VFDB(bp)->sp_dma.addr)
+		goto alloc_mem_err;
+	BP_VFDB(bp)->sp_dma.size = tot_size;
+
+	/* allocate mailboxes */
+	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+						  tot_size);
+	if (!BP_VF_MBX_DMA(bp)->addr)
+		goto alloc_mem_err;
+
+	BP_VF_MBX_DMA(bp)->size = tot_size;
+
+	/* allocate local bulletin boards */
+	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+						       tot_size);
+	if (!BP_VF_BULLETIN_DMA(bp)->addr)
+		goto alloc_mem_err;
+
+	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+	return 0;
+
+alloc_mem_err:
+	return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+	unsigned long q_type = 0;
+
+	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+	/* Queue State object */
+	bnx2x_init_queue_obj(bp, &q->sp_obj,
+			     cl_id, &q->cid, 1, func_id,
+			     bnx2x_vf_sp(bp, vf, q_data),
+			     bnx2x_vf_sp_map(bp, vf, q_data),
+			     q_type);
+
+	/* sp indication is set only when vlan/mac/etc. are initialized */
+	q->sp_initialized = false;
+
+	DP(BNX2X_MSG_IOV,
+	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
+}
+
+static int bnx2x_max_speed_cap(struct bnx2x *bp)
+{
+	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
+
+	if (supported &
+	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
+		return 20000;
+
+	return 10000; /* assume lowest supported speed is 10G */
+}
+
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
+{
+	struct bnx2x_link_report_data *state = &bp->last_reported_link;
+	struct pf_vf_bulletin_content *bulletin;
+	struct bnx2x_virtf *vf;
+	bool update = true;
+	int rc = 0;
+
+	/* sanity and init */
+	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
+	if (rc)
+		return rc;
+
+	mutex_lock(&bp->vfdb->bulletin_mutex);
+
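+	/* AUTO mirrors the PF link report, DISABLE forces the link down and
+	 * ENABLE forces it up at the maximum supported speed.
+	 */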
+	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
+		bulletin->valid_bitmap |= 1 << LINK_VALID;
+
+		bulletin->link_speed = state->line_speed;
+		bulletin->link_flags = 0;
+		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+			     &state->link_report_flags))
+			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+		if (test_bit(BNX2X_LINK_REPORT_FD,
+			     &state->link_report_flags))
+			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
+		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+			     &state->link_report_flags))
+			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
+		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+			     &state->link_report_flags))
+			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
+	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
+		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+		bulletin->valid_bitmap |= 1 << LINK_VALID;
+		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
+		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+		bulletin->valid_bitmap |= 1 << LINK_VALID;
+		bulletin->link_speed = bnx2x_max_speed_cap(bp);
+		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
+	} else {
+		update = false;
+	}
+
+	if (update) {
+		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
+		   "vf %d mode %u speed %d flags %x\n", idx,
+		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
+
+		/* Post update on VF's bulletin board */
+		rc = bnx2x_post_vf_bulletin(bp, idx);
+		if (rc) {
+			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&bp->vfdb->bulletin_mutex);
+	return rc;
+}
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_virtf *vf = BP_VF(bp, idx);
+
+	if (!vf)
+		return -EINVAL;
+
+	if (vf->link_cfg == link_state)
+		return 0; /* nothing to do */
+
+	vf->link_cfg = link_state;
+
+	return bnx2x_iov_link_update_vf(bp, idx);
+}
+
+void bnx2x_iov_link_update(struct bnx2x *bp)
+{
+	int vfid;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	for_each_vf(bp, vfid)
+		bnx2x_iov_link_update_vf(bp, vfid);
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+	int vfid;
+
+	if (!IS_SRIOV(bp)) {
+		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+		return 0;
+	}
+
+	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+	/* let FLR complete ... */
+	msleep(100);
+
+	/* initialize vf database */
+	for_each_vf(bp, vfid) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+			BNX2X_CIDS_PER_VF;
+
+		union cdu_context *base_cxt = (union cdu_context *)
+			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+			(base_vf_cid & (ILT_PAGE_CIDS-1));
+
+		DP(BNX2X_MSG_IOV,
+		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+		/* init statically provisioned resources */
+		bnx2x_iov_static_resc(bp, vf);
+
+		/* queues are initialized during VF-ACQUIRE */
+		vf->filter_state = 0;
+		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+				       vf_vlan_rules_cnt(vf));
+		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+				       vf_mac_rules_cnt(vf));
+
+		/*  init mcast object - This object will be re-initialized
+		 *  during VF-ACQUIRE with the proper cl_id and cid.
+		 *  It needs to be initialized here so that it can be safely
+		 *  handled by a subsequent FLR flow.
+		 */
+		vf->mcast_list_len = 0;
+		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+				     0xFF, 0xFF, 0xFF,
+				     bnx2x_vf_sp(bp, vf, mcast_rdata),
+				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+				     BNX2X_FILTER_MCAST_PENDING,
+				     &vf->filter_state,
+				     BNX2X_OBJ_TYPE_RX_TX);
+
+		/* set the mailbox message addresses */
+		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+			MBX_MSG_ALIGNED_SIZE);
+
+		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+			vfid * MBX_MSG_ALIGNED_SIZE;
+
+		/* Enable vf mailbox */
+		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+	}
+
+	/* Final VF init */
+	for_each_vf(bp, vfid) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+		/* fill in the BDF and bars */
+		vf->bus = bnx2x_vf_bus(bp, vfid);
+		vf->devfn = bnx2x_vf_devfn(bp, vfid);
+		bnx2x_vf_set_bars(bp, vf);
+
+		DP(BNX2X_MSG_IOV,
+		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+		   vf->abs_vfid, vf->bus, vf->devfn,
+		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
+		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
+		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
+	}
+
+	return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+	int i;
+
+	if (!IS_SRIOV(bp))
+		return 0;
+
+	/* release all the VFs */
+	for_each_vf(bp, i)
+		bnx2x_vf_release(bp, BP_VF(bp, i));
+
+	return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+	int i;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+	if (!IS_SRIOV(bp))
+		return line;
+
+	/* set vfs ilt lines */
+	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+		ilt->lines[line+i].page = hw_cxt->addr;
+		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+	}
+	return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+	return ((cid >= BNX2X_FIRST_VF_CID) &&
+		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+					struct bnx2x_vf_queue *vfq,
+					union event_ring_elem *elem)
+{
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+
+	/* Always push next commands out, don't wait here */
+	set_bit(RAMROD_CONT, &ramrod_flags);
+
+	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+	case BNX2X_FILTER_MAC_PENDING:
+		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+					   &ramrod_flags);
+		break;
+	case BNX2X_FILTER_VLAN_PENDING:
+		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+					    &ramrod_flags);
+		break;
+	default:
+		BNX2X_ERR("Unsupported classification command: %d\n",
+			  elem->message.data.eth_event.echo);
+		return;
+	}
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+	else if (rc > 0)
+		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+			       struct bnx2x_virtf *vf)
+{
+	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+	int rc;
+
+	rparam.mcast_obj = &vf->mcast_obj;
+	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+	/* If there are pending mcast commands - send them */
+	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+		if (rc < 0)
+			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+				  rc);
+	}
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+				 struct bnx2x_virtf *vf)
+{
+	smp_mb__before_atomic();
+	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	smp_mb__after_atomic();
+}
+
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+					   struct bnx2x_virtf *vf)
+{
+	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+	struct bnx2x_virtf *vf;
+	int qidx = 0, abs_vfid;
+	u8 opcode;
+	u16 cid = 0xffff;
+
+	if (!IS_SRIOV(bp))
+		return 1;
+
+	/* first get the cid - the only events we handle here are cfc-delete
+	 * and set-mac completion
+	 */
+	opcode = elem->message.opcode;
+
+	switch (opcode) {
+	case EVENT_RING_OPCODE_CFC_DEL:
+		cid = SW_CID((__force __le32)
+			     elem->message.data.cfc_del_event.cid);
+		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+		break;
+	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+	case EVENT_RING_OPCODE_MULTICAST_RULES:
+	case EVENT_RING_OPCODE_FILTERS_RULES:
+	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+		cid = (elem->message.data.eth_event.echo &
+		       BNX2X_SWCID_MASK);
+		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+		break;
+	case EVENT_RING_OPCODE_VF_FLR:
+		abs_vfid = elem->message.data.vf_flr_event.vf_id;
+		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+		   abs_vfid);
+		goto get_vf;
+	case EVENT_RING_OPCODE_MALICIOUS_VF:
+		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+			  abs_vfid,
+			  elem->message.data.malicious_vf_event.err_id);
+		goto get_vf;
+	default:
+		return 1;
+	}
+
+	/* check if the cid is in the VF range */
+	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+		return 1;
+	}
+
+	/* extract vf and rxq index from vf_cid - relies on the following:
+	 * 1. vfid on cid reflects the true abs_vfid
+	 * 2. The max number of VFs (per path) is 64
+	 */
+	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+get_vf:
+	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+	if (!vf) {
+		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+			  cid, abs_vfid);
+		return 0;
+	}
+
+	switch (opcode) {
+	case EVENT_RING_OPCODE_CFC_DEL:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+		   vf->abs_vfid, qidx);
+		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+						       &vfq_get(vf,
+								qidx)->sp_obj,
+						       BNX2X_Q_CMD_CFC_DEL);
+		break;
+	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+		   vf->abs_vfid, qidx);
+		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+		break;
+	case EVENT_RING_OPCODE_MULTICAST_RULES:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+		   vf->abs_vfid, qidx);
+		bnx2x_vf_handle_mcast_eqe(bp, vf);
+		break;
+	case EVENT_RING_OPCODE_FILTERS_RULES:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+		   vf->abs_vfid, qidx);
+		bnx2x_vf_handle_filters_eqe(bp, vf);
+		break;
+	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+		   vf->abs_vfid, qidx);
+		bnx2x_vf_handle_rss_update_eqe(bp, vf);
+	case EVENT_RING_OPCODE_VF_FLR:
+	case EVENT_RING_OPCODE_MALICIOUS_VF:
+		/* Do nothing for now */
+		return 0;
+	}
+
+	return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+	/* extract the vf from vf_cid - relies on the following:
+	 * 1. vfid on cid reflects the true abs_vfid
+	 * 2. The max number of VFs (per path) is 64
+	 */
+	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+				struct bnx2x_queue_sp_obj **q_obj)
+{
+	struct bnx2x_virtf *vf;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+	if (vf) {
+		/* extract queue index from vf_cid - relies on the following:
+		 * 1. vfid on cid reflects the true abs_vfid
+		 * 2. The max number of VFs (per path) is 64
+		 */
+		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+	} else {
+		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+	}
+}
+
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+	int i;
+	int first_queue_query_index, num_queues_req;
+	dma_addr_t cur_data_offset;
+	struct stats_query_entry *cur_query_entry;
+	u8 stats_count = 0;
+	bool is_fcoe = false;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	if (!NO_FCOE(bp))
+		is_fcoe = true;
+
+	/* fcoe adds one global request and one queue request */
+	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+		(is_fcoe ? 0 : 1);
+
+	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+	       first_queue_query_index + num_queues_req);
+
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+		num_queues_req * sizeof(struct per_queue_stats);
+
+	cur_query_entry = &bp->fw_stats_req->
+		query[first_queue_query_index + num_queues_req];
+
+	for_each_vf(bp, i) {
+		int j;
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+		if (vf->state != VF_ENABLED) {
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+			       "vf %d not enabled so no stats for it\n",
+			       vf->abs_vfid);
+			continue;
+		}
+
+		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+		for_each_vfq(vf, j) {
+			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+			dma_addr_t q_stats_addr =
+				vf->fw_stat_map + j * vf->stats_stride;
+
+			/* collect stats from active queues only */
+			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+			    BNX2X_Q_LOGICAL_STATE_STOPPED)
+				continue;
+
+			/* create stats query entry for this queue */
+			cur_query_entry->kind = STATS_TYPE_QUEUE;
+			cur_query_entry->index = vfq_stat_id(vf, rxq);
+			cur_query_entry->funcID =
+				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+			cur_query_entry->address.hi =
+				cpu_to_le32(U64_HI(q_stats_addr));
+			cur_query_entry->address.lo =
+				cpu_to_le32(U64_LO(q_stats_addr));
+			DP(BNX2X_MSG_IOV,
+			   "added address %x %x for vf %d queue %d client %d\n",
+			   cur_query_entry->address.hi,
+			   cur_query_entry->address.lo, cur_query_entry->funcID,
+			   j, cur_query_entry->index);
+			cur_query_entry++;
+			cur_data_offset += sizeof(struct per_queue_stats);
+			stats_count++;
+
+			/* all stats are coalesced to the leading queue */
+			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+				break;
+		}
+	}
+	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+				u8 enable)
+{
+	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
+	REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int i;
+
+	for_each_vfq(vf, i)
+		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	u32 val;
+
+	/* clear the VF configuration - pretend */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+		     BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct vf_pf_resc_request *req_resc)
+{
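+	/* A zero queue count means the VF has not acquired resources yet, so
+	 * compare against the static per-VF maximum instead.
+	 */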
+	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+	return ((req_resc->num_rxqs <= rxq_cnt) &&
+		(req_resc->num_txqs <= txq_cnt) &&
+		(req_resc->num_sbs <= vf_sb_count(vf))   &&
+		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		     struct vf_pf_resc_request *resc)
+{
+	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+		BNX2X_CIDS_PER_VF;
+
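+	/* Locate the VF's first CDU context within its ILT page */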
+	union cdu_context *base_cxt = (union cdu_context *)
+		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+		(base_vf_cid & (ILT_PAGE_CIDS-1));
+	int i;
+
+	/* if state is 'acquired' the VF was not released or FLR'd; in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do not exceed
+	 * the already acquired numbers.
+	 */
+	if (vf->state == VF_ACQUIRED) {
+		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+		   vf->abs_vfid);
+
+		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+		BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
+				  vf->abs_vfid);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Otherwise vf state must be 'free' or 'reset' */
+	if (vf->state != VF_FREE && vf->state != VF_RESET) {
+		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+			  vf->abs_vfid, vf->state);
+		return -EINVAL;
+	}
+
+	/* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request if
+	 * the requested numbers exceed these globals.
+	 */
+	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+		DP(BNX2X_MSG_IOV,
+		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
+		/* set the max resource in the vf */
+		return -ENOMEM;
+	}
+
+	/* Set resources counters - 0 request means max available */
+	vf_sb_count(vf) = resc->num_sbs;
+	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+	DP(BNX2X_MSG_IOV,
+	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+	   vf_sb_count(vf), vf_rxq_count(vf),
+	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
+	   vf_vlan_rules_cnt(vf));
+
+	/* Initialize the queues */
+	if (!vf->vfqs) {
+		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+		return -EINVAL;
+	}
+
+	for_each_vfq(vf, i) {
+		struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+		if (!q) {
+			BNX2X_ERR("q number %d was not allocated\n", i);
+			return -EINVAL;
+		}
+
+		q->index = i;
+		q->cxt = &((base_cxt + i)->eth);
+		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+		/* init SP objects */
+		bnx2x_vfq_init(bp, vf, q);
+	}
+	vf->state = VF_ACQUIRED;
+	return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+	struct bnx2x_func_init_params func_init = {0};
+	int i;
+
+	/* the sb resources are initialized at this point, do the
+	 * FW/HW initializations
+	 */
+	for_each_vf_sb(vf, i)
+		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+	/* Sanity checks */
+	if (vf->state != VF_ACQUIRED) {
+		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+		   vf->abs_vfid, vf->state);
+		return -EINVAL;
+	}
+
+	/* let FLR complete ... */
+	msleep(100);
+
+	/* FLR cleanup epilogue */
+	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+		return -EBUSY;
+
+	/* reset IGU VF statistics: MSIX */
+	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+	/* function setup */
+	func_init.pf_id = BP_FUNC(bp);
+	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+	bnx2x_func_init(bp, &func_init);
+
+	/* Enable the vf */
+	bnx2x_vf_enable_access(bp, vf->abs_vfid);
+	bnx2x_vf_enable_traffic(bp, vf);
+
+	/* queue protection table */
+	for_each_vfq(vf, i)
+		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+	vf->state = VF_ENABLED;
+
+	/* update vf bulletin board */
+	bnx2x_post_vf_bulletin(bp, vf->index);
+
+	return 0;
+}
+
+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+static void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc = 0, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* Close all queues */
+	for (i = 0; i < vf_rxq_count(vf); i++) {
+		rc = bnx2x_vf_queue_teardown(bp, vf, i);
+		if (rc)
+			goto op_err;
+	}
+
+	/* disable the interrupts */
+	DP(BNX2X_MSG_IOV, "disabling igu\n");
+	bnx2x_vf_igu_disable(bp, vf);
+
+	/* disable the VF */
+	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+	bnx2x_vf_clr_qtbl(bp, vf);
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+		if (rc)
+			goto op_err;
+	}
+
+	DP(BNX2X_MSG_IOV, "set state to acquired\n");
+
+	return 0;
+op_err:
+	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+	return rc;
+}
+
+/* VF release can be called when either: 1. the VF was acquired but
+ * not enabled, or 2. the VF was enabled or in the process of being
+ * enabled.
+ */
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+	   vf->state == VF_FREE ? "Free" :
+	   vf->state == VF_ACQUIRED ? "Acquired" :
+	   vf->state == VF_ENABLED ? "Enabled" :
+	   vf->state == VF_RESET ? "Reset" :
+	   "Unknown");
+
+	switch (vf->state) {
+	case VF_ENABLED:
+		rc = bnx2x_vf_close(bp, vf);
+		if (rc)
+			goto op_err;
+		/* Fallthrough to release resources */
+	case VF_ACQUIRED:
+		DP(BNX2X_MSG_IOV, "about to free resources\n");
+		bnx2x_vf_free_resc(bp, vf);
+		break;
+
+	case VF_FREE:
+	case VF_RESET:
+	default:
+		break;
+	}
+	return 0;
+op_err:
+	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+	return rc;
+}
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct bnx2x_config_rss_params *rss)
+{
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+	return bnx2x_config_rss(bp, rss);
+}
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct vfpf_tpa_tlv *tlv,
+			struct bnx2x_queue_update_tpa_params *params)
+{
+	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+	struct bnx2x_queue_state_params qstate;
+	int qid, rc = 0;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* Set ramrod params */
+	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+	memcpy(&qstate.params.update_tpa, params,
+	       sizeof(struct bnx2x_queue_update_tpa_params));
+	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+
+	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+		qstate.params.update_tpa.sge_map = sge_addr[qid];
+		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+		   U64_LO(sge_addr[qid]));
+		rc = bnx2x_queue_state_change(bp, &qstate);
+		if (rc) {
+			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+				  vf->abs_vfid, qid);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
+	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+	rc = bnx2x_vf_free(bp, vf);
+	if (rc)
+		WARN(rc,
+		     "VF[%d] Failed to release resources - rc=%d\n",
+		     vf->abs_vfid, rc);
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+	return rc;
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      enum channel_tlvs tlv)
+{
+	/* we don't lock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(tlv)) {
+		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+		return;
+	}
+
+	/* lock the channel */
+	mutex_lock(&vf->op_mutex);
+
+	/* record the locking op */
+	vf->op_current = tlv;
+
+	/* log the lock */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+	   vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				enum channel_tlvs expected_tlv)
+{
+	enum channel_tlvs current_tlv;
+
+	if (!vf) {
+		BNX2X_ERR("VF was %p\n", vf);
+		return;
+	}
+
+	current_tlv = vf->op_current;
+
+	/* we don't unlock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(expected_tlv))
+		return;
+
+	WARN(expected_tlv != vf->op_current,
+	     "lock mismatch: expected %d found %d", expected_tlv,
+	     vf->op_current);
+
+	/* record the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+
+	/* unlock the channel */
+	mutex_unlock(&vf->op_mutex);
+
+	/* log the unlock */
+	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+	   vf->abs_vfid, current_tlv);
+}
+
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+	struct bnx2x_queue_state_params q_params;
+	u32 prev_flags;
+	int i, rc;
+
+	/* Verify changes are needed and record current Tx switching state */
+	prev_flags = bp->flags;
+	if (enable)
+		bp->flags |= TX_SWITCHING;
+	else
+		bp->flags &= ~TX_SWITCHING;
+	if (prev_flags == bp->flags)
+		return 0;
+
+	/* Verify state enables the sending of queue ramrods */
+	if ((bp->state != BNX2X_STATE_OPEN) ||
+	    (bnx2x_get_q_logical_state(bp,
+				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
+		return 0;
+
+	/* send q. update ramrod to configure Tx switching */
+	memset(&q_params, 0, sizeof(q_params));
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+		  &q_params.params.update.update_flags);
+	if (enable)
+		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			  &q_params.params.update.update_flags);
+	else
+		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			    &q_params.params.update.update_flags);
+
+	/* send the ramrod on all the queues of the PF */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Set the appropriate Queue object */
+		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure Tx switching\n");
+			return rc;
+		}
+	}
+
+	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+	return 0;
+}
+
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
+{
+	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+	/* HW channel is only operational when PF is up */
+	if (bp->state != BNX2X_STATE_OPEN) {
+		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
+		return -EINVAL;
+	}
+
+	/* we are always bound by the total_vfs in the configuration space */
+	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
+		num_vfs_param = BNX2X_NR_VIRTFN(bp);
+	}
+
+	bp->requested_nr_virtfn = num_vfs_param;
+	if (num_vfs_param == 0) {
+		bnx2x_set_pf_tx_switching(bp, false);
+		bnx2x_disable_sriov(bp);
+		return 0;
+	} else {
+		return bnx2x_enable_sriov(bp);
+	}
+}
+
+#define IGU_ENTRY_SIZE 4
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+	int rc = 0, req_vfs = bp->requested_nr_virtfn;
+	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+	u32 igu_entry, address;
+	u16 num_vf_queues;
+
+	if (req_vfs == 0)
+		return 0;
+
+	first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+	/* statically distribute vf sb pool between VFs */
+	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+	/* zero previous values learned from igu cam */
+	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		vf->sb_count = 0;
+		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+	}
+	bp->vfdb->vf_sbs_pool = 0;
+
+	/* prepare IGU cam */
+	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+				IGU_REG_MAPPING_MEMORY_VALID;
+			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+			   sb_idx, vf_idx);
+			REG_WR(bp, address, igu_entry);
+			sb_idx++;
+			address += IGU_ENTRY_SIZE;
+		}
+	}
+
+	/* Reinitialize vf database according to igu cam */
+	bnx2x_get_vf_igu_cam_info(bp);
+
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+	qcount = 0;
+	for_each_vf(bp, vf_idx) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		/* set local queue arrays */
+		vf->vfqs = &bp->vfdb->vfqs[qcount];
+		qcount += vf_sb_count(vf);
+		bnx2x_iov_static_resc(bp, vf);
+	}
+
+	/* prepare msix vectors in VF configuration space - the value in the
+	 * PCI configuration space should be the index of the last entry,
+	 * namely one less than the actual size of the table
+	 */
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+		       num_vf_queues - 1);
+		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+		   vf_idx, num_vf_queues - 1);
+	}
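+	/* stop pretending to be a VF - restore the PF's own function id */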
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* enable sriov. This will probe all the VFs, and consequently cause
+	 * the "acquire" messages to appear on the VF PF channel. SR-IOV is
+	 * first disabled so it can be cleanly re-enabled with the requested
+	 * number of VFs.
+	 */
+	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+	bnx2x_disable_sriov(bp);
+
+	rc = bnx2x_set_pf_tx_switching(bp, true);
+	if (rc)
+		return rc;
+
+	rc = pci_enable_sriov(bp->pdev, req_vfs);
+	if (rc) {
+		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+		return rc;
+	}
+	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+	return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+	int vfidx;
+	struct pf_vf_bulletin_content *bulletin;
+
+	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+	for_each_vf(bp, vfidx) {
+		bulletin = BP_VF_BULLETIN(bp, vfidx);
+		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+	}
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
+	if (pci_vfs_assigned(bp->pdev)) {
+		DP(BNX2X_MSG_IOV,
+		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+		return;
+	}
+
+	pci_disable_sriov(bp->pdev);
+}
+
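+/* Common sanity checking for the SR-IOV ndo handlers: resolves the VF
+ * context and its bulletin board, and optionally verifies that the VF's
+ * queue structures have been allocated.
+ */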
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+			    struct bnx2x_virtf **vf,
+			    struct pf_vf_bulletin_content **bulletin,
+			    bool test_queue)
+{
+	if (bp->state != BNX2X_STATE_OPEN) {
+		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
+		return -EINVAL;
+	}
+
+	if (!IS_SRIOV(bp)) {
+		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
+		return -EINVAL;
+	}
+
+	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+			  vfidx, BNX2X_NR_VIRTFN(bp));
+		return -EINVAL;
+	}
+
+	/* init members */
+	*vf = BP_VF(bp, vfidx);
+	*bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+	if (!*vf) {
+		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
+		return -EINVAL;
+	}
+
+	if (test_queue && !(*vf)->vfqs) {
+		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+			  vfidx);
+		return -EINVAL;
+	}
+
+	if (!*bulletin) {
+		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
+			  vfidx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+			struct ifla_vf_info *ivi)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_virtf *vf = NULL;
+	struct pf_vf_bulletin_content *bulletin = NULL;
+	struct bnx2x_vlan_mac_obj *mac_obj;
+	struct bnx2x_vlan_mac_obj *vlan_obj;
+	int rc;
+
+	/* sanity and init */
+	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+	if (rc)
+		return rc;
+
+	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	if (!mac_obj || !vlan_obj) {
+		BNX2X_ERR("VF partially initialized\n");
+		return -EINVAL;
+	}
+
+	ivi->vf = vfidx;
+	ivi->qos = 0;
+	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+	ivi->min_tx_rate = 0;
+	ivi->spoofchk = 1; /* always enabled */
+	if (vf->state == VF_ENABLED) {
+		/* mac and vlan are in vlan_mac objects */
+		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+						0, ETH_ALEN);
+			vlan_obj->get_n_elements(bp, vlan_obj, 1,
+						 (u8 *)&ivi->vlan, 0,
+						 VLAN_HLEN);
+		}
+	} else {
+		mutex_lock(&bp->vfdb->bulletin_mutex);
+		/* mac */
+		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+			/* mac configured by ndo so its in bulletin board */
+			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+		else
+			/* function has not been loaded yet. Show mac as 0s */
+			eth_zero_addr(ivi->mac);
+
+		/* vlan */
+		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+			/* vlan configured by ndo so its in bulletin board */
+			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+		else
+			/* function has not been loaded yet. Show vlans as 0s */
+			memset(&ivi->vlan, 0, VLAN_HLEN);
+
+		mutex_unlock(&bp->vfdb->bulletin_mutex);
+	}
+
+	return 0;
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ *    supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ *    will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet setup a queue - post the new
+ *    mac on VF's bulletin board right now. VF will configure this mac when it
+ *    is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ *    queue and manually config the new mac.
+ * In any event, once this function has been called, refuse any attempt by the
+ * VF to configure any mac for itself except this one. In case of a race where
+ * the VF fails to see the new post on its bulletin board before sending a mac
+ * configuration request, the PF will simply fail the request and the VF can
+ * try again after consulting its bulletin board.
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc, q_logical_state;
+	struct bnx2x_virtf *vf = NULL;
+	struct pf_vf_bulletin_content *bulletin = NULL;
+
+	if (!is_valid_ether_addr(mac)) {
+		BNX2X_ERR("mac address invalid\n");
+		return -EINVAL;
+	}
+
+	/* sanity and init */
+	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&bp->vfdb->bulletin_mutex);
+
+	/* update PF's copy of the VF's bulletin. The PF will no longer accept
+	 * mac configuration requests from the VF unless they match this mac
+	 */
+	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+	memcpy(bulletin->mac, mac, ETH_ALEN);
+
+	/* Post update on VF's bulletin board */
+	rc = bnx2x_post_vf_bulletin(bp, vfidx);
+
+	/* release lock before checking return code */
+	mutex_unlock(&bp->vfdb->bulletin_mutex);
+
+	if (rc) {
+		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+		return rc;
+	}
+
+	q_logical_state =
+		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
+	if (vf->state == VF_ENABLED &&
+	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+		/* configure the mac in device on this vf's queue */
+		unsigned long ramrod_flags = 0;
+		struct bnx2x_vlan_mac_obj *mac_obj;
+
+		/* User should be able to see failure reason in system logs */
+		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+			return -EINVAL;
+
+		/* must lock vfpf channel to protect against vf flows */
+		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+		/* remove existing eth macs */
+		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+		if (rc) {
+			BNX2X_ERR("failed to delete eth macs\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* remove existing uc list macs */
+		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+		if (rc) {
+			BNX2X_ERR("failed to delete uc_list macs\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* configure the new mac to device */
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+				  BNX2X_ETH_MAC, &ramrod_flags);
+
+out:
+		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+	}
+
+	return rc;
+}
+
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf, bool accept)
+{
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+	unsigned long accept_flags;
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (accept)
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    u16 vlan, bool add)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+					: BNX2X_VLAN_MAC_DEL;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+	struct pf_vf_bulletin_content *bulletin = NULL;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_mac_obj *vlan_obj;
+	unsigned long vlan_mac_flags = 0;
+	unsigned long ramrod_flags = 0;
+	struct bnx2x_virtf *vf = NULL;
+	int i, rc;
+
+	if (vlan > 4095) {
+		BNX2X_ERR("illegal vlan value %d\n", vlan);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+	   vfidx, vlan, 0);
+
+	/* sanity and init */
+	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+	if (rc)
+		return rc;
+
+	/* update PF's copy of the VF's bulletin. No point in posting the vlan
+	 * to the VF since it doesn't have anything to do with it. But it is
+	 * useful to store it here in case the VF is not up yet, so the vlan
+	 * can be configured later once it is. Treat vlan id 0 as a request to
+	 * remove the host tag.
+	 */
+	mutex_lock(&bp->vfdb->bulletin_mutex);
+
+	if (vlan > 0)
+		bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	else
+		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
+	bulletin->vlan = vlan;
+
+	/* Post update on VF's bulletin board */
+	rc = bnx2x_post_vf_bulletin(bp, vfidx);
+	if (rc)
+		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+	mutex_unlock(&bp->vfdb->bulletin_mutex);
+
+	/* is vf initialized and queue set up? */
+	if (vf->state != VF_ENABLED ||
+	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+		return rc;
+
+	/* User should be able to see error in system logs */
+	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+		return -EINVAL;
+
+	/* must lock vfpf channel to protect against vf flows */
+	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+	/* remove existing vlans */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+				  &ramrod_flags);
+	if (rc) {
+		BNX2X_ERR("failed to delete vlans\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* clear accept_any_vlan when HV forces vlan, otherwise
+	 * according to VF capabilities
+	 */
+	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
+
+	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+	if (rc)
+		goto out;
+
+	/* send queue update ramrods to configure default vlan and
+	 * silent vlan removal
+	 */
+	for_each_vfq(vf, i) {
+		struct bnx2x_queue_state_params q_params = {NULL};
+		struct bnx2x_queue_update_params *update_params;
+
+		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+		/* validate the Q is UP */
+		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+			continue;
+
+		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+		q_params.cmd = BNX2X_Q_CMD_UPDATE;
+		update_params = &q_params.params.update;
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			  &update_params->update_flags);
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			  &update_params->update_flags);
+		if (vlan == 0) {
+			/* if vlan is 0 then we want to leave the VF traffic
+			 * untagged, and leave the incoming traffic untouched
+			 * (i.e. do not remove any vlan tags).
+			 */
+			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				    &update_params->update_flags);
+			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				    &update_params->update_flags);
+		} else {
+			/* configure default vlan to vf queue and set silent
+			 * vlan removal (the vf remains unaware of this vlan).
+			 */
+			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				  &update_params->update_flags);
+			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				  &update_params->update_flags);
+			update_params->def_vlan = vlan;
+			update_params->silent_removal_value =
+				vlan & VLAN_VID_MASK;
+			update_params->silent_removal_mask = VLAN_VID_MASK;
+		}
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+				  i);
+			goto out;
+		}
+	}
+out:
+	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+	if (!rc)
+		DP(BNX2X_MSG_IOV,
+		   "updated VF[%d] vlan configuration (vlan = %d)\n",
+		   vfidx, vlan);
+
+	return rc;
+}
+
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * as the Bulletin Board was posted by a PF with possibly a different version
+ * from the vf which will sample it. Therefore, the length is computed by the
+ * PF and then used blindly by the VF.
+ */
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
+{
+	return crc32(BULLETIN_CRC_SEED,
+		 ((u8 *)bulletin) + sizeof(bulletin->crc),
+		 bulletin->length - sizeof(bulletin->crc));
+}
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+	struct pf_vf_bulletin_content *bulletin;
+	int attempts;
+
+	/* sampling the structure while the PF is in mid-post may yield
+	 * corrupted data; validate the crc to ensure coherency.
+	 */
+	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+		u32 crc;
+
+		/* sample the bulletin board */
+		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
+		       sizeof(union pf_vf_bulletin));
+
+		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
+
+		if (bp->shadow_bulletin.content.crc == crc)
+			break;
+
+		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+			  bp->shadow_bulletin.content.crc, crc);
+	}
+
+	if (attempts >= BULLETIN_ATTEMPTS) {
+		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+			  attempts);
+		return PFVF_BULLETIN_CRC_ERR;
+	}
+	bulletin = &bp->shadow_bulletin.content;
+
+	/* bulletin board hasn't changed since last sample */
+	if (bp->old_bulletin.version == bulletin->version)
+		return PFVF_BULLETIN_UNCHANGED;
+
+	/* the mac address in bulletin board is valid and is new */
+	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
+	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
+		/* update new mac to net device */
+		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+	}
+
+	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
+		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
+		   bulletin->link_speed, bulletin->link_flags);
+
+		bp->vf_link_vars.line_speed = bulletin->link_speed;
+		bp->vf_link_vars.link_report_flags = 0;
+		/* Link is down */
+		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
+			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+				  &bp->vf_link_vars.link_report_flags);
+		/* Full DUPLEX */
+		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
+			__set_bit(BNX2X_LINK_REPORT_FD,
+				  &bp->vf_link_vars.link_report_flags);
+		/* Rx Flow Control is ON */
+		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
+			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+				  &bp->vf_link_vars.link_report_flags);
+		/* Tx Flow Control is ON */
+		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
+			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+				  &bp->vf_link_vars.link_report_flags);
+		__bnx2x_link_report(bp);
+	}
+
+	/* copy new bulletin board to bp */
+	memcpy(&bp->old_bulletin, bulletin,
+	       sizeof(struct pf_vf_bulletin_content));
+
+	return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp)
+{
+	bnx2x_sample_bulletin(bp);
+
+	/* if channel is down we need to self destruct */
+	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+				       BNX2X_MSG_IOV);
+}
+
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+	/* vf doorbells are embedded within the regview */
+	return bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+		       sizeof(struct bnx2x_vf_mbx_msg));
+	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+		       sizeof(union pf_vf_bulletin));
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+	mutex_init(&bp->vf2pf_mutex);
+
+	/* allocate vf2pf mailbox for vf to pf channel */
+	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+					 sizeof(struct bnx2x_vf_mbx_msg));
+	if (!bp->vf2pf_mbox)
+		goto alloc_mem_err;
+
+	/* allocate pf 2 vf bulletin board */
+	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+					     sizeof(union pf_vf_bulletin));
+	if (!bp->pf2vf_bulletin)
+		goto alloc_mem_err;
+
+	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_vf_pci_dealloc(bp);
+	return -ENOMEM;
+}
+
+void bnx2x_iov_channel_down(struct bnx2x *bp)
+{
+	int vf_idx;
+	struct pf_vf_bulletin_content *bulletin;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	for_each_vf(bp, vf_idx) {
+		/* locate this VF's bulletin board and set the channel-down
+		 * bit
+		 */
+		bulletin = BP_VF_BULLETIN(bp, vf_idx);
+		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
+
+		/* update vf bulletin board */
+		bnx2x_post_vf_bulletin(bp, vf_idx);
+	}
+}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+			       &bp->iov_task_state))
+		bnx2x_vf_handle_flr_event(bp);
+
+	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+			       &bp->iov_task_state))
+		bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
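+	/* barriers ensure the flag update is visible before the iov task
+	 * samples it
+	 */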
+	smp_mb__before_atomic();
+	set_bit(flag, &bp->iov_task_state);
+	smp_mb__after_atomic();
+	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 0000000..670a581
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,628 @@
+/* bnx2x_sriov.h: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+	   PFVF_BULLETIN_UNCHANGED,
+	   PFVF_BULLETIN_UPDATED,
+	   PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+extern struct workqueue_struct *bnx2x_iov_wq;
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES		16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES	8
+
+struct bnx2x_sriov {
+	u32 first_vf_in_pf;
+
+	/* standard SRIOV capability fields, mostly for debugging */
+	int pos;		/* capability position */
+	int nres;		/* number of resources */
+	u32 cap;		/* SR-IOV Capabilities */
+	u16 ctrl;		/* SR-IOV Control */
+	u16 total;		/* total VFs associated with the PF */
+	u16 initial;		/* initial VFs associated with the PF */
+	u16 nr_virtfn;		/* number of VFs available */
+	u16 offset;		/* first VF Routing ID offset */
+	u16 stride;		/* following VF stride */
+	u32 pgsz;		/* page size for BAR alignment */
+	u8 link;		/* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+	u64 bar;
+	u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+	u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+	struct eth_context		*cxt;
+
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj	mac_obj;
+
+	/* VLANs object */
+	struct bnx2x_vlan_mac_obj	vlan_obj;
+
+	/* VLAN-MACs object */
+	struct bnx2x_vlan_mac_obj	vlan_mac_obj;
+
+	unsigned long accept_flags;	/* last accept flags configured */
+
+	/* Queue Slow-path State object */
+	struct bnx2x_queue_sp_obj	sp_obj;
+
+	u32 cid;
+	u16 index;
+	u16 sb_idx;
+	bool is_leading;
+	bool sp_initialized;
+};
+
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
+ */
+struct bnx2x_vf_queue_construct_params {
+	struct bnx2x_queue_state_params		qstate;
+	struct bnx2x_queue_setup_params		prep_qsetup;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+
+struct bnx2x_vf_mac_vlan_filter {
+	int type;
+#define BNX2X_VF_FILTER_MAC	BIT(0)
+#define BNX2X_VF_FILTER_VLAN	BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
+
+	bool add;
+	u8 *mac;
+	u16 vid;
+};
+
+struct bnx2x_vf_mac_vlan_filters {
+	int count;
+	struct bnx2x_vf_mac_vlan_filter filters[];
+};
+
+/* vf context */
+struct bnx2x_virtf {
+	u16 cfg_flags;
+#define VF_CFG_STATS_COALESCE	0x1
+#define VF_CFG_EXT_BULLETIN	0x2
+#define VF_CFG_VLAN_FILTER	0x4
+	u8 link_cfg;		/* IFLA_VF_LINK_STATE_AUTO
+				 * IFLA_VF_LINK_STATE_ENABLE
+				 * IFLA_VF_LINK_STATE_DISABLE
+				 */
+	u8 state;
+#define VF_FREE		0	/* VF ready to be acquired holds no resc */
+#define VF_ACQUIRED	1	/* VF acquired, but not initialized */
+#define VF_ENABLED	2	/* VF Enabled */
+#define VF_RESET	3	/* VF FLR'd, pending cleanup */
+
+	bool flr_clnup_stage;	/* true during flr cleanup */
+
+	/* dma */
+	dma_addr_t fw_stat_map;
+	u16 stats_stride;
+	dma_addr_t bulletin_map;
+
+	/* Allocated resources counters. Before the VF is acquired, the
+	 * counters hold the following values:
+	 *
+	 * - xxq_count = 0 as the queues memory is not allocated yet.
+	 *
+	 * - sb_count  = The number of status blocks configured for this VF in
+	 *		 the IGU CAM. Initially read during probe.
+	 *
+	 * - xx_rules_count = The number of rules statically and equally
+	 *		      allocated for each VF, during PF load.
+	 */
+	struct vf_pf_resc_request	alloc_resc;
+#define vf_rxq_count(vf)		((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf)		((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf)			((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
+
+	u8 sb_count;	/* actual number of SBs */
+	u8 igu_base_id;	/* base igu status block id */
+
+	struct bnx2x_vf_queue	*vfqs;
+#define LEADING_IDX			0
+#define bnx2x_vfq_is_leading(vfq)	((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var)		((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var)	((vf)->vfqs[LEADING_IDX].var)
+
+	u8 index;	/* index in the vf array */
+	u8 abs_vfid;
+	u8 sp_cl_id;
+	u32 error;	/* 0 means all's-well */
+
+	/* BDF */
+	unsigned int bus;
+	unsigned int devfn;
+
+	/* bars */
+	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+	/* set-mac ramrod state 1-pending, 0-done */
+	unsigned long	filter_state;
+
+	/* leading rss client id ~~ the client id of the first rxq, must be
+	 * set for each txq.
+	 */
+	int leading_rss;
+
+	/* MCAST object */
+	int mcast_list_len;
+	struct bnx2x_mcast_obj		mcast_obj;
+
+	/* RSS configuration object */
+	struct bnx2x_rss_config_obj     rss_conf_obj;
+
+	/* slow-path operations */
+	struct mutex			op_mutex; /* one vfop at a time mutex */
+	enum channel_tlvs		op_current;
+
+	u8 fp_hsi;
+
+	struct bnx2x_credit_pool_obj	vf_vlans_pool;
+	struct bnx2x_credit_pool_obj	vf_macs_pool;
+};
+
+#define BNX2X_NR_VIRTFN(bp)	((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+		for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+		for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+		for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf)	(vf_rxq_count(vf) > 1)
+
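+/* HW "pretend" handle for a VF: bits [2:0] carry the PF's absolute function
+ * id, bit 3 marks the handle as a VF handle and the absolute VF id starts
+ * at bit 4.
+ */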
+#define HW_VF_HANDLE(bp, abs_vfid) \
+	(u16)(BP_ABS_FUNC((bp)) | (1<<3) |  ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE	8
+
+#define FW_VF_HANDLE(abs_vfid)	\
+	(abs_vfid + FW_PF_MAX_HANDLE)
+
+#define GET_NUM_VFS_PER_PATH(bp)	64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)		((bp)->vfdb ? (bp)->vfdb->sriov.total \
+						    : 0)
+#define VF_MAC_CREDIT_CNT		1
+#define VF_VLAN_CREDIT_CNT		2 /* VLAN0 + 'real' VLAN */
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ *  The actual response will be placed according to the offset parameter
+ *  provided in the request
+ */
+
+#define MBX_MSG_ALIGN	8
+#define MBX_MSG_ALIGNED_SIZE	(roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+				MBX_MSG_ALIGN))
+
+struct bnx2x_vf_mbx_msg {
+	union vfpf_tlvs req;
+	union pfvf_tlvs resp;
+};
+
+struct bnx2x_vf_mbx {
+	struct bnx2x_vf_mbx_msg *msg;
+	dma_addr_t msg_mapping;
+
+	/* VF GPA address */
+	u32 vf_addr_lo;
+	u32 vf_addr_hi;
+
+	struct vfpf_first_tlv first_tlv;	/* saved VF request header */
+};
+
+struct bnx2x_vf_sp {
+	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} mac_rdata;
+
+	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_rdata;
+
+	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_mac_rdata;
+
+	union {
+		struct eth_filter_rules_ramrod_data	e2;
+	} rx_mode_rdata;
+
+	union {
+		struct eth_multicast_rules_ramrod_data  e2;
+	} mcast_rdata;
+
+	union {
+		struct client_init_ramrod_data  init_data;
+		struct client_update_ramrod_data update_data;
+	} q_data;
+
+	union {
+		struct eth_rss_update_ramrod_data e2;
+	} rss_rdata;
+};
+
+struct hw_dma {
+	void *addr;
+	dma_addr_t mapping;
+	size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp)		((bp)->vfdb)
+	/* vf array */
+	struct bnx2x_virtf	*vfs;
+#define BP_VF(bp, idx)		((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
+					&((bp)->vfdb->vfs[idx]) : NULL)
+#define bnx2x_vf(bp, idx, var)	((bp)->vfdb->vfs[idx].var)
+
+	/* queue array - for all vfs */
+	struct bnx2x_vf_queue *vfqs;
+
+	/* vf HW contexts */
+	struct hw_dma		context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define	BP_VF_CXT_PAGE(bp, i)	(&(bp)->vfdb->context[i])
+
+	/* SR-IOV information */
+	struct bnx2x_sriov	sriov;
+	struct hw_dma		mbx_dma;
+#define BP_VF_MBX_DMA(bp)	(&((bp)->vfdb->mbx_dma))
+	struct bnx2x_vf_mbx	mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid)	(&((bp)->vfdb->mbxs[vfid]))
+
+	struct hw_dma		bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp)	(&((bp)->vfdb->bulletin_dma))
+#define	BP_VF_BULLETIN(bp, vf) \
+	(((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+	 + (vf))
+
+	struct hw_dma		sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr +		\
+		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
+		offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping +	\
+		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
+		offsetof(struct bnx2x_vf_sp, field))
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+	u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+	/* the number of msix vectors belonging to this PF designated for VFs */
+	u16 vf_sbs_pool;
+	u16 first_vf_igu_entry;
+
+	/* sp_rtnl synchronization */
+	struct mutex			event_mutex;
+	u64				event_occur;
+
+	/* bulletin board update synchronization */
+	struct mutex			bulletin_mutex;
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+	return &(vf->vfqs[index]);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+	return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+	return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+	return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+	if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+		return vf->leading_rss;
+	else
+		return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+	return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+				struct bnx2x_queue_sp_obj **q_obj);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+			   struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		     struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		  dma_addr_t *sb_map);
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct bnx2x_queue_init_params *init_params,
+			    struct bnx2x_queue_setup_params *setup_params,
+			    u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			    struct bnx2x_queue_init_params *init_params,
+			    struct bnx2x_queue_setup_params *setup_params,
+			    u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+			   struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q,
+			   struct bnx2x_vf_queue_construct_params *p,
+			   unsigned long q_type);
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  struct bnx2x_vf_mac_vlan_filters *filters,
+				  int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+			 struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		    int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			struct vfpf_tpa_tlv *tlv,
+			struct bnx2x_queue_update_tpa_params *params);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification from the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+				bool support_long);
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool is_leading);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *params);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+					size_t buf_len)
+{
+	strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+					       struct bnx2x_fastpath *fp)
+{
+	return PXP_VF_ADDR_USDM_QUEUES_START +
+		bp->acquire_resp.resc.hw_qid[fp->index] *
+		sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+	return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
+}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+void bnx2x_iov_channel_down(struct bnx2x *bp);
+
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
+void bnx2x_iov_link_update(struct bnx2x *bp);
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
+#else /* CONFIG_BNX2X_SRIOV */
+
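+/* SR-IOV support is compiled out - provide no-op stubs so callers do not
+ * need #ifdefs of their own
+ */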
+#define GET_NUM_VFS_PER_PATH(bp)	0
+#define GET_NUM_VFS_PER_PF(bp)		0
+#define VF_MAC_CREDIT_CNT		0
+#define VF_VLAN_CREDIT_CNT		0
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+				struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+					union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+					 struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+				     int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+				     u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+					u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *params) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+					size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+					       struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+	return PFVF_BULLETIN_UNCHANGED;
+}
+static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
+
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+	return NULL;
+}
+
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
+
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+static inline void bnx2x_iov_link_update(struct bnx2x *bp) {}
+static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; }
+
+static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf,
+					  int link_state) {return 0; }
+struct pf_vf_bulletin_content;
+static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+					      bool support_long) {}
+
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
new file mode 100644
index 0000000..7e0919a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -0,0 +1,2004 @@
+/* bnx2x_stats.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sriov.h"
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+	u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+	u32 hi = *hiref;
+
+	return HILO_U64(hi, lo);
+#else
+	return lo;
+#endif
+}
+
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+	u16 res = 0;
+
+	/* 'newest' convention - shmem2 contains the size of the port stats */
+	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+		if (size)
+			res = size;
+
+		/* prevent newer BC from causing buffer overflow */
+		if (res > sizeof(struct host_port_stats))
+			res = sizeof(struct host_port_stats);
+	}
+
+	/* Older convention - all BCs support the port stats' fields up until
+	 * the 'not_used' field
+	 */
+	if (!res) {
+		res = offsetof(struct host_port_stats, not_used) + 4;
+
+		/* if PFC stats are supported by the MFW, DMA them as well */
+		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+			res += offsetof(struct host_port_stats,
+					pfc_frames_rx_lo) -
+			       offsetof(struct host_port_stats,
+					pfc_frames_tx_hi) + 4;
+		}
+	}
+
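+	/* convert from bytes to dwords - DMAE lengths are in 32-bit words */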
+	res >>= 2;
+
+	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
+	return res;
+}
+
+/*
+ * Init service functions
+ */
+
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+	int i;
+
+	DP(BNX2X_MSG_STATS, "dumping stats:\n"
+	   "fw_stats_req\n"
+	   "    hdr\n"
+	   "        cmd_num %d\n"
+	   "        reserved0 %d\n"
+	   "        drv_stats_counter %d\n"
+	   "        reserved1 %d\n"
+	   "        stats_counters_addrs %x %x\n",
+	   bp->fw_stats_req->hdr.cmd_num,
+	   bp->fw_stats_req->hdr.reserved0,
+	   bp->fw_stats_req->hdr.drv_stats_counter,
+	   bp->fw_stats_req->hdr.reserved1,
+	   bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+	   bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+	for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+		DP(BNX2X_MSG_STATS,
+		   "query[%d]\n"
+		   "              kind %d\n"
+		   "              index %d\n"
+		   "              funcID %d\n"
+		   "              reserved %d\n"
+		   "              address %x %x\n",
+		   i, bp->fw_stats_req->query[i].kind,
+		   bp->fw_stats_req->query[i].index,
+		   bp->fw_stats_req->query[i].funcID,
+		   bp->fw_stats_req->query[i].reserved,
+		   bp->fw_stats_req->query[i].address.hi,
+		   bp->fw_stats_req->query[i].address.lo);
+	}
+}
+
+/* Post the next statistics ramrod. Protect it with the spin in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+	int rc;
+
+	if (bp->stats_pending)
+		return;
+
+	bp->fw_stats_req->hdr.drv_stats_counter =
+		cpu_to_le16(bp->stats_counter++);
+
+	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+
+	/* adjust the ramrod to include VF queues statistics */
+	bnx2x_iov_adjust_stats_req(bp);
+	bnx2x_dp_stats(bp);
+
+	/* send FW stats ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+			   U64_HI(bp->fw_stats_req_mapping),
+			   U64_LO(bp->fw_stats_req_mapping),
+			   NONE_CONNECTION_TYPE);
+	if (rc == 0)
+		bp->stats_pending = 1;
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	*stats_comp = DMAE_COMP_VAL;
+	if (CHIP_REV_IS_SLOW(bp))
+		return;
+
+	/* Update MCP's statistics if possible */
+	if (bp->func_stx)
+		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
+		       sizeof(bp->func_stats));
+
+	/* loader */
+	if (bp->executer_idx) {
+		int loader_idx = PMF_DMAE_C(bp);
+		u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+						 true, DMAE_COMP_GRC);
+		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+		memset(dmae, 0, sizeof(struct dmae_command));
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+				     sizeof(struct dmae_command) *
+				     (loader_idx + 1)) >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct dmae_command) >> 2;
+		if (CHIP_IS_E1(bp))
+			dmae->len--;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		*stats_comp = 0;
+		bnx2x_post_dmae(bp, dmae, loader_idx);
+
+	} else if (bp->func_stx) {
+		*stats_comp = 0;
+		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
+	}
+}
+
+static void bnx2x_stats_comp(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+	int cnt = 10;
+
+	might_sleep();
+	while (*stats_comp != DMAE_COMP_VAL) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for stats finished\n");
+			break;
+		}
+		cnt--;
+		usleep_range(1000, 2000);
+	}
+}
+
+/*
+ * Statistics service functions
+ */
+
+/* should be called under stats_sema */
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
+
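+	/* the port stats area is larger than a single DMAE transfer
+	 * (DMAE_LEN32_RD_MAX dwords), so read it in two chunks
+	 */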
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+	dmae->src_addr_lo = bp->port.port_stx >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->len = DMAE_LEN32_RD_MAX;
+	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+	dmae->comp_addr_hi = 0;
+	dmae->comp_val = 1;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	int port = BP_PORT(bp);
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 mac_addr;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->link_vars.link_up || !bp->port.pmf) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	/* MCP */
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+				    true, DMAE_COMP_GRC);
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = bnx2x_get_port_stats_dma_len(bp);
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* MAC */
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+				   true, DMAE_COMP_GRC);
+
+	/* EMAC is special */
+	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->len = 1;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	} else {
+		u32 tx_src_addr_lo, rx_src_addr_lo;
+		u16 rx_len, tx_len;
+
+		/* configure the params according to MAC type */
+		switch (bp->link_vars.mac_type) {
+		case MAC_TYPE_BMAC:
+			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+					   NIG_REG_INGRESS_BMAC0_MEM);
+
+			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+			   BIGMAC_REGISTER_TX_STAT_GTBYT */
+			if (CHIP_IS_E1x(bp)) {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+			} else {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+			}
+			break;
+
+		case MAC_TYPE_UMAC: /* handled by MSTAT */
+		case MAC_TYPE_XMAC: /* handled by MSTAT */
+		default:
+			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+			tx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+			rx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+			tx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_tx) >> 2;
+			rx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_rx) >> 2;
+			break;
+		}
+
+		/* TX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = tx_src_addr_lo;
+		dmae->src_addr_hi = 0;
+		dmae->len = tx_len;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* RX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_hi = 0;
+		dmae->src_addr_lo = rx_src_addr_lo;
+		dmae->dst_addr_lo =
+			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->dst_addr_hi =
+			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->len = rx_len;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* NIG */
+	if (!CHIP_IS_E3(bp)) {
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+						 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+	dmae->dst_addr_lo = bp->func_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = sizeof(struct host_func_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+/* should be called under stats_sema */
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);
+
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);
+
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_pmf_update(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+	/* vfs travel through here as part of the statistics FSM, but no action
+	 * is required
+	 */
+	if (IS_VF(bp))
+		return;
+
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
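+	/* hi/lo scratch pair used by the UPDATE_STAT64() macros below */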
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
+	if (CHIP_IS_E1x(bp)) {
+		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+		/* the macros below will use "bmac1_stats" type */
+		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+		UPDATE_STAT64(tx_stat_gt127,
+				tx_stat_etherstatspkts65octetsto127octets);
+		UPDATE_STAT64(tx_stat_gt255,
+				tx_stat_etherstatspkts128octetsto255octets);
+		UPDATE_STAT64(tx_stat_gt511,
+				tx_stat_etherstatspkts256octetsto511octets);
+		UPDATE_STAT64(tx_stat_gt1023,
+				tx_stat_etherstatspkts512octetsto1023octets);
+		UPDATE_STAT64(tx_stat_gt1518,
+				tx_stat_etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+		UPDATE_STAT64(tx_stat_gterr,
+				tx_stat_dot3statsinternalmactransmiterrors);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+	} else {
+		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+		/* the macros below will use "bmac2_stats" type */
+		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+		UPDATE_STAT64(tx_stat_gt127,
+				tx_stat_etherstatspkts65octetsto127octets);
+		UPDATE_STAT64(tx_stat_gt255,
+				tx_stat_etherstatspkts128octetsto255octets);
+		UPDATE_STAT64(tx_stat_gt511,
+				tx_stat_etherstatspkts256octetsto511octets);
+		UPDATE_STAT64(tx_stat_gt1023,
+				tx_stat_etherstatspkts512octetsto1023octets);
+		UPDATE_STAT64(tx_stat_gt1518,
+				tx_stat_etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+		UPDATE_STAT64(tx_stat_gterr,
+				tx_stat_dot3statsinternalmactransmiterrors);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+		/* collect PFC stats */
+		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+
+		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+	}
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+	estats->pfc_frames_received_hi =
+				pstats->pfc_frames_rx_hi;
+	estats->pfc_frames_received_lo =
+				pstats->pfc_frames_rx_lo;
+	estats->pfc_frames_sent_hi =
+				pstats->pfc_frames_tx_hi;
+	estats->pfc_frames_sent_lo =
+				pstats->pfc_frames_tx_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+	/* collect pfc stats */
+	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
+
+	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+	ADD_STAT64(stats_tx.tx_gt127,
+			tx_stat_etherstatspkts65octetsto127octets);
+	ADD_STAT64(stats_tx.tx_gt255,
+			tx_stat_etherstatspkts128octetsto255octets);
+	ADD_STAT64(stats_tx.tx_gt511,
+			tx_stat_etherstatspkts256octetsto511octets);
+	ADD_STAT64(stats_tx.tx_gt1023,
+			tx_stat_etherstatspkts512octetsto1023octets);
+	ADD_STAT64(stats_tx.tx_gt1518,
+			tx_stat_etherstatspkts1024octetsto1522octets);
+	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+	ADD_STAT64(stats_tx.tx_gterr,
+			tx_stat_dot3statsinternalmactransmiterrors);
+	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+	estats->etherstatspkts1024octetsto1522octets_hi =
+	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	estats->etherstatspkts1024octetsto1522octets_lo =
+	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
+
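+	/* fold the 2047/4095/9216/16383 byte buckets into the single
+	 * over-1522-octets counter
+	 */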
+	estats->etherstatspktsover1522octets_hi =
+	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
+	estats->etherstatspktsover1522octets_lo =
+	    pstats->mac_stx[1].tx_stat_mac_2047_lo;
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       pstats->mac_stx[1].tx_stat_mac_4095_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       pstats->mac_stx[1].tx_stat_mac_9216_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       pstats->mac_stx[1].tx_stat_mac_16383_lo);
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+	estats->pfc_frames_received_hi =
+				pstats->pfc_frames_rx_hi;
+	estats->pfc_frames_received_lo =
+				pstats->pfc_frames_rx_lo;
+	estats->pfc_frames_sent_hi =
+				pstats->pfc_frames_tx_hi;
+	estats->pfc_frames_sent_lo =
+				pstats->pfc_frames_tx_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+	estats->pause_frames_received_hi =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+	estats->pause_frames_received_lo =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+	ADD_64(estats->pause_frames_received_hi,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+	       estats->pause_frames_received_lo,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+	estats->pause_frames_sent_hi =
+			pstats->mac_stx[1].tx_stat_outxonsent_hi;
+	estats->pause_frames_sent_lo =
+			pstats->mac_stx[1].tx_stat_outxonsent_lo;
+	ADD_64(estats->pause_frames_sent_hi,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+	       estats->pause_frames_sent_lo,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+	struct nig_stats *old = &(bp->port.old_nig_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
+	switch (bp->link_vars.mac_type) {
+	case MAC_TYPE_BMAC:
+		bnx2x_bmac_stats_update(bp);
+		break;
+
+	case MAC_TYPE_EMAC:
+		bnx2x_emac_stats_update(bp);
+		break;
+
+	case MAC_TYPE_UMAC:
+	case MAC_TYPE_XMAC:
+		bnx2x_mstat_stats_update(bp);
+		break;
+
+	case MAC_TYPE_NONE: /* unreached */
+		DP(BNX2X_MSG_STATS,
+		   "stats updated by DMAE but no MAC active\n");
+		return -1;
+
+	default: /* unreached */
+		BNX2X_ERR("Unknown MAC type\n");
+	}
+
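+	/* BRB discard/truncate come from the NIG block; accumulate the delta
+	 * versus the previously saved snapshot
+	 */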
+	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
+
+	if (!CHIP_IS_E3(bp)) {
+		UPDATE_STAT64_NIG(egress_mac_pkt0,
+					etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64_NIG(egress_mac_pkt1,
+					etherstatspktsover1522octets);
+	}
+
+	memcpy(old, new, sizeof(struct nig_stats));
+
+	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+	       sizeof(struct mac_stx));
+	estats->brb_drop_hi = pstats->brb_drop_hi;
+	estats->brb_drop_lo = pstats->brb_drop_lo;
+
+	pstats->host_port_stats_counter++;
+
+	if (CHIP_IS_E3(bp)) {
+		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
+					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
+		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
+	}
+
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
+	}
+
+	return 0;
+}
+
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
+{
+	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
+	u16 cur_stats_counter;
+	/* Make sure we use the value of the counter
+	 * used for sending the last stats ramrod.
+	 */
+	cur_stats_counter = bp->stats_counter - 1;
+
+	/* are storm stats valid? */
+	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS,
+		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS,
+		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS,
+		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS,
+		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+	struct tstorm_per_port_stats *tport =
+				&bp->fw_stats_data->port.tstorm_port_statistics;
+	struct tstorm_per_pf_stats *tfunc =
+				&bp->fw_stats_data->pf.tstorm_pf_statistics;
+	struct host_func_stats *fstats = &bp->func_stats;
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+	int i;
+
+	/* vfs stat counter is managed by pf */
+	if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+		return -EAGAIN;
+
+	estats->error_bytes_received_hi = 0;
+	estats->error_bytes_received_lo = 0;
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct tstorm_per_queue_stats *tclient =
+			&bp->fw_stats_data->queue_stats[i].
+			tstorm_queue_statistics;
+		struct tstorm_per_queue_stats *old_tclient =
+			&bnx2x_fp_stats(bp, fp)->old_tclient;
+		struct ustorm_per_queue_stats *uclient =
+			&bp->fw_stats_data->queue_stats[i].
+			ustorm_queue_statistics;
+		struct ustorm_per_queue_stats *old_uclient =
+			&bnx2x_fp_stats(bp, fp)->old_uclient;
+		struct xstorm_per_queue_stats *xclient =
+			&bp->fw_stats_data->queue_stats[i].
+			xstorm_queue_statistics;
+		struct xstorm_per_queue_stats *old_xclient =
+			&bnx2x_fp_stats(bp, fp)->old_xclient;
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
+
+		u32 diff;
+
+		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
+		   i, xclient->ucast_pkts_sent,
+		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+		DP(BNX2X_MSG_STATS, "---------------\n");
+
+		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
+			     total_broadcast_bytes_received);
+		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
+			     total_multicast_bytes_received);
+		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
+			     total_unicast_bytes_received);
+
+		/*
+		 * sum all unicast/multicast/broadcast bytes
+		 * into total_bytes_received
+		 */
+		qstats->total_bytes_received_hi =
+			qstats->total_broadcast_bytes_received_hi;
+		qstats->total_bytes_received_lo =
+			qstats->total_broadcast_bytes_received_lo;
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->total_multicast_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->total_multicast_bytes_received_lo);
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->total_unicast_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->total_unicast_bytes_received_lo);
+
+		qstats->valid_bytes_received_hi =
+					qstats->total_bytes_received_hi;
+		qstats->valid_bytes_received_lo =
+					qstats->total_bytes_received_lo;
+
+		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
+					total_unicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
+					total_multicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
+				      etherstatsoverrsizepkts, 32);
+		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
+
+		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+					total_unicast_packets_received);
+		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+					total_multicast_packets_received);
+		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+		UPDATE_QSTAT(xclient->bcast_bytes_sent,
+			     total_broadcast_bytes_transmitted);
+		UPDATE_QSTAT(xclient->mcast_bytes_sent,
+			     total_multicast_bytes_transmitted);
+		UPDATE_QSTAT(xclient->ucast_bytes_sent,
+			     total_unicast_bytes_transmitted);
+
+		/*
+		 * sum all unicast/multicast/broadcast bytes
+		 * into total_bytes_transmitted
+		 */
+		qstats->total_bytes_transmitted_hi =
+				qstats->total_unicast_bytes_transmitted_hi;
+		qstats->total_bytes_transmitted_lo =
+				qstats->total_unicast_bytes_transmitted_lo;
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       qstats->total_broadcast_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_lo,
+		       qstats->total_broadcast_bytes_transmitted_lo);
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       qstats->total_multicast_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_lo,
+		       qstats->total_multicast_bytes_transmitted_lo);
+
+		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+					total_unicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+					total_multicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+					total_broadcast_packets_transmitted);
+
+		UPDATE_EXTEND_TSTAT(checksum_discard,
+				    total_packets_received_checksum_discarded);
+		UPDATE_EXTEND_TSTAT(ttl0_discard,
+				    total_packets_received_ttl0_discarded);
+
+		UPDATE_EXTEND_XSTAT(error_drop_pkts,
+				    total_transmitted_dropped_packets_error);
+
+		/* TPA aggregations completed */
+		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
+		/* Number of network frames aggregated by TPA */
+		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
+				      total_tpa_aggregated_frames);
+		/* Total number of bytes in completed TPA aggregations */
+		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
+
+		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
+
+		UPDATE_FSTAT_QSTAT(total_bytes_received);
+		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
+		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
+		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
+		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
+		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
+		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
+		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
+		UPDATE_FSTAT_QSTAT(valid_bytes_received);
+	}
+
+	ADD_64(estats->total_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       estats->total_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	ADD_64_LE(estats->total_bytes_received_hi,
+		  tfunc->rcv_error_bytes.hi,
+		  estats->total_bytes_received_lo,
+		  tfunc->rcv_error_bytes.lo);
+
+	ADD_64_LE(estats->error_bytes_received_hi,
+		  tfunc->rcv_error_bytes.hi,
+		  estats->error_bytes_received_lo,
+		  tfunc->rcv_error_bytes.lo);
+
+	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
+
+	ADD_64(estats->error_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       estats->error_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	if (bp->port.pmf) {
+		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+		UPDATE_FW_STAT(mac_filter_discard);
+		UPDATE_FW_STAT(mf_tag_discard);
+		UPDATE_FW_STAT(brb_truncate_discard);
+		UPDATE_FW_STAT(mac_discard);
+	}
+
+	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+	bp->stats_pending = 0;
+
+	return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct net_device_stats *nstats = &bp->dev->stats;
+	unsigned long tmp;
+	int i;
+
+	nstats->rx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+	nstats->tx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
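+	/* rx_dropped accumulates the MAC discard counter, the per-queue
+	 * checksum discards and the value previously saved in net_stats_old
+	 */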
+	tmp = estats->mac_discard;
+	for_each_rx_queue(bp, i) {
+		struct tstorm_per_queue_stats *old_tclient =
+			&bp->fp_stats[i].old_tclient;
+		tmp += le32_to_cpu(old_tclient->checksum_discard);
+	}
+	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
+
+	nstats->tx_dropped = 0;
+
+	nstats->multicast =
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+	nstats->collisions =
+		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+	nstats->rx_length_errors =
+		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+				 bnx2x_hilo(&estats->brb_truncate_hi);
+	nstats->rx_crc_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+	nstats->rx_frame_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+	nstats->rx_missed_errors = 0;
+
+	nstats->rx_errors = nstats->rx_length_errors +
+			    nstats->rx_over_errors +
+			    nstats->rx_crc_errors +
+			    nstats->rx_frame_errors +
+			    nstats->rx_fifo_errors +
+			    nstats->rx_missed_errors;
+
+	nstats->tx_aborted_errors =
+		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+	nstats->tx_carrier_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+	nstats->tx_fifo_errors = 0;
+	nstats->tx_heartbeat_errors = 0;
+	nstats->tx_window_errors = 0;
+
+	nstats->tx_errors = nstats->tx_aborted_errors +
+			    nstats->tx_carrier_errors +
+	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bp->fp_stats[i].eth_q_stats_old;
+
+		UPDATE_ESTAT_QSTAT(driver_xoff);
+		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
+		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
+		UPDATE_ESTAT_QSTAT(hw_csum_err);
+		UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
+	}
+}
+
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+	u32 val;
+
+	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+		val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+			return true;
+	}
+
+	return false;
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	if (bnx2x_edebug_stats_stopped(bp))
+		return;
+
+	if (IS_PF(bp)) {
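+		/* DMAE completion value not yet written - HW stats not ready */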
+		if (*stats_comp != DMAE_COMP_VAL)
+			return;
+
+		if (bp->port.pmf)
+			bnx2x_hw_stats_update(bp);
+
+		if (bnx2x_storm_stats_update(bp)) {
+			if (bp->stats_pending++ == 3) {
+				BNX2X_ERR("storm stats were not updated for 3 times\n");
+				bnx2x_panic();
+			}
+			return;
+		}
+	} else {
+		/* vf doesn't collect HW statistics and doesn't get completions;
+		 * perform only the storm stats update
+		 */
+		bnx2x_storm_stats_update(bp);
+	}
+
+	bnx2x_net_stats_update(bp);
+	bnx2x_drv_stats_update(bp);
+
+	/* vf is done */
+	if (IS_VF(bp))
+		return;
+
+	if (netif_msg_timer(bp)) {
+		struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
+		       estats->brb_drop_lo, estats->brb_truncate_lo);
+	}
+
+	bnx2x_hw_stats_post(bp);
+	bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	bp->executer_idx = 0;
+
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
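+		/* if a function-stats command follows, complete via the GRC
+		 * loader so the chain continues; otherwise complete directly
+		 * to stats_comp in host memory
+		 */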
+		if (bp->func_stx)
+			dmae->opcode = bnx2x_dmae_opcode_add_comp(
+						opcode, DMAE_COMP_GRC);
+		else
+			dmae->opcode = bnx2x_dmae_opcode_add_comp(
+						opcode, DMAE_COMP_PCI);
+
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = bnx2x_get_port_stats_dma_len(bp);
+		if (bp->func_stx) {
+			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+			dmae->comp_addr_hi = 0;
+			dmae->comp_val = 1;
+		} else {
+			dmae->comp_addr_lo =
+				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_addr_hi =
+				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_val = DMAE_COMP_VAL;
+
+			*stats_comp = 0;
+		}
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode =
+			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_val = DMAE_COMP_VAL;
+
+		*stats_comp = 0;
+	}
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+	bool update = false;
+
+	bnx2x_stats_comp(bp);
+
+	if (bp->port.pmf)
+		update = (bnx2x_hw_stats_update(bp) == 0);
+
+	update |= (bnx2x_storm_stats_update(bp) == 0);
+
+	if (update) {
+		bnx2x_net_stats_update(bp);
+
+		if (bp->port.pmf)
+			bnx2x_port_stats_stop(bp);
+
+		bnx2x_hw_stats_post(bp);
+		bnx2x_stats_comp(bp);
+	}
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
+static const struct {
+	void (*action)(struct bnx2x *bp);
+	enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state	event	*/
+{
+/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
+/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
+/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
+}
+};
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+	enum bnx2x_stats_state state = bp->stats_state;
+
+	if (unlikely(bp->panic))
+		return;
+
+	/* Statistics updates run from timer context, and we don't want to stop
+	 * that context in case someone is in the middle of a transition.
+	 * For other events, wait a bit until the lock is taken.
+	 */
+	if (down_trylock(&bp->stats_lock)) {
+		if (event == STATS_EVENT_UPDATE)
+			return;
+
+		DP(BNX2X_MSG_STATS,
+		   "Unlikely stats' lock contention [event %d]\n", event);
+		if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+			BNX2X_ERR("Failed to take stats lock [event %d]\n",
+				  event);
+			return;
+		}
+	}
+
+	bnx2x_stats_stm[state][event].action(bp);
+	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+
+	up(&bp->stats_lock);
+
+	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+		   state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_lo = bp->port.port_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = bnx2x_get_port_stats_dma_len(bp);
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+/* This function prepares the statistics ramrod data so that afterwards
+ * we only have to increment the statistics counter and send the ramrod
+ * each time we have to.
+ */
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+	int i;
+	int first_queue_query_index;
+	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+	dma_addr_t cur_data_offset;
+	struct stats_query_entry *cur_query_entry;
+
+	stats_hdr->cmd_num = bp->fw_stats_num;
+	stats_hdr->drv_stats_counter = 0;
+
+	/* storm_counters struct contains the counters of completed
+	 * statistics requests per storm, which are incremented by the FW
+	 * each time it completes handling a statistics ramrod. We will
+	 * check these counters in the timer handler and discard a
+	 * (statistics) ramrod completion.
+	 */
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+	stats_hdr->stats_counters_addrs.hi =
+		cpu_to_le32(U64_HI(cur_data_offset));
+	stats_hdr->stats_counters_addrs.lo =
+		cpu_to_le32(U64_LO(cur_data_offset));
+
+	/* prepare for the first stats ramrod (it will be completed with
+	 * the counters equal to zero) - init the counters to something
+	 * different.
+	 */
+	memset(&bp->fw_stats_data->storm_counters, 0xff,
+	       sizeof(struct stats_counter));
+
+	/**** Port FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, port);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PORT;
+	/* For port query index is a DONT CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	/* For port query funcID is a DONT CARE */
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** PF FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, pf);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PF;
+	/* For PF query index is a DONT CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** FCoE FW statistics data ****/
+	if (!NO_FCOE(bp)) {
+		cur_data_offset = bp->fw_stats_data_mapping +
+			offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+		cur_query_entry =
+			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+		cur_query_entry->kind = STATS_TYPE_FCOE;
+		/* For FCoE query index is a DONT CARE */
+		cur_query_entry->index = BP_PORT(bp);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+	}
+
+	/**** Clients' queries ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+	/* first queue query index depends on whether the FCoE offloaded
+	 * request will be included in the ramrod
+	 */
+	if (!NO_FCOE(bp))
+		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+	else
+		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
+	for_each_eth_queue(bp, i) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[first_queue_query_index + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+
+		cur_data_offset += sizeof(struct per_queue_stats);
+	}
+
+	/* add FCoE queue query if needed */
+	if (!NO_FCOE(bp)) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[first_queue_query_index + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+	}
+}
+
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+	int i;
+
+	/* function stats */
+	for_each_queue(bp, i) {
+		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+		memset(&fp_stats->old_tclient, 0,
+		       sizeof(fp_stats->old_tclient));
+		memset(&fp_stats->old_uclient, 0,
+		       sizeof(fp_stats->old_uclient));
+		memset(&fp_stats->old_xclient, 0,
+		       sizeof(fp_stats->old_xclient));
+		if (bp->stats_init) {
+			memset(&fp_stats->eth_q_stats, 0,
+			       sizeof(fp_stats->eth_q_stats));
+			memset(&fp_stats->eth_q_stats_old, 0,
+			       sizeof(fp_stats->eth_q_stats_old));
+		}
+	}
+
+	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+	if (bp->stats_init) {
+		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+		memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+	}
+
+	bp->stats_state = STATS_STATE_DISABLED;
+
+	if (bp->port.pmf && bp->port.port_stx)
+		bnx2x_port_stats_base_init(bp);
+
+	/* mark the end of statistics initialization */
+	bp->stats_init = false;
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+	int /*abs*/port = BP_PORT(bp);
+	int mb_idx = BP_FW_MB_IDX(bp);
+
+	if (IS_VF(bp)) {
+		bnx2x_memset_stats(bp);
+		return;
+	}
+
+	bp->stats_pending = 0;
+	bp->executer_idx = 0;
+	bp->stats_counter = 0;
+
+	/* port and func stats for management */
+	if (!BP_NOMCP(bp)) {
+		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+
+	} else {
+		bp->port.port_stx = 0;
+		bp->func_stx = 0;
+	}
+	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
+	   bp->port.port_stx, bp->func_stx);
+
+	/* pmf should retrieve port statistics from SP on a non-init */
+	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
+		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+
+	port = BP_PORT(bp);
+	/* port stats */
+	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+	bp->port.old_nig_stats.brb_discard =
+			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+	if (!CHIP_IS_E3(bp)) {
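+		/* each egress_mac_pkt counter is a 64-bit value read as two
+		 * u32s (lo word followed by hi word)
+		 */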
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	}
+
+	/* Prepare statistics ramrod data */
+	bnx2x_prep_fw_stats_req(bp);
+
+	/* Clean SP from previous statistics */
+	if (bp->stats_init) {
+		if (bp->func_stx) {
+			memset(bnx2x_sp(bp, func_stats), 0,
+			       sizeof(struct host_func_stats));
+			bnx2x_func_stats_init(bp);
+			bnx2x_hw_stats_post(bp);
+			bnx2x_stats_comp(bp);
+		}
+	}
+
+	bnx2x_memset_stats(bp);
+}
+
+void bnx2x_save_statistics(struct bnx2x *bp)
+{
+	int i;
+	struct net_device_stats *nstats = &bp->dev->stats;
+
+	/* save queue statistics */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
+
+		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
+		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
+		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
+		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
+		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
+		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
+		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
+		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
+		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
+		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
+		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
+		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
+		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
+		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
+	}
+
+	/* save net_device_stats statistics */
+	bp->net_stats_old.rx_dropped = nstats->rx_dropped;
+
+	/* store port firmware statistics */
+	if (bp->port.pmf && IS_MF(bp)) {
+		struct bnx2x_eth_stats *estats = &bp->eth_stats;
+		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+		UPDATE_FW_STAT_OLD(mac_filter_discard);
+		UPDATE_FW_STAT_OLD(mf_tag_discard);
+		UPDATE_FW_STAT_OLD(brb_truncate_discard);
+		UPDATE_FW_STAT_OLD(mac_discard);
+	}
+}
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type)
+{
+	int i;
+	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct per_queue_stats *fcoe_q_stats =
+		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
+
+	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+		&fcoe_q_stats->tstorm_queue_statistics;
+
+	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
+		&fcoe_q_stats->ustorm_queue_statistics;
+
+	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+		&fcoe_q_stats->xstorm_queue_statistics;
+
+	struct fcoe_statistics_params *fw_fcoe_stat =
+		&bp->fw_stats_data->fcoe;
+
+	memset(afex_stats, 0, sizeof(struct afex_stats));
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
+
+		ADD_64(afex_stats->rx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_received_hi,
+		       afex_stats->rx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_received_hi,
+		       afex_stats->rx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_received_hi,
+		       afex_stats->rx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_unicast_frames_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       afex_stats->rx_unicast_frames_lo,
+		       qstats->total_unicast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       afex_stats->rx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_frames_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       afex_stats->rx_multicast_frames_lo,
+		       qstats->total_multicast_packets_received_lo);
+
+		/* sum into rx_frames_discarded all packets discarded
+		 * due to size, ttl0 and checksum
+		 */
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_checksum_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_checksum_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_ttl0_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_ttl0_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       qstats->no_buff_discard_hi,
+		       afex_stats->rx_frames_dropped_lo,
+		       qstats->no_buff_discard_lo);
+
+		ADD_64(afex_stats->tx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_transmitted_hi,
+		       afex_stats->tx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_transmitted_hi,
+		       afex_stats->tx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_transmitted_hi,
+		       afex_stats->tx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_unicast_frames_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       afex_stats->tx_unicast_frames_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       afex_stats->tx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_frames_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       afex_stats->tx_multicast_frames_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_frames_dropped_hi,
+		       qstats->total_transmitted_dropped_packets_error_hi,
+		       afex_stats->tx_frames_dropped_lo,
+		       qstats->total_transmitted_dropped_packets_error_lo);
+	}
+
+	/* now add FCoE statistics which are collected separately
+	 * (both offloaded and non offloaded)
+	 */
+	if (!NO_FCOE(bp)) {
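+		/* FCoE counters that are only 32 (or 16) bits wide are added
+		 * with a zero high word (LE32_0/LE16_0)
+		 */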
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+			  afex_stats->rx_broadcast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+			  afex_stats->rx_multicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_broadcast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_multicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->checksum_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->pkts_too_big_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->ttl0_discard);
+
+		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
+			    LE16_0,
+			    afex_stats->rx_frames_dropped_lo,
+			    fcoe_q_tstorm_stats->no_buff_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+			  afex_stats->tx_broadcast_bytes_lo,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+			  afex_stats->tx_multicast_bytes_lo,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_broadcast_frames_lo,
+			  fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_multicast_frames_lo,
+			  fcoe_q_xstorm_stats->mcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->tx_frames_dropped_lo,
+			  fcoe_q_xstorm_stats->error_drop_pkts);
+	}
+
+	/* if port stats are requested, add them to the PMF
+	 * stats, as they will anyway be accumulated by the
+	 * MCP before being sent to the switch
+	 */
+	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->mac_filter_discard);
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->brb_truncate_discard);
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       0,
+		       afex_stats->rx_frames_discarded_lo,
+		       estats->mac_discard);
+	}
+}
+
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+			  void (func_to_exec)(void *cookie),
+			  void *cookie)
+{
+	int cnt = 10, rc = 0;
+
+	/* Wait for statistics to end [while blocking further requests],
+	 * then run supplied function 'safely'.
+	 */
+	rc = down_timeout(&bp->stats_lock, HZ / 10);
+	if (unlikely(rc)) {
+		BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+		goto out_no_lock;
+	}
+
+	bnx2x_stats_comp(bp);
+	while (bp->stats_pending && cnt--)
+		if (bnx2x_storm_stats_update(bp))
+			usleep_range(1000, 2000);
+	if (bp->stats_pending) {
+		BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
+	func_to_exec(cookie);
+
+out:
+	/* No need to restart statistics - if they're enabled, the timer
+	 * will restart the statistics.
+	 */
+	up(&bp->stats_lock);
+out_no_lock:
+	return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
new file mode 100644
index 0000000..b2644ed
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -0,0 +1,557 @@
+/* bnx2x_stats.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct nig_stats {
+	u32 brb_discard;
+	u32 brb_packet;
+	u32 brb_truncate;
+	u32 flow_ctrl_discard;
+	u32 flow_ctrl_octets;
+	u32 flow_ctrl_packet;
+	u32 mng_discard;
+	u32 mng_octet_inp;
+	u32 mng_octet_out;
+	u32 mng_packet_inp;
+	u32 mng_packet_out;
+	u32 pbf_octets;
+	u32 pbf_packet;
+	u32 safc_inp;
+	u32 egress_mac_pkt0_lo;
+	u32 egress_mac_pkt0_hi;
+	u32 egress_mac_pkt1_lo;
+	u32 egress_mac_pkt1_hi;
+};
+
+enum bnx2x_stats_event {
+	STATS_EVENT_PMF = 0,
+	STATS_EVENT_LINK_UP,
+	STATS_EVENT_UPDATE,
+	STATS_EVENT_STOP,
+	STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+	STATS_STATE_DISABLED = 0,
+	STATS_STATE_ENABLED,
+	STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 error_bytes_received_hi;
+	u32 error_bytes_received_lo;
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 rx_stat_ifhcinbadoctets_hi;
+	u32 rx_stat_ifhcinbadoctets_lo;
+	u32 tx_stat_ifhcoutbadoctets_hi;
+	u32 tx_stat_ifhcoutbadoctets_lo;
+	u32 rx_stat_dot3statsfcserrors_hi;
+	u32 rx_stat_dot3statsfcserrors_lo;
+	u32 rx_stat_dot3statsalignmenterrors_hi;
+	u32 rx_stat_dot3statsalignmenterrors_lo;
+	u32 rx_stat_dot3statscarriersenseerrors_hi;
+	u32 rx_stat_dot3statscarriersenseerrors_lo;
+	u32 rx_stat_falsecarriererrors_hi;
+	u32 rx_stat_falsecarriererrors_lo;
+	u32 rx_stat_etherstatsundersizepkts_hi;
+	u32 rx_stat_etherstatsundersizepkts_lo;
+	u32 rx_stat_dot3statsframestoolong_hi;
+	u32 rx_stat_dot3statsframestoolong_lo;
+	u32 rx_stat_etherstatsfragments_hi;
+	u32 rx_stat_etherstatsfragments_lo;
+	u32 rx_stat_etherstatsjabbers_hi;
+	u32 rx_stat_etherstatsjabbers_lo;
+	u32 rx_stat_maccontrolframesreceived_hi;
+	u32 rx_stat_maccontrolframesreceived_lo;
+	u32 rx_stat_bmac_xpf_hi;
+	u32 rx_stat_bmac_xpf_lo;
+	u32 rx_stat_bmac_xcf_hi;
+	u32 rx_stat_bmac_xcf_lo;
+	u32 rx_stat_xoffstateentered_hi;
+	u32 rx_stat_xoffstateentered_lo;
+	u32 rx_stat_xonpauseframesreceived_hi;
+	u32 rx_stat_xonpauseframesreceived_lo;
+	u32 rx_stat_xoffpauseframesreceived_hi;
+	u32 rx_stat_xoffpauseframesreceived_lo;
+	u32 tx_stat_outxonsent_hi;
+	u32 tx_stat_outxonsent_lo;
+	u32 tx_stat_outxoffsent_hi;
+	u32 tx_stat_outxoffsent_lo;
+	u32 tx_stat_flowcontroldone_hi;
+	u32 tx_stat_flowcontroldone_lo;
+	u32 tx_stat_etherstatscollisions_hi;
+	u32 tx_stat_etherstatscollisions_lo;
+	u32 tx_stat_dot3statssinglecollisionframes_hi;
+	u32 tx_stat_dot3statssinglecollisionframes_lo;
+	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+	u32 tx_stat_dot3statsdeferredtransmissions_hi;
+	u32 tx_stat_dot3statsdeferredtransmissions_lo;
+	u32 tx_stat_dot3statsexcessivecollisions_hi;
+	u32 tx_stat_dot3statsexcessivecollisions_lo;
+	u32 tx_stat_dot3statslatecollisions_hi;
+	u32 tx_stat_dot3statslatecollisions_lo;
+	u32 tx_stat_etherstatspkts64octets_hi;
+	u32 tx_stat_etherstatspkts64octets_lo;
+	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	u32 tx_stat_etherstatspktsover1522octets_hi;
+	u32 tx_stat_etherstatspktsover1522octets_lo;
+	u32 tx_stat_bmac_2047_hi;
+	u32 tx_stat_bmac_2047_lo;
+	u32 tx_stat_bmac_4095_hi;
+	u32 tx_stat_bmac_4095_lo;
+	u32 tx_stat_bmac_9216_hi;
+	u32 tx_stat_bmac_9216_lo;
+	u32 tx_stat_bmac_16383_hi;
+	u32 tx_stat_bmac_16383_lo;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+	u32 tx_stat_bmac_ufl_hi;
+	u32 tx_stat_bmac_ufl_lo;
+
+	u32 pause_frames_received_hi;
+	u32 pause_frames_received_lo;
+	u32 pause_frames_sent_hi;
+	u32 pause_frames_sent_lo;
+
+	u32 etherstatspkts1024octetsto1522octets_hi;
+	u32 etherstatspkts1024octetsto1522octets_lo;
+	u32 etherstatspktsover1522octets_hi;
+	u32 etherstatspktsover1522octets_lo;
+
+	u32 brb_drop_hi;
+	u32 brb_drop_lo;
+	u32 brb_truncate_hi;
+	u32 brb_truncate_lo;
+
+	u32 mac_filter_discard;
+	u32 mf_tag_discard;
+	u32 brb_truncate_discard;
+	u32 mac_discard;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 nig_timer_max;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+
+	/* PFC */
+	u32 pfc_frames_received_hi;
+	u32 pfc_frames_received_lo;
+	u32 pfc_frames_sent_hi;
+	u32 pfc_frames_sent_lo;
+
+	/* Recovery */
+	u32 recoverable_error;
+	u32 unrecoverable_error;
+	u32 driver_filtered_tx_pkt;
+	/* src: Clear-on-Read register; Will not survive PMF Migration */
+	u32 eee_tx_lpi;
+};
+
+struct bnx2x_eth_q_stats {
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+	u32 driver_filtered_tx_pkt;
+};
+
+struct bnx2x_eth_stats_old {
+	u32 rx_stat_dot3statsframestoolong_hi;
+	u32 rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+	/* Fields to preserve over fw reset */
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+
+	/* Fields to preserve the last value of */
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 total_tpa_bytes_hi_old;
+	u32 total_tpa_bytes_lo_old;
+
+	u32 driver_xoff_old;
+	u32 rx_err_discard_pkt_old;
+	u32 rx_skb_alloc_failed_old;
+	u32 hw_csum_err_old;
+	u32 driver_filtered_tx_pkt_old;
+};
+
+struct bnx2x_net_stats_old {
+	 u32 rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+	 u32 mac_filter_discard;
+	 u32 mf_tag_discard;
+	 u32 brb_truncate_discard;
+	 u32 mac_discard;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
+
+#define LE32_0 ((__force __le32) 0)
+#define LE16_0 ((__force __le16) 0)
+
+/* The __force is for cases where the high value is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le32_to_cpu(a_hi_le), \
+		       s_lo, le32_to_cpu(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le16_to_cpu(a_hi_le), \
+		       s_lo, le16_to_cpu(a_lo_le))
+
+/* difference = minuend - subtrahend (clamped to zero on underflow) */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
+
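+/* Update a 64-bit MAC statistic: compute the delta between the new HW
+ * reading and the snapshot in mac_stx[0], store the new reading as the
+ * snapshot and accumulate the delta into the running total in mac_stx[1].
+ */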
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
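+/* Same idea for NIG counters: accumulate the delta between the new and
+ * the previous reading into the driver's estats.
+ */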
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define ADD_STAT64(diff, t) \
+	do { \
+		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
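+/* Extend a 16/32-bit TSTORM client counter to 64 bits: compute the delta
+ * since the previous reading, refresh the snapshot and add the delta to
+ * the queue statistic.
+ */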
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
+	do { \
+		diff = le##size##_to_cpu(tclient->s) - \
+		       le##size##_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
+	do { \
+		UPDATE_EXTEND_TSTAT_X(s, t, size); \
+		ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+	do { \
+		UPDATE_EXTEND_USTAT(s, t); \
+		ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
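+/* Rebuild a 64-bit queue statistic on top of the value preserved across
+ * the last FW reset (qstats_old), adding the firmware's little-endian
+ * hi/lo counter and propagating the carry from the low word.
+ */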
+#define UPDATE_QSTAT(s, t) \
+	do { \
+		qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+		qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+			+ ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
+	} while (0)
+
+#define UPDATE_QSTAT_OLD(f) \
+	do { \
+		qstats_old->f = qstats->f; \
+	} while (0)
+
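+/* Fold a 64-bit queue statistic into the function-wide estats: add the
+ * current queue value, subtract what was already accounted for on the
+ * previous pass and remember the current value for the next pass.
+ */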
+#define UPDATE_ESTAT_QSTAT_64(s) \
+	do { \
+		ADD_64(estats->s##_hi, qstats->s##_hi, \
+		       estats->s##_lo, qstats->s##_lo); \
+		SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+		       estats->s##_lo, qstats_old->s##_lo_old); \
+		qstats_old->s##_hi_old = qstats->s##_hi; \
+		qstats_old->s##_lo_old = qstats->s##_lo; \
+	} while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+	do { \
+		estats->s += qstats->s; \
+		estats->s -= qstats_old->s##_old; \
+		qstats_old->s##_old = qstats->s; \
+	} while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+	do { \
+		ADD_64(fstats->s##_hi, qstats->s##_hi, \
+		       fstats->s##_lo, qstats->s##_lo); \
+		SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+		       fstats->s##_lo, qstats_old->s##_lo); \
+		estats->s##_hi = fstats->s##_hi; \
+		estats->s##_lo = fstats->s##_lo; \
+		qstats_old->s##_hi = qstats->s##_hi; \
+		qstats_old->s##_lo = qstats->s##_lo; \
+	} while (0)
+
+#define UPDATE_FW_STAT(s) \
+	do { \
+		estats->s = le32_to_cpu(tport->s) + fwstats->s; \
+	} while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+	do { \
+		fwstats->f = estats->f; \
+	} while (0)
+
+#define UPDATE_ESTAT(s, t) \
+	do { \
+		SUB_64(estats->s##_hi, estats_old->t##_hi, \
+		       estats->s##_lo, estats_old->t##_lo); \
+		ADD_64(estats->s##_hi, estats->t##_hi, \
+		       estats->s##_lo, estats->t##_lo); \
+		estats_old->t##_hi = estats->t##_hi; \
+		estats_old->t##_lo = estats->t##_lo; \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* forward */
+struct bnx2x;
+
+void bnx2x_memset_stats(struct bnx2x *bp);
+void bnx2x_stats_init(struct bnx2x *bp);
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+			  void (func_to_exec)(void *cookie),
+			  void *cookie);
+
+/**
+ * bnx2x_save_statistics - save statistics when unloading.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_save_statistics(struct bnx2x *bp);
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type);
+#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 0000000..1374e53
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,2288 @@
+/* bnx2x_vfpf.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ *	       Ariel Elior <ariel.elior@qlogic.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
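+/* A VF-PF request is assembled in the shared mailbox as a list of TLVs:
+ * bnx2x_vfpf_prep() clears the mailbox and places the first TLV,
+ * bnx2x_add_tlv() appends further TLVs (the list always ends with a
+ * CHANNEL_TLV_LIST_END TLV), bnx2x_send_msg2pf() hands the request to the
+ * PF and bnx2x_vfpf_finalize() releases the mailbox.
+ */
+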
+/* place a given tlv on the tlv buffer at a given offset */
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+			  u16 offset, u16 type, u16 length)
+{
+	struct channel_tlv *tl =
+		(struct channel_tlv *)(tlvs_list + offset);
+
+	tl->type = type;
+	tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+			    u16 type, u16 length)
+{
+	mutex_lock(&bp->vf2pf_mutex);
+
+	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+	   type);
+
+	/* Clear mailbox */
+	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+	/* init type and length */
+	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+	/* init first tlv header */
+	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
+
+/* releases the mailbox */
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+				struct vfpf_first_tlv *first_tlv)
+{
+	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+	   first_tlv->tl.type);
+
+	mutex_unlock(&bp->vf2pf_mutex);
+}
+
+/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+				   enum channel_tlvs req_tlv)
+{
+	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+	do {
+		if (tlv->type == req_tlv)
+			return tlv;
+
+		if (!tlv->length) {
+			BNX2X_ERR("Found TLV with length 0\n");
+			return NULL;
+		}
+
+		tlvs_list += tlv->length;
+		tlv = (struct channel_tlv *)tlvs_list;
+	} while (tlv->type != CHANNEL_TLV_LIST_END);
+
+	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+	return NULL;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+	int i = 1;
+	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+	while (tlv->type != CHANNEL_TLV_LIST_END) {
+		/* output tlv */
+		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+		   tlv->type, tlv->length);
+
+		/* advance to next tlv */
+		tlvs_list += tlv->length;
+
+		/* cast general tlv list pointer to channel tlv header */
+		tlv = (struct channel_tlv *)tlvs_list;
+
+		i++;
+
+		/* break condition for this loop */
+		if (i > MAX_TLVS_IN_LIST) {
+			WARN(true, "corrupt tlvs");
+			return;
+		}
+	}
+
+	/* output last tlv */
+	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+	   tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+	switch (rc) {
+	case 0:
+		return PFVF_STATUS_SUCCESS;
+	case -ENOMEM:
+		return PFVF_STATUS_NO_RESOURCE;
+	default:
+		return PFVF_STATUS_FAILURE;
+	}
+}
+
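+/* Hand a prepared request to the PF: write the request's DMA address into
+ * the cstorm VF zone, trigger the PF firmware and poll the 'done' status
+ * byte that the PF writes back into the response.
+ */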
+static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+	struct cstorm_vf_zone_data __iomem *zone_data =
+		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+	int tout = 100, interval = 100; /* wait for 10 seconds */
+
+	if (*done) {
+		BNX2X_ERR("done was non zero before message to pf was sent\n");
+		WARN_ON(true);
+		return -EINVAL;
+	}
+
+	/* if the PF indicated the channel is down, avoid sending the message.
+	 * Return success so the calling flow can continue
+	 */
+	bnx2x_sample_bulletin(bp);
+	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
+		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
+		*done = PFVF_STATUS_SUCCESS;
+		return -EINVAL;
+	}
+
+	/* Write message address */
+	writel(U64_LO(msg_mapping),
+	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+	writel(U64_HI(msg_mapping),
+	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+	/* make sure the address is written before FW accesses it */
+	wmb();
+
+	/* Trigger the PF FW */
+	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+	/* Wait for PF to complete */
+	while ((tout >= 0) && (!*done)) {
+		msleep(interval);
+		tout -= 1;
+
+		/* progress indicator - HV can take its own sweet time in
+		 * answering VFs...
+		 */
+		DP_CONT(BNX2X_MSG_IOV, ".");
+	}
+
+	if (!*done) {
+		BNX2X_ERR("PF response has timed out\n");
+		return -EAGAIN;
+	}
+	DP(BNX2X_MSG_SP, "Got a response from PF\n");
+	return 0;
+}
+
+static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+	u32 me_reg;
+	int tout = 10, interval = 100; /* Wait for 1 sec */
+
+	do {
+		/* pxp traps vf read of doorbells and returns me reg value */
+		me_reg = readl(bp->doorbells);
+		if (GOOD_ME_REG(me_reg))
+			break;
+
+		msleep(interval);
+
+		BNX2X_ERR("Invalid ME register value: 0x%08x. Is the PF driver up?\n",
+			  me_reg);
+	} while (tout-- > 0);
+
+	if (!GOOD_ME_REG(me_reg)) {
+		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
+
+	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+	return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+	int rc = 0, attempts = 0;
+	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
+	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
+	u32 vf_id;
+	bool resources_acquired = false;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+	if (bnx2x_get_vf_id(bp, &vf_id)) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	req->vfdev_info.vf_id = vf_id;
+	req->vfdev_info.vf_os = 0;
+	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
+	req->resc_request.num_rxqs = rx_count;
+	req->resc_request.num_txqs = tx_count;
+	req->resc_request.num_sbs = bp->igu_sb_cnt;
+	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
+
+	/* pf 2 vf bulletin board address */
+	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+	/* Request physical port identifier */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
+	/* Bulletin support for bulletin board with length > legacy length */
+	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+	/* vlan filtering is supported */
+	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req,
+		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
+		      CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	while (!resources_acquired) {
+		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+		/* send acquire request */
+		rc = bnx2x_send_msg2pf(bp,
+				       &resp->hdr.status,
+				       bp->vf2pf_mbox_mapping);
+
+		/* PF timeout */
+		if (rc)
+			goto out;
+
+		/* copy acquire response from buffer to bp */
+		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+		attempts++;
+
+		/* test whether the PF accepted our request. If not, humble
+		 * the request and try again.
+		 */
+		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+			DP(BNX2X_MSG_SP, "resources acquired\n");
+			resources_acquired = true;
+		} else if (bp->acquire_resp.hdr.status ==
+			   PFVF_STATUS_NO_RESOURCE &&
+			   attempts < VF_ACQUIRE_THRESH) {
+			DP(BNX2X_MSG_SP,
+			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+			/* humble our request */
+			req->resc_request.num_txqs =
+				min(req->resc_request.num_txqs,
+				    bp->acquire_resp.resc.num_txqs);
+			req->resc_request.num_rxqs =
+				min(req->resc_request.num_rxqs,
+				    bp->acquire_resp.resc.num_rxqs);
+			req->resc_request.num_sbs =
+				min(req->resc_request.num_sbs,
+				    bp->acquire_resp.resc.num_sbs);
+			req->resc_request.num_mac_filters =
+				min(req->resc_request.num_mac_filters,
+				    bp->acquire_resp.resc.num_mac_filters);
+			req->resc_request.num_vlan_filters =
+				min(req->resc_request.num_vlan_filters,
+				    bp->acquire_resp.resc.num_vlan_filters);
+			req->resc_request.num_mc_filters =
+				min(req->resc_request.num_mc_filters,
+				    bp->acquire_resp.resc.num_mc_filters);
+
+			/* Clear response buffer */
+			memset(&bp->vf2pf_mbox->resp, 0,
+			       sizeof(union pfvf_tlvs));
+		} else {
+			/* Determine the reason the PF failed the acquire process */
+			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+							    CHANNEL_TLV_FP_HSI_SUPPORT);
+			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
+				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
+			else
+				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+					  bp->acquire_resp.hdr.status);
+			rc = -EAGAIN;
+			goto out;
+		}
+	}
+
+	/* Retrieve physical port id (if possible) */
+	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+			 bnx2x_search_tlv_list(bp, resp,
+					       CHANNEL_TLV_PHYS_PORT_ID);
+	if (phys_port_resp) {
+		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+		bp->flags |= HAS_PHYS_PORT_ID;
+	}
+
+	/* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
+	 * If that's the case, we need to make certain required FW was
+	 * supported by such a hypervisor [i.e., v0-v2].
+	 */
+	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+					    CHANNEL_TLV_FP_HSI_SUPPORT);
+	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
+		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
+
+		/* Since acquire succeeded on the PF side, we need to send a
+		 * release message in order to allow future probes.
+		 */
+		bnx2x_vfpf_finalize(bp, &req->first_tlv);
+		bnx2x_vfpf_release(bp);
+
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* get HW info */
+	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+	bp->link_params.chip_id = bp->common.chip_id;
+	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+	bp->common.int_block = INT_BLOCK_IGU;
+	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+	bp->igu_dsb_id = -1;
+	bp->mf_ov = 0;
+	bp->mf_mode = 0;
+	bp->common.flash_size = 0;
+	bp->flags |=
+		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
+	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
+	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+		sizeof(bp->fw_ver));
+
+	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+		memcpy(bp->dev->dev_addr,
+		       bp->acquire_resp.resc.current_mac_addr,
+		       ETH_ALEN);
+
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+	return rc;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	u32 rc, vf_id;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+	if (bnx2x_get_vf_id(bp, &vf_id)) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	req->vf_id = vf_id;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send release request */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+	if (rc)
+		/* PF timeout */
+		goto out;
+
+	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+		/* PF released us */
+		DP(BNX2X_MSG_SP, "vf released\n");
+	} else {
+		/* PF reports error */
+		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
+			  resp->hdr.status);
+		rc = -EAGAIN;
+		goto out;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc, i;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+	/* status blocks */
+	for_each_eth_queue(bp, i)
+		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+						       status_blk_mapping);
+
+	/* statistics - the request only supports a single queue for now */
+	req->stats_addr = bp->fw_stats_data_mapping +
+			  offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+	req->stats_stride = sizeof(struct per_queue_stats);
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc)
+		goto out;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+			  resp->hdr.status);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int i, rc;
+	u32 vf_id;
+
+	/* If we haven't got a valid VF id, there is no sense in
+	 * continuing to send messages
+	 */
+	if (bnx2x_get_vf_id(bp, &vf_id))
+		goto free_irq;
+
+	/* Close the queues */
+	for_each_queue(bp, i)
+		bnx2x_vfpf_teardown_queue(bp, i);
+
+	/* remove mac */
+	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+	req->vf_id = vf_id;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+	if (rc)
+		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+			  resp->hdr.status);
+
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+free_irq:
+	/* Disable HW interrupts, NAPI */
+	bnx2x_netif_stop(bp, 0);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+}
+
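+/* Set up the classification objects (MAC, VLAN, VLAN-MAC, multicast and
+ * RSS) that are driven through the leading VF queue.
+ */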
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+	/* mac */
+	bnx2x_init_mac_obj(bp, &q->mac_obj,
+			   cl_id, q->cid, func_id,
+			   bnx2x_vf_sp(bp, vf, mac_rdata),
+			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &vf->filter_state,
+			   BNX2X_OBJ_TYPE_RX_TX,
+			   &vf->vf_macs_pool);
+	/* vlan */
+	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+			    cl_id, q->cid, func_id,
+			    bnx2x_vf_sp(bp, vf, vlan_rdata),
+			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+			    BNX2X_FILTER_VLAN_PENDING,
+			    &vf->filter_state,
+			    BNX2X_OBJ_TYPE_RX_TX,
+			    &vf->vf_vlans_pool);
+	/* vlan-mac */
+	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+				cl_id, q->cid, func_id,
+				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+				BNX2X_FILTER_VLAN_MAC_PENDING,
+				&vf->filter_state,
+				BNX2X_OBJ_TYPE_RX_TX,
+				&vf->vf_macs_pool,
+				&vf->vf_vlans_pool);
+	/* mcast */
+	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+			     q->cid, func_id, func_id,
+			     bnx2x_vf_sp(bp, vf, mcast_rdata),
+			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING,
+			     &vf->filter_state,
+			     BNX2X_OBJ_TYPE_RX_TX);
+
+	/* rss */
+	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+				  func_id, func_id,
+				  bnx2x_vf_sp(bp, vf, rss_rdata),
+				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING,
+				  &vf->filter_state,
+				  BNX2X_OBJ_TYPE_RX_TX);
+
+	vf->leading_rss = cl_id;
+	q->is_leading = true;
+	q->sp_initialized = true;
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool is_leading)
+{
+	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	u8 fp_idx = fp->index;
+	u16 tpa_agg_size = 0, flags = 0;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+	/* select tpa mode to request */
+	if (fp->mode != TPA_MODE_DISABLED) {
+		flags |= VFPF_QUEUE_FLG_TPA;
+		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+		if (fp->mode == TPA_MODE_GRO)
+			flags |= VFPF_QUEUE_FLG_TPA_GRO;
+		tpa_agg_size = TPA_AGG_SIZE;
+	}
+
+	if (is_leading)
+		flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
+	/* calculate queue flags */
+	flags |= VFPF_QUEUE_FLG_STATS;
+	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+	flags |= VFPF_QUEUE_FLG_VLAN;
+
+	/* Common */
+	req->vf_qid = fp_idx;
+	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+	/* Rx */
+	req->rxq.rcq_addr = fp->rx_comp_mapping;
+	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+	req->rxq.rxq_addr = fp->rx_desc_mapping;
+	req->rxq.sge_addr = fp->rx_sge_mapping;
+	req->rxq.vf_sb = fp_idx;
+	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
+	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+	req->rxq.mtu = bp->dev->mtu;
+	req->rxq.buf_sz = fp->rx_buf_size;
+	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+	req->rxq.tpa_agg_sz = tpa_agg_size;
+	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+	req->rxq.flags = flags;
+	req->rxq.drop_flags = 0;
+	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+	req->rxq.stat_id = -1; /* No stats at the moment */
+
+	/* Tx */
+	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+	req->txq.vf_sb = fp_idx;
+	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+	req->txq.flags = flags;
+	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc)
+		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+			  fp_idx);
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+			  fp_idx, resp->hdr.status);
+		rc = -EINVAL;
+	}
+
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+			sizeof(*req));
+
+	req->vf_qid = qidx;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+	if (rc) {
+		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+			  rc);
+		goto out;
+	}
+
+	/* PF failed the transaction */
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+			  resp->hdr.status);
+		rc = -EINVAL;
+	}
+
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+{
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+	int rc = 0;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+	req->vf_qid = vf_qid;
+	req->n_mac_vlan_filters = 1;
+
+	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
+	if (set)
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+	/* sample bulletin board for new mac */
+	bnx2x_sample_bulletin(bp);
+
+	/* copy mac from device to request */
+	memcpy(req->filters[0].mac, addr, ETH_ALEN);
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	/* failure may mean PF was configured with a new mac for us */
+	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+		DP(BNX2X_MSG_IOV,
+		   "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+		/* copy mac from bulletin to device */
+		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+
+		/* check if bulletin board was updated */
+		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+			/* copy mac from device to request */
+			memcpy(req->filters[0].mac, bp->dev->dev_addr,
+			       ETH_ALEN);
+
+			/* send message to pf */
+			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+					       bp->vf2pf_mbox_mapping);
+		} else {
+			/* no new info in bulletin */
+			break;
+		}
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* request pf to config rss table for vf queues*/
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *params)
+{
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+	int rc = 0;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+			sizeof(*req));
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+	req->rss_key_size = T_ETH_RSS_KEY;
+	req->rss_result_mask = params->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+		req->rss_flags |= VFPF_RSS_SET_SRCH;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+		req->rss_flags |= VFPF_RSS_IPV4;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+		req->rss_flags |= VFPF_RSS_IPV4_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+		req->rss_flags |= VFPF_RSS_IPV4_UDP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+		req->rss_flags |= VFPF_RSS_IPV6;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+		req->rss_flags |= VFPF_RSS_IPV6_TCP;
+	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+		req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		/* Since older drivers don't support this feature (and VF has
+		 * no way of knowing other than failing this), don't propagate
+		 * an error in this case.
+		 */
+		DP(BNX2X_MSG_IOV,
+		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
+		   resp->hdr.status);
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc, i = 0;
+	struct netdev_hw_addr *ha;
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return -EINVAL;
+	}
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	/* Get Rx mode requested */
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+	netdev_for_each_mc_addr(ha, dev) {
+		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+		   bnx2x_mc_addr(ha));
+		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+		i++;
+	}
+
+	/* We support no more than PFVF_MAX_MULTICAST_PER_VF multicast
+	 * addresses
+	 */
+	if (i >= PFVF_MAX_MULTICAST_PER_VF) {
+		DP(NETIF_MSG_IFUP,
+		   "VF supports no more than %d multicast MAC addresses\n",
+		   PFVF_MAX_MULTICAST_PER_VF);
+		return -EINVAL;
+	}
+
+	req->n_multicast = i;
+	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+	req->vf_qid = 0;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("Sending a message failed: %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+			  resp->hdr.status);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc = 0;
+
+	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+		return 0;
+	}
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+	req->vf_qid = vf_qid;
+	req->n_mac_vlan_filters = 1;
+
+	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+	if (add)
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+	/* sample bulletin board for hypervisor vlan */
+	bnx2x_sample_bulletin(bp);
+
+	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+		BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	req->filters[0].vlan_tag = vid;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+			  vid);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+	int mode = bp->rx_mode;
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+	/* Ignore everything except MODE_NONE */
+	if (mode == BNX2X_RX_MODE_NONE) {
+		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+	} else {
+		/* The current PF driver will not look at the specific flags,
+		 * but they are required when working with older drivers on
+		 * the hypervisor.
+		 */
+		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+		if (mode == BNX2X_RX_MODE_PROMISC)
+			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+	}
+
+	if (bp->accept_any_vlan)
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
+	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+	req->vf_qid = 0;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc)
+		BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+		rc = -EINVAL;
+	}
+
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
+/* General service functions */
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+	REG_WR8(bp, addr, 1);
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+	/* enable the mailbox in the FW */
+	storm_memset_vf_mbx_ack(bp, abs_vfid);
+	storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+	/* enable the VF access to the mailbox */
+	bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* DMAE copy between PF and VF memory; works only on non-E1x chips, as E1x
+ * does not support VFs
+ */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+				u32 vf_addr_lo, u32 len32)
+{
+	struct dmae_command dmae;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Chip revision does not support VFs\n");
+		return DMAE_NOT_RDY;
+	}
+
+	if (!bp->dmae_ready) {
+		BNX2X_ERR("DMAE is not ready, can not copy\n");
+		return DMAE_NOT_RDY;
+	}
+
+	/* set opcode and fixed command fields */
+	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
+	if (from_vf) {
+		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+		dmae.src_addr_lo = vf_addr_lo;
+		dmae.src_addr_hi = vf_addr_hi;
+		dmae.dst_addr_lo = U64_LO(pf_addr);
+		dmae.dst_addr_hi = U64_HI(pf_addr);
+	} else {
+		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+		dmae.src_addr_lo = U64_LO(pf_addr);
+		dmae.src_addr_hi = U64_HI(pf_addr);
+		dmae.dst_addr_lo = vf_addr_lo;
+		dmae.dst_addr_hi = vf_addr_hi;
+	}
+	dmae.len = len32;
+
+	/* issue the command and wait for completion */
+	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+}
+
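+/* Place a single response TLV (sized according to the request type) and
+ * the list terminator in the PF-side response buffer.
+ */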
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+	u16 length, type;
+
+	/* prepare response */
+	type = mbx->first_tlv.tl.type;
+	length = type == CHANNEL_TLV_ACQUIRE ?
+		sizeof(struct pfvf_acquire_resp_tlv) :
+		sizeof(struct pfvf_general_resp_tlv);
+	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       int vf_rc)
+{
+	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+	dma_addr_t pf_addr;
+	u64 vf_addr;
+	int rc;
+
+	bnx2x_dp_tlv_list(bp, resp);
+	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
+
+	/* send response */
+	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+		  mbx->first_tlv.resp_msg_offset;
+	pf_addr = mbx->msg_mapping +
+		  offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+	/* Copy the response body. The first u64 (the header) is written last,
+	 * since the VF polls on the status field in the header to detect
+	 * completion
+	 */
+	vf_addr += sizeof(u64);
+	pf_addr += sizeof(u64);
+	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+				  U64_HI(vf_addr),
+				  U64_LO(vf_addr),
+				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+	if (rc) {
+		BNX2X_ERR("Failed to copy response body to VF %d\n",
+			  vf->abs_vfid);
+		goto mbx_error;
+	}
+	vf_addr -= sizeof(u64);
+	pf_addr -= sizeof(u64);
+
+	/* ack the FW */
+	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+	mmiowb();
+
+	/* copy the response header including status-done field,
+	 * must be last dmae, must be after FW is acked
+	 */
+	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+				  U64_HI(vf_addr),
+				  U64_LO(vf_addr),
+				  sizeof(u64)/4);
+
+	/* unlock channel mutex */
+	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+	if (rc) {
+		BNX2X_ERR("Failed to copy response status to VF %d\n",
+			  vf->abs_vfid);
+		goto mbx_error;
+	}
+	return;
+
+mbx_error:
+	bnx2x_vf_release(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+			      struct bnx2x_virtf *vf,
+			      int rc)
+{
+	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+					struct bnx2x_virtf *vf,
+					void *buffer,
+					u16 *offset)
+{
+	struct vfpf_port_phys_id_resp_tlv *port_id;
+
+	if (!(bp->flags & HAS_PHYS_PORT_ID))
+		return;
+
+	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+		      sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+	port_id = (struct vfpf_port_phys_id_resp_tlv *)
+		  (((u8 *)buffer) + *offset);
+	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+	/* Offset should continue representing the offset to the tail
+	 * of TLV data (outside this function scope)
+	 */
+	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
+static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf,
+					 void *buffer,
+					 u16 *offset)
+{
+	struct vfpf_fp_hsi_resp_tlv *fp_hsi;
+
+	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
+		      sizeof(struct vfpf_fp_hsi_resp_tlv));
+
+	fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
+		 (((u8 *)buffer) + *offset);
+	fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
+
+	/* Offset should continue representing the offset to the tail
+	 * of TLV data (outside this function scope)
+	 */
+	*offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				      struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+	int i;
+	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+	struct pf_vf_resc *resc = &resp->resc;
+	u8 status = bnx2x_pfvf_status_codes(vfop_status);
+	u16 length;
+
+	memset(resp, 0, sizeof(*resp));
+
+	/* fill in pfdev info */
+	resp->pfdev_info.chip_num = bp->common.chip_id;
+	resp->pfdev_info.db_size = bp->db_size;
+	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+				   PFVF_CAP_TPA |
+				   PFVF_CAP_TPA_UPDATE |
+				   PFVF_CAP_VLAN_FILTER);
+	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+			  sizeof(resp->pfdev_info.fw_ver));
+
+	if (status == PFVF_STATUS_NO_RESOURCE ||
+	    status == PFVF_STATUS_SUCCESS) {
+		/* set the resource numbers; if status equals NO_RESOURCE these
+		 * are the maximum possible numbers
+		 */
+		resc->num_rxqs = vf_rxq_count(vf) ? :
+			bnx2x_vf_max_queue_cnt(bp, vf);
+		resc->num_txqs = vf_txq_count(vf) ? :
+			bnx2x_vf_max_queue_cnt(bp, vf);
+		resc->num_sbs = vf_sb_count(vf);
+		resc->num_mac_filters = vf_mac_rules_cnt(vf);
+		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+		resc->num_mc_filters = 0;
+
+		if (status == PFVF_STATUS_SUCCESS) {
+			/* fill in the allocated resources */
+			struct pf_vf_bulletin_content *bulletin =
+				BP_VF_BULLETIN(bp, vf->index);
+
+			for_each_vfq(vf, i)
+				resc->hw_qid[i] =
+					vfq_qzone_id(vf, vfq_get(vf, i));
+
+			for_each_vf_sb(vf, i) {
+				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+			}
+
+			/* if a mac has been set for this vf, supply it */
+			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+				memcpy(resc->current_mac_addr, bulletin->mac,
+				       ETH_ALEN);
+			}
+		}
+	}
+
+	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+	   vf->abs_vfid,
+	   resp->pfdev_info.chip_num,
+	   resp->pfdev_info.db_size,
+	   resp->pfdev_info.indices_per_sb,
+	   resp->pfdev_info.pf_cap,
+	   resc->num_rxqs,
+	   resc->num_txqs,
+	   resc->num_sbs,
+	   resc->num_mac_filters,
+	   resc->num_vlan_filters,
+	   resc->num_mc_filters,
+	   resp->pfdev_info.fw_ver);
+
+	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+	for (i = 0; i < vf_rxq_count(vf); i++)
+		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+	for (i = 0; i < vf_sb_count(vf); i++)
+		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+			resc->hw_sbs[i].hw_sb_id,
+			resc->hw_sbs[i].sb_qid);
+	DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+	/* prepare response */
+	length = sizeof(struct pfvf_acquire_resp_tlv);
+	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+	/* Handle possible VF requests for physical port identifiers.
+	 * 'length' should continue to indicate the offset of the first empty
+	 * place in the buffer (i.e., where next TLV should be inserted)
+	 */
+	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+				  CHANNEL_TLV_PHYS_PORT_ID))
+		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+	/* `New' VFs will want to know whether the fastpath HSI is supported,
+	 * so that if it is not they can log that their driver version must
+	 * be updated.
+	 */
+	bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
+
+	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* send the response */
+	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
+}
+
+static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
+				       struct vfpf_acquire_tlv *acquire)
+{
+	/* Windows driver does one of three things:
+	 * 1. Old driver doesn't have bulletin board address set.
+	 * 2. 'Middle' driver sends mc_num == 32.
+	 * 3. New driver sets the OS field.
+	 */
+	if (!acquire->bulletin_addr ||
+	    acquire->resc_request.num_mc_filters == 32 ||
+	    ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
+	     VF_OS_WINDOWS))
+		return true;
+
+	return false;
+}
+
+static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf,
+					 struct bnx2x_vf_mbx *mbx)
+{
+	/* Linux drivers which correctly set the doorbell size also
+	 * send a physical port request
+	 */
+	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+				  CHANNEL_TLV_PHYS_PORT_ID))
+		return 0;
+
+	/* Issue does not exist in windows VMs */
+	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+		return 0;
+
+	return -EOPNOTSUPP;
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				 struct bnx2x_vf_mbx *mbx)
+{
+	int rc;
+	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+	/* log vfdef info */
+	DP(BNX2X_MSG_IOV,
+	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+	   acquire->resc_request.num_vlan_filters,
+	   acquire->resc_request.num_mc_filters);
+
+	/* Prevent VFs with old drivers from loading, since they calculate
+	 * CIDs incorrectly and would require a VF-flr [VM reboot] in order
+	 * to recover when being upgraded.
+	 */
+	rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
+	if (rc) {
+		DP(BNX2X_MSG_IOV,
+		   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
+		   vf->abs_vfid);
+		goto out;
+	}
+
+	/* Verify the VF fastpath HSI can be supported by the loaded FW.
+	 * Linux vfs should be oblivious to changes between v0 and v2.
+	 */
+	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+		vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
+	else
+		vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
+				   ETH_FP_HSI_VER_2);
+	if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
+		DP(BNX2X_MSG_IOV,
+		   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
+		   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
+		   ETH_FP_HSI_VERSION);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* acquire the resources */
+	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+	/* store address of vf's bulletin board */
+	vf->bulletin_map = acquire->bulletin_addr;
+	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
+		DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
+		   vf->abs_vfid);
+		vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
+	} else {
+		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
+	}
+
+	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+		   vf->abs_vfid);
+		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+	} else {
+		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+	}
+
+out:
+	/* response */
+	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_init_tlv *init = &mbx->msg->req.init;
+	int rc;
+
+	/* record ghost addresses from vf message */
+	vf->fw_stat_map = init->stats_addr;
+	vf->stats_stride = init->stats_stride;
+	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+	/* set VF multiqueue statistics collection mode */
+	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+		vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
+	/* Update VF's view of link state */
+	if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
+		bnx2x_iov_link_update_vf(bp, vf->index);
+
+	/* response */
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
+				     unsigned long *sp_q_flags)
+{
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
+
+	/* outer vlan removal is set according to PF's multi function mode */
+	if (IS_MF_SD(bp))
+		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				 struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+	struct bnx2x_vf_queue_construct_params qctor;
+	int rc = 0;
+
+	/* verify vf_qid */
+	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+			  setup_q->vf_qid, vf_rxq_count(vf));
+		rc = -EINVAL;
+		goto response;
+	}
+
+	/* tx queues must be setup alongside rx queues thus if the rx queue
+	 * is not marked as valid there's nothing to do.
+	 */
+	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+		unsigned long q_type = 0;
+
+		struct bnx2x_queue_init_params *init_p;
+		struct bnx2x_queue_setup_params *setup_p;
+
+		if (bnx2x_vfq_is_leading(q))
+			bnx2x_leading_vfq_init(bp, vf, q);
+
+		/* re-init the VF operation context */
+		memset(&qctor, 0,
+		       sizeof(struct bnx2x_vf_queue_construct_params));
+		setup_p = &qctor.prep_qsetup;
+		init_p =  &qctor.qstate.params.init;
+
+		/* activate immediately */
+		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+		if (setup_q->param_valid & VFPF_TXQ_VALID) {
+			struct bnx2x_txq_setup_params *txq_params =
+				&setup_p->txq_params;
+
+			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+			/* save sb resource index */
+			q->sb_idx = setup_q->txq.vf_sb;
+
+			/* tx init */
+			init_p->tx.hc_rate = setup_q->txq.hc_rate;
+			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+						 &init_p->tx.flags);
+
+			/* tx setup - flags */
+			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+						 &setup_p->flags);
+
+			/* tx setup - general, nothing */
+
+			/* tx setup - tx */
+			txq_params->dscr_map = setup_q->txq.txq_addr;
+			txq_params->sb_cq_index = setup_q->txq.sb_index;
+			txq_params->traffic_type = setup_q->txq.traffic_type;
+
+			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+						 q->index, q->sb_idx);
+		}
+
+		if (setup_q->param_valid & VFPF_RXQ_VALID) {
+			struct bnx2x_rxq_setup_params *rxq_params =
+							&setup_p->rxq_params;
+
+			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+			/* Note: there is no support for different SBs
+			 * for TX and RX
+			 */
+			q->sb_idx = setup_q->rxq.vf_sb;
+
+			/* rx init */
+			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+						 &init_p->rx.flags);
+
+			/* rx setup - flags */
+			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+						 &setup_p->flags);
+
+			/* rx setup - general */
+			setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+			/* rx setup - rx */
+			rxq_params->drop_flags = setup_q->rxq.drop_flags;
+			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+			rxq_params->sge_map = setup_q->rxq.sge_addr;
+			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+			rxq_params->buf_sz = setup_q->rxq.buf_sz;
+			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+			rxq_params->cache_line_log =
+				setup_q->rxq.cache_line_log;
+			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+			/* rx setup - multicast engine */
+			if (bnx2x_vfq_is_leading(q)) {
+				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+				rxq_params->mcast_engine_id = mcast_id;
+				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+			}
+
+			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+						 q->index, q->sb_idx);
+		}
+		/* complete the preparations */
+		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
+
+		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+		if (rc)
+			goto response;
+	}
+response:
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
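+/* Build a bnx2x_vf_mac_vlan_filters list from the TLV entries whose flags
+ * match type_flag. On success *pfl points to the new list, or is left
+ * untouched if no matching filters were found.
+ */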
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+				     struct bnx2x_virtf *vf,
+				     struct vfpf_set_q_filters_tlv *tlv,
+				     struct bnx2x_vf_mac_vlan_filters **pfl,
+				     u32 type_flag)
+{
+	int i, j;
+	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+	size_t fsz;
+
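+	/* room for the list header plus one entry per filter in the request */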
+	fsz = tlv->n_mac_vlan_filters *
+	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
+	      sizeof(struct bnx2x_vf_mac_vlan_filters);
+
+	fl = kzalloc(fsz, GFP_KERNEL);
+	if (!fl)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+		if ((msg_filter->flags & type_flag) != type_flag)
+			continue;
+		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
+			fl->filters[j].mac = msg_filter->mac;
+			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+		}
+		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
+			fl->filters[j].vid = msg_filter->vlan_tag;
+			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
+		}
+		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
+		fl->count++;
+		j++;
+	}
+	if (!fl->count)
+		kfree(fl);
+	else
+		*pfl = fl;
+
+	return 0;
+}
+
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+				    u32 flags)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < filters->n_mac_vlan_filters; i++)
+		if ((filters->filters[i].flags & flags) == flags)
+			cnt++;
+
+	return cnt;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+				       struct vfpf_q_mac_vlan_filter *filter)
+{
+	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+	DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+				       struct vfpf_set_q_filters_tlv *filters)
+{
+	int i;
+
+	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+		for (i = 0; i < filters->n_mac_vlan_filters; i++)
+			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+						 &filters->filters[i]);
+
+	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+		for (i = 0; i < filters->n_multicast; i++)
+			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
+
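+/* Apply the filter configuration carried in a SET_Q_FILTERS request:
+ * vlan-mac pairs, plain mac filters, the rx accept mode and the multicast
+ * list, in that order.
+ */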
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc = 0;
+
+	struct vfpf_set_q_filters_tlv *msg =
+		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+	/* check for any mac/vlan changes */
+	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+		struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+
+		/* build vlan-mac list */
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_MAC_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+
+			/* set vlan-mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
+		/* build mac list */
+		fl = NULL;
+
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_MAC_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+			/* set mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
+	}
+
+	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+		unsigned long accept = 0;
+		struct pf_vf_bulletin_content *bulletin =
+					BP_VF_BULLETIN(bp, vf->index);
+
+		/* Ignore VF requested mode; instead set a regular mode */
+		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+		}
+
+		/* any_vlan is not configured if the HV is forcing a VLAN;
+		 * any_vlan is configured if
+		 *   1. VF does not support vlan filtering
+		 *   OR
+		 *   2. VF supports vlan filtering and explicitly requested it
+		 */
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+		/* set rx-mode */
+		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+		if (rc)
+			goto op_err;
+	}
+
+	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+		/* set mcasts */
+		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+				    msg->n_multicast, false);
+		if (rc)
+			goto op_err;
+	}
+op_err:
+	if (rc)
+		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+			  vf->abs_vfid, msg->vf_qid, rc);
+	return rc;
+}
+
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+				      struct bnx2x_virtf *vf,
+				      struct vfpf_set_q_filters_tlv *filters)
+{
+	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+	int rc = 0;
+
+	/* if a mac was already set for this VF via the set vf mac ndo, we only
+	 * accept mac configurations of that mac. Why accept them at all?
+	 * Because the PF may have been unable to configure the mac at the
+	 * time, since the queue was not yet set up.
+	 */
+	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+		struct vfpf_q_mac_vlan_filter *filter = NULL;
+		int i;
+
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (!(filters->filters[i].flags &
+			      VFPF_Q_FILTER_DEST_MAC_VALID))
+				continue;
+
+			/* once a mac was set by ndo can only accept
+			 * a single mac...
+			 */
+			if (filter) {
+				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+					  vf->abs_vfid,
+					  filters->n_mac_vlan_filters);
+				rc = -EPERM;
+				goto response;
+			}
+
+			filter = &filters->filters[i];
+		}
+
+		/* ...and only the mac set by the ndo */
+		if (filter &&
+		    !ether_addr_equal(filter->mac, bulletin->mac)) {
+			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+				  vf->abs_vfid);
+
+			rc = -EPERM;
+			goto response;
+		}
+	}
+
+response:
+	return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct vfpf_set_q_filters_tlv *filters)
+{
+	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+	int rc = 0;
+
+	/* if vlan was set by hypervisor we don't allow guest to config vlan */
+	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+		/* search for vlan filters */
+
+		if (bnx2x_vf_filters_contain(filters,
+					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+				  vf->abs_vfid);
+			rc = -EPERM;
+			goto response;
+		}
+	}
+
+	/* verify vf_qid */
+	if (filters->vf_qid > vf_rxq_count(vf)) {
+		rc = -EPERM;
+		goto response;
+	}
+
+response:
+	return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+				       struct bnx2x_virtf *vf,
+				       struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+	int rc;
+
+	rc = bnx2x_filters_validate_mac(bp, vf, filters);
+	if (rc)
+		goto response;
+
+	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+	if (rc)
+		goto response;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+	   vf->abs_vfid,
+	   filters->vf_qid);
+
+	/* print q_filter message */
+	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+	rc = bnx2x_vf_mbx_qfilters(bp, vf);
+response:
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	int qid = mbx->msg->req.q_op.vf_qid;
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+	   vf->abs_vfid, qid);
+
+	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  struct bnx2x_vf_mbx *mbx)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+	rc = bnx2x_vf_close(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	int rc;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+	rc = bnx2x_vf_free(bp, vf);
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_config_rss_params rss;
+	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+	int rc = 0;
+
+	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+			  vf->index);
+		rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
+	/* set vfop params according to rss tlv */
+	memcpy(rss.ind_table, rss_tlv->ind_table,
+	       T_ETH_INDIRECTION_TABLE_SIZE);
+	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+	rss.rss_obj = &vf->rss_conf_obj;
+	rss.rss_result_mask = rss_tlv->rss_result_mask;
+
+	/* flags handled individually for backward/forward compatibility */
+	rss.rss_flags = 0;
+	rss.ramrod_flags = 0;
+
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
+	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
+
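+	/* UDP RSS hashing requires the matching TCP flag to be set as well */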
+	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+		BNX2X_ERR("about to hit a FW assert. aborting...\n");
+		rc = -EINVAL;
+		goto mbx_resp;
+	}
+
+	rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+				       struct vfpf_tpa_tlv *tpa_tlv)
+{
+	int rc = 0;
+
+	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+	    U_ETH_MAX_SGES_FOR_PACKET) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_sges_for_packet,
+			  U_ETH_MAX_SGES_FOR_PACKET);
+	}
+
+	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+		rc = -EINVAL;
+		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+			  tpa_tlv->tpa_client_info.max_tpa_queues,
+			  MAX_AGG_QS(bp));
+	}
+
+	return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    struct bnx2x_vf_mbx *mbx)
+{
+	struct bnx2x_queue_update_tpa_params vf_op_params;
+	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+	int rc = 0;
+
+	memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+	if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+		goto mbx_resp;
+
+	vf_op_params.complete_on_both_clients =
+		tpa_tlv->tpa_client_info.complete_on_both_clients;
+	vf_op_params.dont_verify_thr =
+		tpa_tlv->tpa_client_info.dont_verify_thr;
+	vf_op_params.max_agg_sz =
+		tpa_tlv->tpa_client_info.max_agg_size;
+	vf_op_params.max_sges_pkt =
+		tpa_tlv->tpa_client_info.max_sges_for_packet;
+	vf_op_params.max_tpa_queues =
+		tpa_tlv->tpa_client_info.max_tpa_queues;
+	vf_op_params.sge_buff_sz =
+		tpa_tlv->tpa_client_info.sge_buff_size;
+	vf_op_params.sge_pause_thr_high =
+		tpa_tlv->tpa_client_info.sge_pause_thr_high;
+	vf_op_params.sge_pause_thr_low =
+		tpa_tlv->tpa_client_info.sge_pause_thr_low;
+	vf_op_params.tpa_mode =
+		tpa_tlv->tpa_client_info.tpa_mode;
+	vf_op_params.update_ipv4 =
+		tpa_tlv->tpa_client_info.update_ipv4;
+	vf_op_params.update_ipv6 =
+		tpa_tlv->tpa_client_info.update_ipv6;
+
+	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
+
+mbx_resp:
+	bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  struct bnx2x_vf_mbx *mbx)
+{
+	int i;
+
+	/* check if tlv type is known */
+	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+		/* Lock the per vf op mutex and note the locker's identity.
+		 * The unlock will take place in mbx response.
+		 */
+		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+		/* switch on the opcode */
+		switch (mbx->first_tlv.tl.type) {
+		case CHANNEL_TLV_ACQUIRE:
+			bnx2x_vf_mbx_acquire(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_INIT:
+			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_SETUP_Q:
+			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_SET_Q_FILTERS:
+			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_TEARDOWN_Q:
+			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_CLOSE:
+			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_RELEASE:
+			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_UPDATE_RSS:
+			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+			return;
+		case CHANNEL_TLV_UPDATE_TPA:
+			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+			return;
+		}
+
+	} else {
+		/* unknown TLV - this may belong to a VF driver from the future
+		 * - a version written after this PF driver was written, which
+		 * supports features unknown as of yet. Too bad since we don't
+		 * support them. Or this may be because someone wrote a crappy
+		 * VF driver and is sending garbage over the channel.
+		 */
+		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
+			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
+			  vf->state);
+		for (i = 0; i < 20; i++)
+			DP_CONT(BNX2X_MSG_IOV, "%x ",
+				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+	}
+
+	/* can we respond to VF (do we have an address for it?) */
+	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+		/* notify the VF that we do not support this request */
+		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
+	} else {
+		/* can't send a response since this VF is unknown to us
+		 * just ack the FW to release the mailbox and unlock
+		 * the channel.
+		 */
+		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+		/* Firmware ack should be written before unlocking channel */
+		mmiowb();
+		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+	}
+}
+
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+			   struct vf_pf_event_data *vfpf_event)
+{
+	u8 vf_idx;
+
+	DP(BNX2X_MSG_IOV,
+	   "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
+	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+	/* Sanity checks - consider removing later */
+
+	/* check if the vf_id is valid */
+	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+	    BNX2X_NR_VIRTFN(bp)) {
+		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+		return;
+	}
+
+	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+
+	/* Update VFDB with current message and schedule its handling */
+	mutex_lock(&BP_VFDB(bp)->event_mutex);
+	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+	mutex_unlock(&BP_VFDB(bp)->event_mutex);
+
+	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
+
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+	u64 events;
+	u8 vf_idx;
+	int rc;
+
+	if (!vfdb)
+		return;
+
+	mutex_lock(&vfdb->event_mutex);
+	events = vfdb->event_occur;
+	vfdb->event_occur = 0;
+	mutex_unlock(&vfdb->event_mutex);
+
+	for_each_vf(bp, vf_idx) {
+		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		/* Handle VFs which have pending events */
+		if (!(events & (1ULL << vf_idx)))
+			continue;
+
+		DP(BNX2X_MSG_IOV,
+		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+		   mbx->first_tlv.resp_msg_offset);
+
+		/* dmae to get the VF request */
+		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+					  vf->abs_vfid, mbx->vf_addr_hi,
+					  mbx->vf_addr_lo,
+					  sizeof(union vfpf_tlvs)/4);
+		if (rc) {
+			BNX2X_ERR("Failed to copy request VF %d\n",
+				  vf->abs_vfid);
+			bnx2x_vf_release(bp, vf);
+			return;
+		}
+
+		/* process the VF message header */
+		mbx->first_tlv = mbx->msg->req.first_tlv;
+
+		/* Clean response buffer to refrain from falsely
+		 * seeing chains.
+		 */
+		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+		/* dispatch the request (will prepare the response) */
+		bnx2x_vf_mbx_request(bp, vf, mbx);
+	}
+}
+
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+				bool support_long)
+{
+	/* Older VFs contain a bug where they can't check CRC for bulletin
+	 * boards of length greater than legacy size.
+	 */
+	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
+					  BULLETIN_CONTENT_LEGACY_SIZE;
+	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+		vf * BULLETIN_CONTENT_SIZE;
+	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+	int rc;
+
+	/* can only update vf after init took place */
+	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+		return 0;
+
+	/* increment bulletin board version and compute crc */
+	bulletin->version++;
+	bnx2x_vf_bulletin_finalize(bulletin,
+				   (bnx2x_vf(bp, vf, cfg_flags) &
+				    VF_CFG_EXT_BULLETIN) ? true : false);
+
+	/* propagate bulletin board via dmae to vm memory */
+	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+				  U64_LO(vf_addr), bulletin->length / 4);
+	return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 0000000..64f2b52
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,473 @@
+/* bnx2x_vfpf.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the “GPL”),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module.  An independent module is a module which is
+ * not derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+	u8  num_rxqs;
+	u8  num_txqs;
+	u8  num_sbs;
+	u8  num_mac_filters;
+	u8  num_vlan_filters;
+	u8  num_mc_filters; /* No limit, so superfluous */
+};
+
+struct hw_sb_info {
+	u8 hw_sb_id;	/* aka absolute igu id, used to ack the sb */
+	u8 sb_qid;	/* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
+#define TLV_BUFFER_SIZE			1024
+#define PF_VF_BULLETIN_SIZE		512
+
+#define VFPF_QUEUE_FLG_TPA		0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6		0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO		0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN	0x0008
+#define VFPF_QUEUE_FLG_STATS		0x0010
+#define VFPF_QUEUE_FLG_OV		0x0020
+#define VFPF_QUEUE_FLG_VLAN		0x0040
+#define VFPF_QUEUE_FLG_COS		0x0080
+#define VFPF_QUEUE_FLG_HC		0x0100
+#define VFPF_QUEUE_FLG_DHC		0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS	0x0400
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR	(1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR	(1 << 1)
+#define VFPF_QUEUE_DROP_TTL0		(1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR	(1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE		0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST	0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST	0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST		0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST	0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST		0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN		0x00000020
+
+#define BULLETIN_CONTENT_SIZE		(sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_CONTENT_LEGACY_SIZE	(32)
+#define BULLETIN_ATTEMPTS	5 /* crc failures before throwing in the towel */
+#define BULLETIN_CRC_SEED	0
+
+enum {
+	PFVF_STATUS_WAITING = 0,
+	PFVF_STATUS_SUCCESS,
+	PFVF_STATUS_FAILURE,
+	PFVF_STATUS_NOT_SUPPORTED,
+	PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+	u16 type;
+	u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+	struct channel_tlv tl;
+	u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+	struct channel_tlv tl;
+	u8 status;
+	u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+	struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+	struct channel_tlv tl;
+	u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	struct vf_pf_vfdev_info {
+		/* the following fields are for debug purposes */
+		u8  vf_id;		/* ME register value */
+		u8  vf_os;		/* e.g. Linux, W2K8 */
+#define VF_OS_SUBVERSION_MASK	(0x1f)
+#define VF_OS_MASK		(0xe0)
+#define VF_OS_SHIFT		(5)
+#define VF_OS_UNDEFINED		(0 << VF_OS_SHIFT)
+#define VF_OS_WINDOWS		(1 << VF_OS_SHIFT)
+
+		u8 fp_hsi_ver;
+		u8 caps;
+#define VF_CAP_SUPPORT_EXT_BULLETIN	(1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER	(1 << 1)
+	} vfdev_info;
+
+	struct vf_pf_resc_request resc_request;
+
+	aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+	struct vfpf_first_tlv	first_tlv;
+	u8 vf_qid;
+	u8 padding[3];
+};
+
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+	struct vfpf_first_tlv	first_tlv;
+	u32			rss_flags;
+#define VFPF_RSS_MODE_DISABLED	(1 << 0)
+#define VFPF_RSS_MODE_REGULAR	(1 << 1)
+#define VFPF_RSS_SET_SRCH	(1 << 2)
+#define VFPF_RSS_IPV4		(1 << 3)
+#define VFPF_RSS_IPV4_TCP	(1 << 4)
+#define VFPF_RSS_IPV4_UDP	(1 << 5)
+#define VFPF_RSS_IPV6		(1 << 6)
+#define VFPF_RSS_IPV6_TCP	(1 << 7)
+#define VFPF_RSS_IPV6_UDP	(1 << 8)
+	u8			rss_result_mask;
+	u8			ind_table_size;
+	u8			rss_key_size;
+	u8			padding;
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	u32			rss_key[T_ETH_RSS_KEY];	/* hash values */
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+	struct pfvf_tlv hdr;
+	struct pf_vf_pfdev_info {
+		u32 chip_num;
+		u32 pf_cap;
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
+		char fw_ver[32];
+		u16 db_size;
+		u8  indices_per_sb;
+		u8  padding;
+	} pfdev_info;
+	struct pf_vf_resc {
+		/* in case of status NO_RESOURCE in message hdr, pf will fill
+		 * this struct with suggested amount of resources for next
+		 * acquire request
+		 */
+#define PFVF_MAX_QUEUES_PER_VF         16
+#define PFVF_MAX_SBS_PER_VF            16
+		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+		u8	hw_qid[PFVF_MAX_QUEUES_PER_VF];
+		u8	num_rxqs;
+		u8	num_txqs;
+		u8	num_sbs;
+		u8	num_mac_filters;
+		u8	num_vlan_filters;
+		u8	num_mc_filters;
+		u8	permanent_mac_addr[ETH_ALEN];
+		u8	current_mac_addr[ETH_ALEN];
+		u8	padding[2];
+	} resc;
+};
+
+struct vfpf_port_phys_id_resp_tlv {
+	struct channel_tlv tl;
+	u8 id[ETH_ALEN];
+	u8 padding[2];
+};
+
+struct vfpf_fp_hsi_resp_tlv {
+	struct channel_tlv tl;
+	u8 is_supported;
+	u8 padding[3];
+};
+
+#define VFPF_INIT_FLG_STATS_COALESCE	(1 << 0) /* when set, the VF's queue
+						  * stats will be coalesced on
+						  * the leading RSS queue
+						  */
+
+/* Init VF */
+struct vfpf_init_tlv {
+	struct vfpf_first_tlv first_tlv;
+	aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+	aligned_u64 spq_addr;
+	aligned_u64 stats_addr;
+	u16 stats_stride;
+	u32 flags;
+	u32 padding[2];
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	struct vf_pf_rxq_params {
+		/* physical addresses */
+		aligned_u64 rcq_addr;
+		aligned_u64 rcq_np_addr;
+		aligned_u64 rxq_addr;
+		aligned_u64 sge_addr;
+
+		/* sb + hc info */
+		u8  vf_sb;		/* index in hw_sbs[] */
+		u8  sb_index;		/* Index in the SB */
+		u16 hc_rate;		/* desired interrupts per sec. */
+					/* valid iff VFPF_QUEUE_FLG_HC */
+		/* rx buffer info */
+		u16 mtu;
+		u16 buf_sz;
+		u16 flags;		/* VFPF_QUEUE_FLG_X flags */
+		u16 stat_id;		/* valid iff VFPF_QUEUE_FLG_STATS */
+
+		/* valid iff VFPF_QUEUE_FLG_TPA */
+		u16 sge_buf_sz;
+		u16 tpa_agg_sz;
+		u8 max_sge_pkt;
+
+		u8 drop_flags;		/* VFPF_QUEUE_DROP_X, for Linux VMs
+					 * all the flags are turned off
+					 */
+
+		u8 cache_line_log;	/* VFPF_QUEUE_FLG_CACHE_ALIGN */
+		u8 padding;
+	} rxq;
+
+	struct vf_pf_txq_params {
+		/* physical addresses */
+		aligned_u64 txq_addr;
+
+		/* sb + hc info */
+		u8  vf_sb;		/* index in hw_sbs[] */
+		u8  sb_index;		/* Index in the SB */
+		u16 hc_rate;		/* desired interrupts per sec. */
+					/* valid iff VFPF_QUEUE_FLG_HC */
+		u32 flags;		/* VFPF_QUEUE_FLG_X flags */
+		u16 stat_id;		/* valid iff VFPF_QUEUE_FLG_STATS */
+		u8  traffic_type;	/* see in setup_context() */
+		u8  padding;
+	} txq;
+
+	u8 vf_qid;			/* index in hw_qid[] */
+	u8 param_valid;
+#define VFPF_RXQ_VALID		0x01
+#define VFPF_TXQ_VALID		0x02
+	u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+	u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
+#define VFPF_Q_FILTER_SET		0x100	/* set/clear */
+	u8  mac[ETH_ALEN];
+	u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED	0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED	0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED	0x04
+
+	u8 vf_qid;			/* index in hw_qid[] */
+	u8 n_mac_vlan_filters;
+	u8 n_multicast;
+	u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS                   16
+#define PFVF_MAX_VLAN_FILTERS                  16
+#define PFVF_MAX_FILTERS               (PFVF_MAX_MAC_FILTERS +\
+					 PFVF_MAX_VLAN_FILTERS)
+	struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF              32
+	u8  multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+	u32 rx_mask;	/* see mask constants at the top of the file */
+};
+
+struct vfpf_tpa_tlv {
+	struct vfpf_first_tlv	first_tlv;
+
+	struct vf_pf_tpa_client_info {
+		aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+		u8 update_ipv4;
+		u8 update_ipv6;
+		u8 max_tpa_queues;
+		u8 max_sges_for_packet;
+		u8 complete_on_both_clients;
+		u8 dont_verify_thr;
+		u8 tpa_mode;
+		u16 sge_buff_size;
+		u16 max_agg_size;
+		u16 sge_pause_thr_low;
+		u16 sge_pause_thr_high;
+	} tpa_client_info;
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+	struct vfpf_first_tlv   first_tlv;
+	u16			vf_id;  /* for debug */
+	u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+	struct vfpf_first_tlv	first_tlv;
+	u16			vf_id;
+	u8 padding[2];
+};
+
+struct tlv_buffer_size {
+	u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+	struct vfpf_first_tlv		first_tlv;
+	struct vfpf_acquire_tlv		acquire;
+	struct vfpf_init_tlv		init;
+	struct vfpf_close_tlv		close;
+	struct vfpf_q_op_tlv		q_op;
+	struct vfpf_setup_q_tlv		setup_q;
+	struct vfpf_set_q_filters_tlv	set_q_filters;
+	struct vfpf_release_tlv		release;
+	struct vfpf_rss_tlv		update_rss;
+	struct vfpf_tpa_tlv		update_tpa;
+	struct channel_list_end_tlv	list_end;
+	struct tlv_buffer_size		tlv_buf_size;
+};
+
+union pfvf_tlvs {
+	struct pfvf_general_resp_tlv	general_resp;
+	struct pfvf_acquire_resp_tlv	acquire_resp;
+	struct channel_list_end_tlv	list_end;
+	struct tlv_buffer_size		tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF and which the PF may
+ * update when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF to prevent
+ * loss of data upon multiple updates (or the need for read-modify-write).
+ */
+struct pf_vf_bulletin_size {
+	u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+	u32 crc;			/* crc of structure to ensure is not in
+					 * mid-update
+					 */
+	u16 version;
+	u16 length;
+
+	aligned_u64 valid_bitmap;	/* bitmap indicating which fields
+					 * hold valid values
+					 */
+
+#define MAC_ADDR_VALID		0	/* alert the vf that a new mac address
+					 * is available for it
+					 */
+#define VLAN_VALID		1	/* when set, the vf should not access
+					 * the vfpf channel
+					 */
+#define CHANNEL_DOWN		2	/* vfpf channel is disabled. VFs are not
+					 * to attempt to send messages on the
+					 * channel after this bit is set
+					 */
+#define LINK_VALID		3	/* alert the VF that a new link status
+					 * update is available for it
+					 */
+	u8 mac[ETH_ALEN];
+	u8 mac_padding[2];
+
+	u16 vlan;
+	u8 vlan_padding[6];
+
+	u16 link_speed;			 /* Effective line speed */
+	u8 link_speed_padding[6];
+	u32 link_flags;			 /* VFPF_LINK_REPORT_XXX flags */
+#define VFPF_LINK_REPORT_LINK_DOWN	 (1 << 0)
+#define VFPF_LINK_REPORT_FULL_DUPLEX	 (1 << 1)
+#define VFPF_LINK_REPORT_RX_FC_ON	 (1 << 2)
+#define VFPF_LINK_REPORT_TX_FC_ON	 (1 << 3)
+	u8 link_flags_padding[4];
+};
+
+union pf_vf_bulletin {
+	struct pf_vf_bulletin_content content;
+	struct pf_vf_bulletin_size size;
+};
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+	CHANNEL_TLV_NONE,
+	CHANNEL_TLV_ACQUIRE,
+	CHANNEL_TLV_INIT,
+	CHANNEL_TLV_SETUP_Q,
+	CHANNEL_TLV_SET_Q_FILTERS,
+	CHANNEL_TLV_ACTIVATE_Q,
+	CHANNEL_TLV_DEACTIVATE_Q,
+	CHANNEL_TLV_TEARDOWN_Q,
+	CHANNEL_TLV_CLOSE,
+	CHANNEL_TLV_RELEASE,
+	CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
+	CHANNEL_TLV_PF_RELEASE_VF,
+	CHANNEL_TLV_LIST_END,
+	CHANNEL_TLV_FLR,
+	CHANNEL_TLV_PF_SET_MAC,
+	CHANNEL_TLV_PF_SET_VLAN,
+	CHANNEL_TLV_UPDATE_RSS,
+	CHANNEL_TLV_PHYS_PORT_ID,
+	CHANNEL_TLV_UPDATE_TPA,
+	CHANNEL_TLV_FP_HSI_SUPPORT,
+	CHANNEL_TLV_MAX
+};
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 0000000..97e78e2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 0000000..4744919
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5779 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT		(5 * HZ)
+
+static const char version[] =
+	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+	BCM57302,
+	BCM57304,
+	BCM57404,
+	BCM57406,
+	BCM57304_VF,
+	BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+	char *name;
+} board_info[] = {
+	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+	HWRM_FUNC_CFG,
+	HWRM_PORT_PHY_QCFG,
+	HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+	return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons)					\
+		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons)					\
+		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db)						\
+		writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+	/* Tell compiler to fetch tx indices from memory. */
+	barrier();
+
+	return bp->tx_ring_size -
+		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
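+/* TX length hint flags, indexed by packet length in 512-byte units
+ * (see the "length >>= 9" lookup in bnxt_start_xmit()).
+ */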
+static const u16 bnxt_lhint_arr[] = {
+	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+	TX_BD_FLAGS_LHINT_512_TO_1023,
+	TX_BD_FLAGS_LHINT_1024_TO_2047,
+	TX_BD_FLAGS_LHINT_1024_TO_2047,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct tx_bd *txbd;
+	struct tx_bd_ext *txbd1;
+	struct netdev_queue *txq;
+	int i;
+	dma_addr_t mapping;
+	unsigned int length, pad = 0;
+	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+	u16 prod, last_frag;
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_sw_tx_bd *tx_buf;
+
+	i = skb_get_queue_mapping(skb);
+	if (unlikely(i >= bp->tx_nr_rings)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	bnapi = bp->bnapi[i];
+	txr = &bnapi->tx_ring;
+	txq = netdev_get_tx_queue(dev, i);
+	prod = txr->tx_prod;
+
+	free_size = bnxt_tx_avail(bp, txr);
+	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+		netif_tx_stop_queue(txq);
+		return NETDEV_TX_BUSY;
+	}
+
+	length = skb->len;
+	len = skb_headlen(skb);
+	last_frag = skb_shinfo(skb)->nr_frags;
+
+	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+	txbd->tx_bd_opaque = prod;
+
+	tx_buf = &txr->tx_buf_ring[prod];
+	tx_buf->skb = skb;
+	tx_buf->nr_frags = last_frag;
+
+	vlan_tag_flags = 0;
+	cfa_action = 0;
+	if (skb_vlan_tag_present(skb)) {
+		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+				 skb_vlan_tag_get(skb);
+		/* Currently supports 802.1Q and 802.1AD vlan offloads;
+		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+		 */
+		if (skb->vlan_proto == htons(ETH_P_8021Q))
+			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+	}
+
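+	/* Short packets on an otherwise empty ring use the push mode: the BDs
+	 * and the packet data are written straight into the doorbell BAR
+	 * instead of being fetched by DMA.
+	 */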
+	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+		struct tx_push_bd *push = txr->tx_push;
+		struct tx_bd *tx_push = &push->txbd1;
+		struct tx_bd_ext *tx_push1 = &push->txbd2;
+		void *pdata = tx_push1 + 1;
+		int j;
+
+		/* Set COAL_NOW to be ready quickly for the next push */
+		tx_push->tx_bd_len_flags_type =
+			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+					TX_BD_TYPE_LONG_TX_BD |
+					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+					TX_BD_FLAGS_COAL_NOW |
+					TX_BD_FLAGS_PACKET_END |
+					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			tx_push1->tx_bd_hsize_lflags =
+					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+		else
+			tx_push1->tx_bd_hsize_lflags = 0;
+
+		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+		skb_copy_from_linear_data(skb, pdata, len);
+		pdata += len;
+		for (j = 0; j < last_frag; j++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+			void *fptr;
+
+			fptr = skb_frag_address_safe(frag);
+			if (!fptr)
+				goto normal_tx;
+
+			memcpy(pdata, fptr, skb_frag_size(frag));
+			pdata += skb_frag_size(frag);
+		}
+
+		memcpy(txbd, tx_push, sizeof(*txbd));
+		prod = NEXT_TX(prod);
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+		memcpy(txbd, tx_push1, sizeof(*txbd));
+		prod = NEXT_TX(prod);
+		push->doorbell =
+			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+		txr->tx_prod = prod;
+
+		netdev_tx_sent_queue(txq, skb->len);
+
+		__iowrite64_copy(txr->tx_doorbell, push,
+				 (length + sizeof(*push) + 8) / 8);
+
+		tx_buf->is_push = 1;
+
+		goto tx_done;
+	}
+
+normal_tx:
+	if (length < BNXT_MIN_PKT_SIZE) {
+		pad = BNXT_MIN_PKT_SIZE - length;
+		if (skb_pad(skb, pad)) {
+			/* SKB already freed. */
+			tx_buf->skb = NULL;
+			return NETDEV_TX_OK;
+		}
+		length = BNXT_MIN_PKT_SIZE;
+	}
+
+	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+		dev_kfree_skb_any(skb);
+		tx_buf->skb = NULL;
+		return NETDEV_TX_OK;
+	}
+
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
+	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+	txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+	prod = NEXT_TX(prod);
+	txbd1 = (struct tx_bd_ext *)
+		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+	txbd1->tx_bd_hsize_lflags = 0;
+	if (skb_is_gso(skb)) {
+		u32 hdr_len;
+
+		if (skb->encapsulation)
+			hdr_len = skb_inner_network_offset(skb) +
+				skb_inner_network_header_len(skb) +
+				inner_tcp_hdrlen(skb);
+		else
+			hdr_len = skb_transport_offset(skb) +
+				tcp_hdrlen(skb);
+
+		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+					TX_BD_FLAGS_T_IPID |
+					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+		length = skb_shinfo(skb)->gso_size;
+		txbd1->tx_bd_mss = cpu_to_le32(length);
+		length += hdr_len;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		txbd1->tx_bd_hsize_lflags =
+			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+		txbd1->tx_bd_mss = 0;
+	}
+
+	length >>= 9;
+	flags |= bnxt_lhint_arr[length];
+	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+	for (i = 0; i < last_frag; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		prod = NEXT_TX(prod);
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+		len = skb_frag_size(frag);
+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+					   DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+			goto tx_dma_error;
+
+		tx_buf = &txr->tx_buf_ring[prod];
+		dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+		txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+		flags = len << TX_BD_LEN_SHIFT;
+		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+	}
+
+	flags &= ~TX_BD_LEN;
+	txbd->tx_bd_len_flags_type =
+		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+			    TX_BD_FLAGS_PACKET_END);
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/* Sync BD data before updating doorbell */
+	wmb();
+
+	prod = NEXT_TX(prod);
+	txr->tx_prod = prod;
+
+	writel(DB_KEY_TX | prod, txr->tx_doorbell);
+	writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+	mmiowb();
+
+	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in bnxt_tx_avail() below, because in
+		 * bnxt_tx_int(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+			netif_tx_wake_queue(txq);
+	}
+	return NETDEV_TX_OK;
+
+tx_dma_error:
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	tx_buf = &txr->tx_buf_ring[prod];
+	tx_buf->skb = NULL;
+	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+	prod = NEXT_TX(prod);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX(prod);
+		tx_buf = &txr->tx_buf_ring[prod];
+		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+	int index = bnapi->index;
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+	u16 cons = txr->tx_cons;
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+	unsigned int tx_bytes = 0;
+
+	for (i = 0; i < nr_pkts; i++) {
+		struct bnxt_sw_tx_bd *tx_buf;
+		struct sk_buff *skb;
+		int j, last;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		cons = NEXT_TX(cons);
+		skb = tx_buf->skb;
+		tx_buf->skb = NULL;
+
+		if (tx_buf->is_push) {
+			tx_buf->is_push = 0;
+			goto next_tx_int;
+		}
+
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+		last = tx_buf->nr_frags;
+
+		for (j = 0; j < last; j++) {
+			cons = NEXT_TX(cons);
+			tx_buf = &txr->tx_buf_ring[cons];
+			dma_unmap_page(
+				&pdev->dev,
+				dma_unmap_addr(tx_buf, mapping),
+				skb_frag_size(&skb_shinfo(skb)->frags[j]),
+				PCI_DMA_TODEVICE);
+		}
+
+next_tx_int:
+		cons = NEXT_TX(cons);
+
+		tx_bytes += skb->len;
+		dev_kfree_skb_any(skb);
+	}
+
+	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+	txr->tx_cons = cons;
+
+	/* Need to make the tx_cons update visible to bnxt_start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnxt_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
+	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
+		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+				       gfp_t gfp)
+{
+	u8 *data;
+	struct pci_dev *pdev = bp->pdev;
+
+	data = kmalloc(bp->rx_buf_size, gfp);
+	if (!data)
+		return NULL;
+
+	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+	if (dma_mapping_error(&pdev->dev, *mapping)) {
+		kfree(data);
+		data = NULL;
+	}
+	return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr,
+				     u16 prod, gfp_t gfp)
+{
+	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+	u8 *data;
+	dma_addr_t mapping;
+
+	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+	if (!data)
+		return -ENOMEM;
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+	return 0;
+}
+
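+/* Recycle the rx buffer at 'cons' back onto the ring at the current producer
+ * index, reusing its DMA mapping, e.g. when a replacement buffer cannot be
+ * allocated.
+ */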
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+			       u8 *data)
+{
+	u16 prod = rxr->rx_prod;
+	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *cons_bd, *prod_bd;
+
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+	prod_rx_buf->data = data;
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
+
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+	u16 next, max = rxr->rx_agg_bmap_size;
+
+	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+	if (next >= max)
+		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+	return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr,
+				     u16 prod, gfp_t gfp)
+{
+	struct rx_bd *rxbd =
+		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+	struct pci_dev *pdev = bp->pdev;
+	struct page *page;
+	dma_addr_t mapping;
+	u16 sw_prod = rxr->rx_sw_agg_prod;
+
+	page = alloc_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&pdev->dev, mapping)) {
+		__free_page(page);
+		return -EIO;
+	}
+
+	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+	__set_bit(sw_prod, rxr->rx_agg_bmap);
+	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+	rx_agg_buf->page = page;
+	rx_agg_buf->mapping = mapping;
+	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+	rxbd->rx_bd_opaque = sw_prod;
+	return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+				   u32 agg_bufs)
+{
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 prod = rxr->rx_agg_prod;
+	u16 sw_prod = rxr->rx_sw_agg_prod;
+	u32 i;
+
+	for (i = 0; i < agg_bufs; i++) {
+		u16 cons;
+		struct rx_agg_cmp *agg;
+		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+		struct rx_bd *prod_bd;
+		struct page *page;
+
+		agg = (struct rx_agg_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		cons = agg->rx_agg_cmp_opaque;
+		__clear_bit(cons, rxr->rx_agg_bmap);
+
+		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+		__set_bit(sw_prod, rxr->rx_agg_bmap);
+		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+		cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+		/* It is possible for sw_prod to be equal to cons, so
+		 * set cons_rx_buf->page to NULL first.
+		 */
+		page = cons_rx_buf->page;
+		cons_rx_buf->page = NULL;
+		prod_rx_buf->page = page;
+
+		prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+		prod_bd->rx_bd_opaque = sw_prod;
+
+		prod = NEXT_RX_AGG(prod);
+		sw_prod = NEXT_RX_AGG(sw_prod);
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+	rxr->rx_agg_prod = prod;
+	rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr, u16 cons,
+				   u16 prod, u8 *data, dma_addr_t dma_addr,
+				   unsigned int len)
+{
+	int err;
+	struct sk_buff *skb;
+
+	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		return NULL;
+	}
+
+	skb = build_skb(data, 0);
+	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+	if (!skb) {
+		kfree(data);
+		return NULL;
+	}
+
+	skb_reserve(skb, BNXT_RX_OFFSET);
+	skb_put(skb, len);
+	return skb;
+}
+
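+/* Attach the aggregation buffers of a completed packet to the skb as page
+ * fragments, allocating replacement pages for the aggregation ring as we go.
+ */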
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+				     struct sk_buff *skb, u16 cp_cons,
+				     u32 agg_bufs)
+{
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 prod = rxr->rx_agg_prod;
+	u32 i;
+
+	for (i = 0; i < agg_bufs; i++) {
+		u16 cons, frag_len;
+		struct rx_agg_cmp *agg;
+		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+		struct page *page;
+		dma_addr_t mapping;
+
+		agg = (struct rx_agg_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		cons = agg->rx_agg_cmp_opaque;
+		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+		cons_rx_buf = &rxr->rx_agg_ring[cons];
+		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+		__clear_bit(cons, rxr->rx_agg_bmap);
+
+		/* It is possible for bnxt_alloc_rx_page() to allocate
+		 * a sw_prod index that equals the cons index, so we
+		 * need to clear the cons entry now.
+		 */
+		mapping = dma_unmap_addr(cons_rx_buf, mapping);
+		page = cons_rx_buf->page;
+		cons_rx_buf->page = NULL;
+
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+			struct skb_shared_info *shinfo;
+			unsigned int nr_frags;
+
+			shinfo = skb_shinfo(skb);
+			nr_frags = --shinfo->nr_frags;
+			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+			dev_kfree_skb(skb);
+
+			cons_rx_buf->page = page;
+
+			/* Update prod since possibly some pages have been
+			 * allocated already.
+			 */
+			rxr->rx_agg_prod = prod;
+			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+			return NULL;
+		}
+
+		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+
+		skb->data_len += frag_len;
+		skb->len += frag_len;
+		skb->truesize += PAGE_SIZE;
+
+		prod = NEXT_RX_AGG(prod);
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+	rxr->rx_agg_prod = prod;
+	return skb;
+}
+
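+/* Check whether all 'agg_bufs' aggregation completions that follow the
+ * current completion have already been written by the hardware.
+ */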
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+			       u8 agg_bufs, u32 *raw_cons)
+{
+	u16 last;
+	struct rx_agg_cmp *agg;
+
+	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+	last = RING_CMP(*raw_cons);
+	agg = (struct rx_agg_cmp *)
+		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+	return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
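+/* Copy a small received packet into a freshly allocated skb, syncing the rx
+ * buffer for CPU access around the copy so it can stay mapped for the device.
+ */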
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+					    unsigned int len,
+					    dma_addr_t mapping)
+{
+	struct bnxt *bp = bnapi->bp;
+	struct pci_dev *pdev = bp->pdev;
+	struct sk_buff *skb;
+
+	skb = napi_alloc_skb(&bnapi->napi, len);
+	if (!skb)
+		return NULL;
+
+	dma_sync_single_for_cpu(&pdev->dev, mapping,
+				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
+	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+	dma_sync_single_for_device(&pdev->dev, mapping,
+				   bp->rx_copy_thresh,
+				   PCI_DMA_FROMDEVICE);
+
+	skb_put(skb, len);
+	return skb;
+}
+
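+/* Start of a hardware TPA (LRO) aggregation: stash the received buffer in
+ * the per-aggregation tpa_info slot, refill the producer slot with the
+ * slot's previous spare buffer, and record the hash and metadata reported
+ * in the completion.
+ */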
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			   struct rx_tpa_start_cmp *tpa_start,
+			   struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+	u8 agg_id = TPA_START_AGG_ID(tpa_start);
+	u16 cons, prod;
+	struct bnxt_tpa_info *tpa_info;
+	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *prod_bd;
+	dma_addr_t mapping;
+
+	cons = tpa_start->rx_tpa_start_cmp_opaque;
+	prod = rxr->rx_prod;
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+	tpa_info = &rxr->rx_tpa[agg_id];
+
+	prod_rx_buf->data = tpa_info->data;
+
+	mapping = tpa_info->mapping;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+	tpa_info->data = cons_rx_buf->data;
+	cons_rx_buf->data = NULL;
+	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+	tpa_info->len =
+		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+				RX_TPA_START_CMP_LEN_SHIFT;
+	if (likely(TPA_START_HASH_VALID(tpa_start))) {
+		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+		tpa_info->hash_type = PKT_HASH_TYPE_L4;
+		tpa_info->gso_type = SKB_GSO_TCPV4;
+		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+		if (hash_type == 3)
+			tpa_info->gso_type = SKB_GSO_TCPV6;
+		tpa_info->rss_hash =
+			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+	} else {
+		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+		tpa_info->gso_type = 0;
+		if (netif_msg_rx_err(bp))
+			netdev_warn(bp->dev, "TPA packet without valid hash\n");
+	}
+	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+	rxr->rx_prod = NEXT_RX(prod);
+	cons = NEXT_RX(cons);
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+	cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+			   u16 cp_cons, u32 agg_bufs)
+{
+	if (agg_bufs)
+		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
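+/* Prepare an aggregated packet for GRO completion: locate the network
+ * and transport headers from the payload offset in the TPA_END
+ * completion, prime the TCP pseudo checksum and let tcp_gro_complete()
+ * finish the job.  If the inner headers sit at a non-zero offset, the
+ * outer header is checked for UDP and the tunnel GSO type is set
+ * accordingly.
+ */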
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+					   struct rx_tpa_end_cmp *tpa_end,
+					   struct rx_tpa_end_cmp_ext *tpa_end1,
+					   struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+	struct tcphdr *th;
+	int payload_off, tcp_opt_len = 0;
+	int len, nw_off;
+
+	NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+	skb_shinfo(skb)->gso_size =
+		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+	if (TPA_END_GRO_TS(tpa_end))
+		tcp_opt_len = 12;
+
+	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+		struct iphdr *iph;
+
+		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+			 ETH_HLEN;
+		skb_set_network_header(skb, nw_off);
+		iph = ip_hdr(skb);
+		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+		struct ipv6hdr *iph;
+
+		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+			 ETH_HLEN;
+		skb_set_network_header(skb, nw_off);
+		iph = ipv6_hdr(skb);
+		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+	} else {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	tcp_gro_complete(skb);
+
+	if (nw_off) { /* tunnel */
+		struct udphdr *uh = NULL;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = (struct iphdr *)skb->data;
+
+			if (iph->protocol == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		} else {
+			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+			if (iph->nexthdr == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		}
+		if (uh) {
+			if (uh->check)
+				skb_shinfo(skb)->gso_type |=
+					SKB_GSO_UDP_TUNNEL_CSUM;
+			else
+				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+		}
+	}
+#endif
+	return skb;
+}
+
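+/* Handle a TPA_END completion: turn the buffer parked at TPA start into
+ * an skb (copying it if it is below the copy threshold), attach any
+ * aggregation pages, and apply RSS hash, VLAN tag and checksum status.
+ * Returns ERR_PTR(-EBUSY) if not all agg completions have arrived yet,
+ * or NULL if the packet had to be dropped.
+ */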
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+					   struct bnxt_napi *bnapi,
+					   u32 *raw_cons,
+					   struct rx_tpa_end_cmp *tpa_end,
+					   struct rx_tpa_end_cmp_ext *tpa_end1,
+					   bool *agg_event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u8 agg_id = TPA_END_AGG_ID(tpa_end);
+	u8 *data, agg_bufs;
+	u16 cp_cons = RING_CMP(*raw_cons);
+	unsigned int len;
+	struct bnxt_tpa_info *tpa_info;
+	dma_addr_t mapping;
+	struct sk_buff *skb;
+
+	tpa_info = &rxr->rx_tpa[agg_id];
+	data = tpa_info->data;
+	prefetch(data);
+	len = tpa_info->len;
+	mapping = tpa_info->mapping;
+
+	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+			return ERR_PTR(-EBUSY);
+
+		*agg_event = true;
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+			    agg_bufs, (int)MAX_SKB_FRAGS);
+		return NULL;
+	}
+
+	if (len <= bp->rx_copy_thresh) {
+		skb = bnxt_copy_skb(bnapi, data, len, mapping);
+		if (!skb) {
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+	} else {
+		u8 *new_data;
+		dma_addr_t new_mapping;
+
+		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+		if (!new_data) {
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+
+		tpa_info->data = new_data;
+		tpa_info->mapping = new_mapping;
+
+		skb = build_skb(data, 0);
+		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+				 PCI_DMA_FROMDEVICE);
+
+		if (!skb) {
+			kfree(data);
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+		skb_reserve(skb, BNXT_RX_OFFSET);
+		skb_put(skb, len);
+	}
+
+	if (agg_bufs) {
+		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+		if (!skb) {
+			/* Page reuse already handled by bnxt_rx_pages(). */
+			return NULL;
+		}
+	}
+	skb->protocol = eth_type_trans(skb, bp->dev);
+
+	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+		netdev_features_t features = skb->dev->features;
+		u16 vlan_proto = tpa_info->metadata >>
+			RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+		     vlan_proto == ETH_P_8021Q) ||
+		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+		     vlan_proto == ETH_P_8021AD)) {
+			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+					       tpa_info->metadata &
+					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+		}
+	}
+
+	skb_checksum_none_assert(skb);
+	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level =
+			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+	}
+
+	if (TPA_END_GRO(tpa_end))
+		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+	return skb;
+}
+
+/* returns the following:
+ * 1       - 1 packet successfully received
+ * 0       - successful TPA_START, packet not completed yet
+ * -EBUSY  - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO    - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+		       bool *agg_event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	struct net_device *dev = bp->dev;
+	struct rx_cmp *rxcmp;
+	struct rx_cmp_ext *rxcmp1;
+	u32 tmp_raw_cons = *raw_cons;
+	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+	struct bnxt_sw_rx_bd *rx_buf;
+	unsigned int len;
+	u8 *data, agg_bufs, cmp_type;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	int rc = 0;
+
+	rxcmp = (struct rx_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+
+	prod = rxr->rx_prod;
+
+	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+		goto next_rx_no_prod;
+
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+				   (struct rx_tpa_end_cmp *)rxcmp,
+				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
+				   agg_event);
+
+		if (unlikely(IS_ERR(skb)))
+			return -EBUSY;
+
+		rc = -ENOMEM;
+		if (likely(skb)) {
+			skb_record_rx_queue(skb, bnapi->index);
+			skb_mark_napi_id(skb, &bnapi->napi);
+			if (bnxt_busy_polling(bnapi))
+				netif_receive_skb(skb);
+			else
+				napi_gro_receive(&bnapi->napi, skb);
+			rc = 1;
+		}
+		goto next_rx_no_prod;
+	}
+
+	cons = rxcmp->rx_cmp_opaque;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	data = rx_buf->data;
+	prefetch(data);
+
+	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+				RX_CMP_AGG_BUFS_SHIFT;
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+			return -EBUSY;
+
+		cp_cons = NEXT_CMP(cp_cons);
+		*agg_event = true;
+	}
+
+	rx_buf->data = NULL;
+	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		if (agg_bufs)
+			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+		rc = -EIO;
+		goto next_rx;
+	}
+
+	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+	dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+	if (len <= bp->rx_copy_thresh) {
+		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+		bnxt_reuse_rx_data(rxr, cons, data);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	} else {
+		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	}
+
+	if (agg_bufs) {
+		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	}
+
+	if (RX_CMP_HASH_VALID(rxcmp)) {
+		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+		if (hash_type != 1 && hash_type != 3)
+			type = PKT_HASH_TYPE_L3;
+		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+	}
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	if (rxcmp1->rx_cmp_flags2 &
+	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+		netdev_features_t features = skb->dev->features;
+		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+		     vlan_proto == ETH_P_8021Q) ||
+		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+		     vlan_proto == ETH_P_8021AD))
+			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+					       meta_data &
+					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+	}
+
+	skb_checksum_none_assert(skb);
+	if (RX_CMP_L4_CS_OK(rxcmp1)) {
+		if (dev->features & NETIF_F_RXCSUM) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+		}
+	} else {
+		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+			cpr->rx_l4_csum_errors++;
+	}
+
+	skb_record_rx_queue(skb, bnapi->index);
+	skb_mark_napi_id(skb, &bnapi->napi);
+	if (bnxt_busy_polling(bnapi))
+		netif_receive_skb(skb);
+	else
+		napi_gro_receive(&bnapi->napi, skb);
+	rc = 1;
+
+next_rx:
+	rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+	*raw_cons = tmp_raw_cons;
+
+	return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+				    struct hwrm_async_event_cmpl *cmpl)
+{
+	u16 event_id = le16_to_cpu(cmpl->event_id);
+
+	/* TODO CHIMP_FW: Define event id's for link change, error etc */
+	switch (event_id) {
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+		break;
+	default:
+		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+			   event_id);
+		break;
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+				(struct hwrm_fwd_req_cmpl *)txcmp;
+
+	switch (cmpl_type) {
+	case CMPL_BASE_TYPE_HWRM_DONE:
+		seq_id = le16_to_cpu(h_cmpl->sequence_id);
+		if (seq_id == bp->hwrm_intr_seq_id)
+			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+		else
+			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+		break;
+
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+		if ((vf_id < bp->pf.first_vf_id) ||
+		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+				   vf_id);
+			return -EINVAL;
+		}
+
+		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+		break;
+
+	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+		bnxt_async_event_process(bp,
+					 (struct hwrm_async_event_cmpl *)txcmp);
+
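+		/* fall through */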
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+	struct bnxt_napi *bnapi = dev_instance;
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+	napi_schedule(&bnapi->napi);
+	return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+	u32 raw_cons = cpr->cp_raw_cons;
+	u16 cons = RING_CMP(raw_cons);
+	struct tx_cmp *txcmp;
+
+	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+	return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+	struct bnxt_napi *bnapi = dev_instance;
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 cons = RING_CMP(cpr->cp_raw_cons);
+	u32 int_status;
+
+	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+	if (!bnxt_has_work(bp, cpr)) {
+		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
+		/* return if erroneous interrupt */
+		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+			return IRQ_NONE;
+	}
+
+	/* disable ring IRQ */
+	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+	/* Return here if interrupt is shared and is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+	return IRQ_HANDLED;
+}
+
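+/* Service one completion ring: reap TX completions, receive packets and
+ * dispatch HWRM completions until the ring runs dry or the NAPI budget
+ * is spent, then ring the doorbells for any rings that were touched.
+ */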
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 raw_cons = cpr->cp_raw_cons;
+	u32 cons;
+	int tx_pkts = 0;
+	int rx_pkts = 0;
+	bool rx_event = false;
+	bool agg_event = false;
+	struct tx_cmp *txcmp;
+
+	while (1) {
+		int rc;
+
+		cons = RING_CMP(raw_cons);
+		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+		if (!TX_CMP_VALID(txcmp, raw_cons))
+			break;
+
+		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+			tx_pkts++;
+			/* return full budget so NAPI will complete. */
+			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+				rx_pkts = budget;
+		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+			if (likely(rc >= 0))
+				rx_pkts += rc;
+			else if (rc == -EBUSY)	/* partial completion */
+				break;
+			rx_event = true;
+		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_DONE) ||
+				    (TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+				    (TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+			bnxt_hwrm_handler(bp, txcmp);
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+
+		if (rx_pkts == budget)
+			break;
+	}
+
+	cpr->cp_raw_cons = raw_cons;
+	/* ACK completion ring before freeing tx ring and producing new
+	 * buffers in rx/agg rings to prevent overflowing the completion
+	 * ring.
+	 */
+	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+	if (tx_pkts)
+		bnxt_tx_int(bp, bnapi, tx_pkts);
+
+	if (rx_event) {
+		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+		if (agg_event) {
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+		}
+	}
+	return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	int work_done = 0;
+
+	if (!bnxt_lock_napi(bnapi))
+		return budget;
+
+	while (1) {
+		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+		if (work_done >= budget)
+			break;
+
+		if (!bnxt_has_work(bp, cpr)) {
+			napi_complete(napi);
+			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+			break;
+		}
+	}
+	mmiowb();
+	bnxt_unlock_napi(bnapi);
+	return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	int rx_work, budget = 4;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		return LL_FLUSH_FAILED;
+
+	if (!bnxt_lock_poll(bnapi))
+		return LL_FLUSH_BUSY;
+
+	rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+	bnxt_unlock_poll(bnapi);
+	return rx_work;
+}
+#endif
+
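+/* Free any skbs still attached to the TX rings and unmap their DMA
+ * buffers.  A packet's head occupies two BDs, followed by one BD per
+ * page fragment, hence the index advances by two before the fragments
+ * are walked.
+ */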
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+	int i, max_idx;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+		for (j = 0; j < max_idx;) {
+			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
+
+			if (!skb) {
+				j++;
+				continue;
+			}
+
+			tx_buf->skb = NULL;
+
+			if (tx_buf->is_push) {
+				dev_kfree_skb(skb);
+				j += 2;
+				continue;
+			}
+
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
+
+			last = tx_buf->nr_frags;
+			j += 2;
+			for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+				tx_buf = &txr->tx_buf_ring[j];
+				dma_unmap_page(
+					&pdev->dev,
+					dma_unmap_addr(tx_buf, mapping),
+					skb_frag_size(frag), PCI_DMA_TODEVICE);
+			}
+			dev_kfree_skb(skb);
+		}
+		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+	}
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i, max_idx, max_agg_idx;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+
+		if (rxr->rx_tpa) {
+			for (j = 0; j < MAX_TPA; j++) {
+				struct bnxt_tpa_info *tpa_info =
+							&rxr->rx_tpa[j];
+				u8 *data = tpa_info->data;
+
+				if (!data)
+					continue;
+
+				dma_unmap_single(
+					&pdev->dev,
+					dma_unmap_addr(tpa_info, mapping),
+					bp->rx_buf_use_size,
+					PCI_DMA_FROMDEVICE);
+
+				tpa_info->data = NULL;
+
+				kfree(data);
+			}
+		}
+
+		for (j = 0; j < max_idx; j++) {
+			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+			u8 *data = rx_buf->data;
+
+			if (!data)
+				continue;
+
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
+
+			rx_buf->data = NULL;
+
+			kfree(data);
+		}
+
+		for (j = 0; j < max_agg_idx; j++) {
+			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+				&rxr->rx_agg_ring[j];
+			struct page *page = rx_agg_buf->page;
+
+			if (!page)
+				continue;
+
+			dma_unmap_page(&pdev->dev,
+				       dma_unmap_addr(rx_agg_buf, mapping),
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			rx_agg_buf->page = NULL;
+			__clear_bit(j, rxr->rx_agg_bmap);
+
+			__free_page(page);
+		}
+	}
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+	bnxt_free_tx_skbs(bp);
+	bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+
+	for (i = 0; i < ring->nr_pages; i++) {
+		if (!ring->pg_arr[i])
+			continue;
+
+		dma_free_coherent(&pdev->dev, ring->page_size,
+				  ring->pg_arr[i], ring->dma_arr[i]);
+
+		ring->pg_arr[i] = NULL;
+	}
+	if (ring->pg_tbl) {
+		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+				  ring->pg_tbl, ring->pg_tbl_map);
+		ring->pg_tbl = NULL;
+	}
+	if (ring->vmem_size && *ring->vmem) {
+		vfree(*ring->vmem);
+		*ring->vmem = NULL;
+	}
+}
+
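+/* Allocate the DMA-coherent descriptor pages for a ring.  Rings spanning
+ * more than one page also get a page table holding the DMA address of
+ * each page, and rings with per-entry software state get a vzalloc'ed
+ * vmem area.
+ */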
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+	int i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (ring->nr_pages > 1) {
+		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+						  ring->nr_pages * 8,
+						  &ring->pg_tbl_map,
+						  GFP_KERNEL);
+		if (!ring->pg_tbl)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < ring->nr_pages; i++) {
+		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+						     ring->page_size,
+						     &ring->dma_arr[i],
+						     GFP_KERNEL);
+		if (!ring->pg_arr[i])
+			return -ENOMEM;
+
+		if (ring->nr_pages > 1)
+			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+	}
+
+	if (ring->vmem_size) {
+		*ring->vmem = vzalloc(ring->vmem_size);
+		if (!(*ring->vmem))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+
+		kfree(rxr->rx_tpa);
+		rxr->rx_tpa = NULL;
+
+		kfree(rxr->rx_agg_bmap);
+		rxr->rx_agg_bmap = NULL;
+
+		ring = &rxr->rx_ring_struct;
+		bnxt_free_ring(bp, ring);
+
+		ring = &rxr->rx_agg_ring_struct;
+		bnxt_free_ring(bp, ring);
+	}
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+	int i, rc, agg_rings = 0, tpa_rings = 0;
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		agg_rings = 1;
+
+	if (bp->flags & BNXT_FLAG_TPA)
+		tpa_rings = 1;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+		ring = &rxr->rx_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+
+		if (agg_rings) {
+			u16 mem_size;
+
+			ring = &rxr->rx_agg_ring_struct;
+			rc = bnxt_alloc_ring(bp, ring);
+			if (rc)
+				return rc;
+
+			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+			mem_size = rxr->rx_agg_bmap_size / 8;
+			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+			if (!rxr->rx_agg_bmap)
+				return -ENOMEM;
+
+			if (tpa_rings) {
+				rxr->rx_tpa = kcalloc(MAX_TPA,
+						sizeof(struct bnxt_tpa_info),
+						GFP_KERNEL);
+				if (!rxr->rx_tpa)
+					return -ENOMEM;
+			}
+		}
+	}
+	return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+	int i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+
+		if (txr->tx_push) {
+			dma_free_coherent(&pdev->dev, bp->tx_push_size,
+					  txr->tx_push, txr->tx_push_mapping);
+			txr->tx_push = NULL;
+		}
+
+		ring = &txr->tx_ring_struct;
+
+		bnxt_free_ring(bp, ring);
+	}
+}
+
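+/* Allocate the TX descriptor rings.  When TX push is enabled, each ring
+ * also gets one coherent buffer used to back up push operations; push is
+ * disabled altogether if the BDs plus the push threshold do not fit in
+ * 128 bytes.
+ */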
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+	int i, j, rc;
+	struct pci_dev *pdev = bp->pdev;
+
+	bp->tx_push_size = 0;
+	if (bp->tx_push_thresh) {
+		int push_size;
+
+		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+					bp->tx_push_thresh);
+
+		if (push_size > 128) {
+			push_size = 0;
+			bp->tx_push_thresh = 0;
+		}
+
+		bp->tx_push_size = push_size;
+	}
+
+	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+		ring = &txr->tx_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+
+		if (bp->tx_push_size) {
+			struct tx_bd *txbd;
+			dma_addr_t mapping;
+
+			/* One pre-allocated DMA buffer to back up the
+			 * TX push operation
+			 */
+			txr->tx_push = dma_alloc_coherent(&pdev->dev,
+						bp->tx_push_size,
+						&txr->tx_push_mapping,
+						GFP_KERNEL);
+
+			if (!txr->tx_push)
+				return -ENOMEM;
+
+			txbd = &txr->tx_push->txbd1;
+
+			mapping = txr->tx_push_mapping +
+				sizeof(struct tx_push_bd);
+			txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+			memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+		}
+		ring->queue_id = bp->q_info[j].queue_id;
+		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+			j++;
+	}
+	return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+
+		bnxt_free_ring(bp, ring);
+	}
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+	int i, rc;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
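+/* Point every ring_struct at the descriptor page arrays, DMA address
+ * arrays and software ring storage embedded in the per-ring info
+ * structures, and record the page counts and page sizes the allocator
+ * will use.
+ */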
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+		ring->nr_pages = bp->cp_nr_pages;
+		ring->page_size = HW_CMPD_RING_SIZE;
+		ring->pg_arr = (void **)cpr->cp_desc_ring;
+		ring->dma_arr = cpr->cp_desc_mapping;
+		ring->vmem_size = 0;
+
+		rxr = &bnapi->rx_ring;
+		ring = &rxr->rx_ring_struct;
+		ring->nr_pages = bp->rx_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)rxr->rx_desc_ring;
+		ring->dma_arr = rxr->rx_desc_mapping;
+		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+		ring->vmem = (void **)&rxr->rx_buf_ring;
+
+		ring = &rxr->rx_agg_ring_struct;
+		ring->nr_pages = bp->rx_agg_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+		ring->dma_arr = rxr->rx_agg_desc_mapping;
+		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+		ring->vmem = (void **)&rxr->rx_agg_ring;
+
+		txr = &bnapi->tx_ring;
+		ring = &txr->tx_ring_struct;
+		ring->nr_pages = bp->tx_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)txr->tx_desc_ring;
+		ring->dma_arr = txr->tx_desc_mapping;
+		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+		ring->vmem = (void **)&txr->tx_buf_ring;
+	}
+}
+
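+/* Stamp every rx BD with the buffer type/length flags and an opaque
+ * value equal to its producer index so completions can be matched back
+ * to the software ring entry.
+ */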
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+	int i;
+	u32 prod;
+	struct rx_bd **rx_buf_ring;
+
+	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+		int j;
+		struct rx_bd *rxbd;
+
+		rxbd = rx_buf_ring[i];
+		if (!rxbd)
+			continue;
+
+		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+			rxbd->rx_bd_opaque = prod;
+		}
+	}
+}
+
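+/* Initialize one rx ring: stamp its BDs, fill it (and the agg ring when
+ * aggregation is enabled) with freshly allocated buffers, and allocate
+ * the per-ring TPA buffers when TPA is enabled.
+ */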
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+	u32 prod, type;
+	int i;
+
+	if (!bnapi)
+		return -EINVAL;
+
+	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+	if (NET_IP_ALIGN == 2)
+		type |= RX_BD_FLAGS_SOP;
+
+	rxr = &bnapi->rx_ring;
+	ring = &rxr->rx_ring_struct;
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_prod;
+	for (i = 0; i < bp->rx_ring_size; i++) {
+		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_nr, i, bp->rx_ring_size);
+			break;
+		}
+		prod = NEXT_RX(prod);
+	}
+	rxr->rx_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return 0;
+
+	ring = &rxr->rx_agg_ring_struct;
+
+	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_agg_prod;
+	for (i = 0; i < bp->rx_agg_ring_size; i++) {
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+			break;
+		}
+		prod = NEXT_RX_AGG(prod);
+	}
+	rxr->rx_agg_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (bp->flags & BNXT_FLAG_TPA) {
+		if (rxr->rx_tpa) {
+			u8 *data;
+			dma_addr_t mapping;
+
+			for (i = 0; i < MAX_TPA; i++) {
+				data = __bnxt_alloc_rx_data(bp, &mapping,
+							    GFP_KERNEL);
+				if (!data)
+					return -ENOMEM;
+
+				rxr->rx_tpa[i].data = data;
+				rxr->rx_tpa[i].mapping = mapping;
+			}
+		} else {
+			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		rc = bnxt_init_one_rx_ring(bp, i);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+	u16 i;
+
+	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+				   MAX_SKB_FRAGS + 1);
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+
+	return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+	kfree(bp->grp_info);
+	bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+	int i;
+
+	if (irq_re_init) {
+		bp->grp_info = kcalloc(bp->cp_nr_rings,
+				       sizeof(struct bnxt_ring_grp_info),
+				       GFP_KERNEL);
+		if (!bp->grp_info)
+			return -ENOMEM;
+	}
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		if (irq_re_init)
+			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+	}
+	return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+	kfree(bp->vnic_info);
+	bp->vnic_info = NULL;
+	bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+	int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (bp->flags & BNXT_FLAG_RFS)
+		num_vnics += bp->rx_nr_rings;
+#endif
+
+	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+				GFP_KERNEL);
+	if (!bp->vnic_info)
+		return -ENOMEM;
+
+	bp->nr_vnics = num_vnics;
+	return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+		if (bp->vnic_info[i].rss_hash_key) {
+			if (i == 0)
+				prandom_bytes(vnic->rss_hash_key,
+					      HW_HASH_KEY_SIZE);
+			else
+				memcpy(vnic->rss_hash_key,
+				       bp->vnic_info[0].rss_hash_key,
+				       HW_HASH_KEY_SIZE);
+		}
+	}
+}
+
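+/* Number of descriptor pages needed for @ring_size entries: the full
+ * pages plus one, rounded up to the next power of two (minimum one
+ * page).
+ */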
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+	int pages;
+
+	pages = ring_size / desc_per_pg;
+
+	if (!pages)
+		return 1;
+
+	pages++;
+
+	while (pages & (pages - 1))
+		pages++;
+
+	return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+	bp->flags &= ~BNXT_FLAG_TPA;
+	if (bp->dev->features & NETIF_F_LRO)
+		bp->flags |= BNXT_FLAG_LRO;
+	if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+		bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+	u32 ring_size, rx_size, rx_space;
+	u32 agg_factor = 0, agg_ring_size = 0;
+
+	/* 8 for CRC and VLAN */
+	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+	rx_space = rx_size + NET_SKB_PAD +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+	ring_size = bp->rx_ring_size;
+	bp->rx_agg_ring_size = 0;
+	bp->rx_agg_nr_pages = 0;
+
+	if (bp->flags & BNXT_FLAG_TPA)
+		agg_factor = 4;
+
+	bp->flags &= ~BNXT_FLAG_JUMBO;
+	if (rx_space > PAGE_SIZE) {
+		u32 jumbo_factor;
+
+		bp->flags |= BNXT_FLAG_JUMBO;
+		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+		if (jumbo_factor > agg_factor)
+			agg_factor = jumbo_factor;
+	}
+	agg_ring_size = ring_size * agg_factor;
+
+	if (agg_ring_size) {
+		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+							RX_DESC_CNT);
+		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+			u32 tmp = agg_ring_size;
+
+			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+				    tmp, agg_ring_size);
+		}
+		bp->rx_agg_ring_size = agg_ring_size;
+		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+		rx_space = rx_size + NET_SKB_PAD +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	}
+
+	bp->rx_buf_use_size = rx_size;
+	bp->rx_buf_size = rx_space;
+
+	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+	ring_size = bp->tx_ring_size;
+	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+	bp->cp_ring_size = ring_size;
+
+	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+	if (bp->cp_nr_pages > MAX_CP_PAGES) {
+		bp->cp_nr_pages = MAX_CP_PAGES;
+		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+			    ring_size, bp->cp_ring_size);
+	}
+	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+	bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_vnic_info *vnic;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->vnic_info)
+		return;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+
+		kfree(vnic->fw_grp_ids);
+		vnic->fw_grp_ids = NULL;
+
+		kfree(vnic->uc_list);
+		vnic->uc_list = NULL;
+
+		if (vnic->mc_list) {
+			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+					  vnic->mc_list, vnic->mc_list_mapping);
+			vnic->mc_list = NULL;
+		}
+
+		if (vnic->rss_table) {
+			dma_free_coherent(&pdev->dev, PAGE_SIZE,
+					  vnic->rss_table,
+					  vnic->rss_table_dma_addr);
+			vnic->rss_table = NULL;
+		}
+
+		vnic->rss_hash_key = NULL;
+		vnic->flags = 0;
+	}
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+	int i, rc = 0, size;
+	struct bnxt_vnic_info *vnic;
+	struct pci_dev *pdev = bp->pdev;
+	int max_rings;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+
+		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+			if (mem_size > 0) {
+				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+				if (!vnic->uc_list) {
+					rc = -ENOMEM;
+					goto out;
+				}
+			}
+		}
+
+		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+			vnic->mc_list =
+				dma_alloc_coherent(&pdev->dev,
+						   vnic->mc_list_size,
+						   &vnic->mc_list_mapping,
+						   GFP_KERNEL);
+			if (!vnic->mc_list) {
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
+
+		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+			max_rings = bp->rx_nr_rings;
+		else
+			max_rings = 1;
+
+		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+		if (!vnic->fw_grp_ids) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		/* Allocate rss table and hash key */
+		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+						     &vnic->rss_table_dma_addr,
+						     GFP_KERNEL);
+		if (!vnic->rss_table) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+	}
+	return 0;
+
+out:
+	return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+			  bp->hwrm_cmd_resp_dma_addr);
+
+	bp->hwrm_cmd_resp_addr = NULL;
+	if (bp->hwrm_dbg_resp_addr) {
+		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+				  bp->hwrm_dbg_resp_addr,
+				  bp->hwrm_dbg_resp_dma_addr);
+
+		bp->hwrm_dbg_resp_addr = NULL;
+	}
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+
+	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+						   &bp->hwrm_cmd_resp_dma_addr,
+						   GFP_KERNEL);
+	if (!bp->hwrm_cmd_resp_addr)
+		return -ENOMEM;
+	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+						    HWRM_DBG_REG_BUF_SIZE,
+						    &bp->hwrm_dbg_resp_dma_addr,
+						    GFP_KERNEL);
+	if (!bp->hwrm_dbg_resp_addr)
+		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
+
+	return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+	u32 size, i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	size = sizeof(struct ctx_hw_stats);
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		if (cpr->hw_stats) {
+			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+					  cpr->hw_stats_map);
+			cpr->hw_stats = NULL;
+		}
+	}
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+	u32 size, i;
+	struct pci_dev *pdev = bp->pdev;
+
+	size = sizeof(struct ctx_hw_stats);
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+						   &cpr->hw_stats_map,
+						   GFP_KERNEL);
+		if (!cpr->hw_stats)
+			return -ENOMEM;
+
+		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+	}
+	return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_tx_ring_info *txr;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		cpr->cp_raw_cons = 0;
+
+		txr = &bnapi->tx_ring;
+		txr->tx_prod = 0;
+		txr->tx_cons = 0;
+
+		rxr = &bnapi->rx_ring;
+		rxr->rx_prod = 0;
+		rxr->rx_agg_prod = 0;
+		rxr->rx_sw_agg_prod = 0;
+	}
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i;
+
+	/* We are under rtnl_lock and all our NAPIs have been disabled, so
+	 * it is safe to delete the hash table.
+	 */
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_ntuple_filter *fltr;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+			hlist_del(&fltr->hash);
+			kfree(fltr);
+		}
+	}
+	if (irq_reinit) {
+		kfree(bp->ntp_fltr_bmap);
+		bp->ntp_fltr_bmap = NULL;
+	}
+	bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
+	if (!(bp->flags & BNXT_FLAG_RFS))
+		return 0;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+	bp->ntp_fltr_count = 0;
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long),
+				    GFP_KERNEL);
+
+	if (!bp->ntp_fltr_bmap)
+		rc = -ENOMEM;
+
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_free_vnic_attributes(bp);
+	bnxt_free_tx_rings(bp);
+	bnxt_free_rx_rings(bp);
+	bnxt_free_cp_rings(bp);
+	bnxt_free_ntp_fltrs(bp, irq_re_init);
+	if (irq_re_init) {
+		bnxt_free_stats(bp);
+		bnxt_free_ring_grps(bp);
+		bnxt_free_vnics(bp);
+		kfree(bp->bnapi);
+		bp->bnapi = NULL;
+	} else {
+		bnxt_clear_ring_indices(bp);
+	}
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+	int i, rc, size, arr_size;
+	void *bnapi;
+
+	if (irq_re_init) {
+		/* Allocate bnapi mem pointer array and mem block for
+		 * all queues
+		 */
+		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+				bp->cp_nr_rings);
+		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+		if (!bnapi)
+			return -ENOMEM;
+
+		bp->bnapi = bnapi;
+		bnapi += arr_size;
+		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+			bp->bnapi[i] = bnapi;
+			bp->bnapi[i]->index = i;
+			bp->bnapi[i]->bp = bp;
+		}
+
+		rc = bnxt_alloc_stats(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_ntp_fltrs(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_vnics(bp);
+		if (rc)
+			goto alloc_mem_err;
+	}
+
+	bnxt_init_ring_struct(bp);
+
+	rc = bnxt_alloc_rx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_tx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_cp_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+				  BNXT_VNIC_UCAST_FLAG;
+	rc = bnxt_alloc_vnic_attributes(bp);
+	if (rc)
+		goto alloc_mem_err;
+	return 0;
+
+alloc_mem_err:
+	bnxt_free_mem(bp, true);
+	return rc;
+}
+
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+			    u16 cmpl_ring, u16 target_id)
+{
+	struct hwrm_cmd_req_hdr *req = request;
+
+	req->cmpl_ring_req_type =
+		cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+	req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
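+/* Write one HWRM request into BAR0, ring the channel doorbell and wait
+ * for the response, either via the completion-ring interrupt or by
+ * polling the response length and valid bit in the response buffer.
+ * Callers serialize through hwrm_cmd_lock.
+ */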
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+	int i, intr_process, rc;
+	struct hwrm_cmd_req_hdr *req = msg;
+	u32 *data = msg;
+	__le32 *resp_len, *valid;
+	u16 cp_ring_id, len = 0;
+	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+	req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+	memset(resp, 0, PAGE_SIZE);
+	cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+		      HWRM_CMPL_RING_MASK) >>
+		     HWRM_CMPL_RING_SFT;
+	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+	/* Write request msg to hwrm channel */
+	__iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+	/* currently supports only one outstanding message */
+	if (intr_process)
+		bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+				       HWRM_SEQ_ID_MASK;
+
+	/* Ring channel doorbell */
+	writel(1, bp->bar0 + 0x100);
+
+	i = 0;
+	if (intr_process) {
+		/* Wait until hwrm response cmpl interrupt is processed */
+		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+		       i++ < timeout) {
+			usleep_range(600, 800);
+		}
+
+		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+				   req->cmpl_ring_req_type);
+			return -1;
+		}
+	} else {
+		/* Check if response len is updated */
+		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+		for (i = 0; i < timeout; i++) {
+			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+			      HWRM_RESP_LEN_SFT;
+			if (len)
+				break;
+			usleep_range(600, 800);
+		}
+
+		if (i >= timeout) {
+			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+				   timeout, req->cmpl_ring_req_type,
+				   req->target_id_seq_id, *resp_len);
+			return -1;
+		}
+
+		/* Last word of resp contains valid bit */
+		valid = bp->hwrm_cmd_resp_addr + len - 4;
+		for (i = 0; i < timeout; i++) {
+			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+				break;
+			usleep_range(600, 800);
+		}
+
+		if (i >= timeout) {
+			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+				   timeout, req->cmpl_ring_req_type,
+				   req->target_id_seq_id, len, *valid);
+			return -1;
+		}
+	}
+
+	rc = le16_to_cpu(resp->error_code);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+			   le16_to_cpu(resp->req_type),
+			   le16_to_cpu(resp->seq_id), rc);
+		return rc;
+	}
+	return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+	int rc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_drv_rgtr_input req = {0};
+	int i;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+	req.enables =
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
+			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+	/* TODO: current async event fwd bits are not defined and the firmware
+	 * only checks if it is non-zero to enable async event forwarding
+	 */
+	req.async_event_fwd[0] |= cpu_to_le32(1);
+	req.os_type = cpu_to_le16(1);
+	req.ver_maj = DRV_VER_MAJ;
+	req.ver_min = DRV_VER_MIN;
+	req.ver_upd = DRV_VER_UPD;
+
+	if (BNXT_PF(bp)) {
+		DECLARE_BITMAP(vf_req_snif_bmap, 256);
+		u32 *data = (u32 *)vf_req_snif_bmap;
+
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+		for (i = 0; i < 8; i++)
+			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
+		req.enables |=
+			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+	}
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+	u32 rc = 0;
+	struct hwrm_tunnel_dst_port_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+	req.tunnel_type = tunnel_type;
+
+	switch (tunnel_type) {
+	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+		break;
+	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+		break;
+	default:
+		break;
+	}
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+			   rc);
+	return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+					   u8 tunnel_type)
+{
+	u32 rc = 0;
+	struct hwrm_tunnel_dst_port_alloc_input req = {0};
+	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+	req.tunnel_type = tunnel_type;
+	req.tunnel_dst_port_val = port;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+			   rc);
+		goto err_out;
+	}
+
+	if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+
+	else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+	req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+	req.mask = cpu_to_le32(vnic->rx_mask);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+					    struct bnxt_ntuple_filter *fltr)
+{
+	struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+	req.ntuple_filter_id = fltr->filter_id;
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS					\
+	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+					     struct bnxt_ntuple_filter *fltr)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+		bp->hwrm_cmd_resp_addr;
+	struct flow_keys *keys = &fltr->fkeys;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+	req.ethertype = htons(ETH_P_IP);
+	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+	req.ipaddr_type = 4;
+	req.ip_protocol = keys->basic.ip_proto;
+
+	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+	req.src_port = keys->ports.src;
+	req.src_port_mask = cpu_to_be16(0xffff);
+	req.dst_port = keys->ports.dst;
+	req.dst_port_mask = cpu_to_be16(0xffff);
+
+	req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		fltr->filter_id = resp->ntuple_filter_id;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+				     u8 *mac_addr)
+{
+	u32 rc = 0;
+	struct hwrm_cfa_l2_filter_alloc_input req = {0};
+	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+	req.enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+	req.l2_addr_mask[0] = 0xff;
+	req.l2_addr_mask[1] = 0xff;
+	req.l2_addr_mask[2] = 0xff;
+	req.l2_addr_mask[3] = 0xff;
+	req.l2_addr_mask[4] = 0xff;
+	req.l2_addr_mask[5] = 0xff;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+							resp->l2_filter_id;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+	int rc = 0;
+
+	/* Any associated ntuple filters will also be cleared by firmware. */
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_of_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		for (j = 0; j < vnic->uc_filter_count; j++) {
+			struct hwrm_cfa_l2_filter_free_input req = {0};
+
+			bnxt_hwrm_cmd_hdr_init(bp, &req,
+					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+			req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+			rc = _hwrm_send_message(bp, &req, sizeof(req),
+						HWRM_CMD_TIMEOUT);
+		}
+		vnic->uc_filter_count = 0;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
+}
+
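+/* Enable or disable hardware aggregation (TPA/GRO) on a vnic.  The
+ * maximum number of aggregated segments is derived from the MTU and
+ * MAX_SKB_FRAGS and handed to firmware in log2 units.
+ */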
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_tpa_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+	if (tpa_flags) {
+		u16 mss = bp->dev->mtu - 40;
+		u32 nsegs, n, segs = 0, flags;
+
+		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+		if (tpa_flags & BNXT_FLAG_GRO)
+			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+		req.flags = cpu_to_le32(flags);
+
+		req.enables =
+			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+		/* The number of aggregated segments is in log2 units, and the
+		 * first packet is not included in these units.
+		 */
+		if (mss <= PAGE_SIZE) {
+			n = PAGE_SIZE / mss;
+			nsegs = (MAX_SKB_FRAGS - 1) * n;
+		} else {
+			n = mss / PAGE_SIZE;
+			if (mss & (PAGE_SIZE - 1))
+				n++;
+			nsegs = (MAX_SKB_FRAGS - n) / n;
+		}
+
+		segs = ilog2(nsegs);
+		req.max_agg_segs = cpu_to_le16(segs);
+		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+	}
+	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
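+/* Program the RSS hash types, indirection table and hash key for a
+ * vnic.  The indirection table is filled round-robin with the vnic's
+ * ring group ids.
+ */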
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+	u32 i, j, max_rings;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_rss_cfg_input req = {0};
+
+	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+	if (set_rss) {
+		vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+				 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+		req.hash_type = cpu_to_le32(vnic->hash_type);
+
+		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+			max_rings = bp->rx_nr_rings;
+		else
+			max_rings = 1;
+
+		/* Fill the RSS indirection table with ring group ids */
+		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+			if (j == max_rings)
+				j = 0;
+			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+		}
+
+		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+		req.hash_key_tbl_addr =
+			cpu_to_le64(vnic->rss_hash_key_dma_addr);
+	}
+	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
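+	/* Request jumbo placement plus IPv4/IPv6 header-data split (HDS)
+	 * so packet headers can be placed separately from payload buffers.
+	 */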
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+	req.enables =
+		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+	/* thresholds not implemented in firmware yet */
+	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+	req.rss_cos_lb_ctx_id =
+		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+			bnxt_hwrm_vnic_ctx_free_one(bp, i);
+	}
+	bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+	int rc;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+			       -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+			le16_to_cpu(resp->rss_cos_lb_ctx_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+	int grp_idx = 0;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+	/* Only RSS supported for now; TBD: COS & LB */
+	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
+	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+	req.cos_rule = cpu_to_le16(0xffff);
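+	/* The default (RSS) vnic uses ring group 0.  RFS vnics are created
+	 * one per RX ring starting at vnic 1, so their ring group index is
+	 * vnic_id - 1.
+	 */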
+	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+		grp_idx = 0;
+	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+		grp_idx = vnic_id - 1;
+
+	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+	req.lb_rule = cpu_to_le16(0xffff);
+	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+			      VLAN_HLEN);
+
+	if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+	u32 rc = 0;
+
+	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+		struct hwrm_vnic_free_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+		req.vnic_id =
+			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		if (rc)
+			return rc;
+		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+	}
+	return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+	u16 i;
+
+	for (i = 0; i < bp->nr_vnics; i++)
+		bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+				u16 end_grp_id)
+{
+	u32 rc = 0, i, j;
+	struct hwrm_vnic_alloc_input req = {0};
+	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	/* map ring groups to this vnic */
+	for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+				   j, (end_grp_id - start_grp_id));
+			break;
+		}
+		bp->vnic_info[vnic_id].fw_grp_ids[j] =
+					bp->grp_info[i].fw_grp_id;
+	}
+
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+	if (vnic_id == 0)
+		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+	u16 i;
+	u32 rc = 0;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct hwrm_ring_grp_alloc_input req = {0};
+		struct hwrm_ring_grp_alloc_output *resp =
+					bp->hwrm_cmd_resp_addr;
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+		req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+		req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+		req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+		req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+
+		bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+	u16 i;
+	u32 rc = 0;
+	struct hwrm_ring_grp_free_input req = {0};
+
+	if (!bp->grp_info)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+			continue;
+		req.ring_group_id =
+			cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+				    struct bnxt_ring_struct *ring,
+				    u32 ring_type, u32 map_index,
+				    u32 stats_ctx_id)
+{
+	int rc = 0, err = 0;
+	struct hwrm_ring_alloc_input req = {0};
+	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	u16 ring_id;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+	req.enables = 0;
+	if (ring->nr_pages > 1) {
+		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+		/* Page size is in log2 units */
+		req.page_size = BNXT_PAGE_SHIFT;
+		req.page_tbl_depth = 1;
+	} else {
+		req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
+	}
+	req.fbo = 0;
+	/* Association of ring index with doorbell index and MSIX number */
+	req.logical_id = cpu_to_le16(map_index);
+
+	switch (ring_type) {
+	case HWRM_RING_ALLOC_TX:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+		/* Association of transmit ring with completion ring */
+		req.cmpl_ring_id =
+			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+		req.queue_id = cpu_to_le16(ring->queue_id);
+		break;
+	case HWRM_RING_ALLOC_RX:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+		break;
+	case HWRM_RING_ALLOC_AGG:
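+		/* Aggregation rings use the RX ring type as far as the
+		 * firmware is concerned; only the ring length differs.
+		 */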
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+		break;
+	case HWRM_RING_ALLOC_CMPL:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+		if (bp->flags & BNXT_FLAG_USING_MSIX)
+			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+		break;
+	default:
+		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+			   ring_type);
+		return -1;
+	}
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	err = le16_to_cpu(resp->error_code);
+	ring_id = le16_to_cpu(resp->ring_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc || err) {
+		switch (ring_type) {
+		case RING_FREE_REQ_RING_TYPE_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case RING_FREE_REQ_RING_TYPE_RX:
+			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case RING_FREE_REQ_RING_TYPE_TX:
+			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+	}
+	ring->fw_ring_id = ring_id;
+	return rc;
+}
+
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+	int i, rc = 0;
+
+	if (bp->cp_nr_rings) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_CMPL, i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
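+			/* Doorbell registers live in BAR 1, one 0x80-byte
+			 * stride per ring index.
+			 */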
+			cpr->cp_doorbell = bp->bar1 + i * 0x80;
+			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+			bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+
+	if (bp->tx_nr_rings) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+			u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_TX, i,
+						      fw_stats_ctx);
+			if (rc)
+				goto err_out;
+			txr->tx_doorbell = bp->bar1 + i * 0x80;
+		}
+	}
+
+	if (bp->rx_nr_rings) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_RX, i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
+			rxr->rx_doorbell = bp->bar1 + i * 0x80;
+			writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+			bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring =
+						&rxr->rx_agg_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_AGG,
+						      bp->rx_nr_rings + i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
+
+			rxr->rx_agg_doorbell =
+				bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+			bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+err_out:
+	return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+				   struct bnxt_ring_struct *ring,
+				   u32 ring_type, int cmpl_ring_id)
+{
+	int rc;
+	struct hwrm_ring_free_input req = {0};
+	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+	u16 error_code;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+	req.ring_type = ring_type;
+	req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	error_code = le16_to_cpu(resp->error_code);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc || error_code) {
+		switch (ring_type) {
+		case RING_FREE_REQ_RING_TYPE_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+				   rc);
+			return rc;
+		case RING_FREE_REQ_RING_TYPE_RX:
+			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+				   rc);
+			return rc;
+		case RING_FREE_REQ_RING_TYPE_TX:
+			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+				   rc);
+			return rc;
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+	int i, rc = 0;
+
+	if (!bp->bnapi)
+		return 0;
+
+	if (bp->tx_nr_rings) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_TX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->rx_nr_rings) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_RX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].rx_fw_ring_id =
+					INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->rx_agg_nr_pages) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring =
+						&rxr->rx_agg_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_RX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].agg_fw_ring_id =
+					INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->cp_nr_rings) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_CMPL,
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].cp_fw_ring_id =
+							INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+	int i, rc = 0;
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+	u16 max_buf, max_buf_irq;
+	u16 buf_tmr, buf_tmr_irq;
+	u32 flags;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+			       -1, -1);
+
+	/* Each rx completion (2 records) should be DMAed immediately */
+	max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+	/* max_buf must not be zero */
+	max_buf = clamp_t(u16, max_buf, 1, 63);
+	max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+	buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+	buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
+	 * if coal_ticks is less than 25 us.
+	 */
+	if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+	req.flags = cpu_to_le16(flags);
+	req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+	req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+	req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+	req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+	req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+	req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+	req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_free_input req = {0};
+
+	if (!bp->bnapi)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+			rc = _hwrm_send_message(bp, &req, sizeof(req),
+						HWRM_CMD_TIMEOUT);
+			if (rc)
+				break;
+
+			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+		}
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_alloc_input req = {0};
+	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+	req.update_period_ms = cpu_to_le32(1000);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+
+		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_qcaps_input req = {0};
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_func_qcaps_exit;
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->fw_fid = le16_to_cpu(resp->fid);
+		pf->port_id = le16_to_cpu(resp->port_id);
+		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
+		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+		pf->max_pf_tx_rings = pf->max_tx_rings;
+		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+		pf->max_pf_rx_rings = pf->max_rx_rings;
+		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+		pf->max_vnics = le16_to_cpu(resp->max_vnics);
+		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+		pf->max_vfs = le16_to_cpu(resp->max_vfs);
+		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		vf->fw_fid = le16_to_cpu(resp->fid);
+		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		if (is_valid_ether_addr(vf->mac_addr))
+			/* overwrite netdev dev_addr with admin VF MAC */
+			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+		else
+			random_ether_addr(bp->dev->dev_addr);
+
+		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+		vf->max_vnics = le16_to_cpu(resp->max_vnics);
+		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+	}
+
+	bp->tx_push_thresh = 0;
+	if (resp->flags &
+	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+	struct hwrm_func_reset_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+	req.enables = 0;
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_queue_qportcfg_input req = {0};
+	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u8 i, *qptr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto qportcfg_exit;
+
+	if (!resp->max_configurable_queues) {
+		rc = -EINVAL;
+		goto qportcfg_exit;
+	}
+	bp->max_tc = resp->max_configurable_queues;
+	if (bp->max_tc > BNXT_MAX_QUEUE)
+		bp->max_tc = BNXT_MAX_QUEUE;
+
+	qptr = &resp->queue_id0;
+	for (i = 0; i < bp->max_tc; i++) {
+		bp->q_info[i].queue_id = *qptr++;
+		bp->q_info[i].queue_profile = *qptr++;
+	}
+
+qportcfg_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+	int rc;
+	struct hwrm_ver_get_input req = {0};
+	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+	req.hwrm_intf_min = HWRM_VERSION_MINOR;
+	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_ver_get_exit;
+
+	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+	if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+	    req.hwrm_intf_min != resp->hwrm_intf_min ||
+	    req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+		netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
+			    resp->hwrm_intf_upd, req.hwrm_intf_maj,
+			    req.hwrm_intf_min, req.hwrm_intf_upd);
+		netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+	}
+	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+	if (bp->vxlan_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	bp->vxlan_port_cnt = 0;
+	if (bp->nge_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+	}
+	bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+	int rc, i;
+	u32 tpa_flags = 0;
+
+	if (set_tpa)
+		tpa_flags = bp->flags & BNXT_FLAG_TPA;
+	for (i = 0; i < bp->nr_vnics; i++) {
+		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+				   i, rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++)
+		bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+				    bool irq_re_init)
+{
+	if (bp->vnic_info) {
+		bnxt_hwrm_clear_vnic_filter(bp);
+		/* clear all RSS settings before freeing the vnic ctx */
+		bnxt_hwrm_clear_vnic_rss(bp);
+		bnxt_hwrm_vnic_ctx_free(bp);
+		/* before freeing the vnic, undo the vnic tpa settings */
+		if (bp->flags & BNXT_FLAG_TPA)
+			bnxt_set_tpa(bp, false);
+		bnxt_hwrm_vnic_free(bp);
+	}
+	bnxt_hwrm_ring_free(bp, close_path);
+	bnxt_hwrm_ring_grp_free(bp);
+	if (irq_re_init) {
+		bnxt_hwrm_stat_ctx_free(bp);
+		bnxt_hwrm_free_tunnel_ports(bp);
+	}
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+	int rc;
+
+	/* allocate context for vnic */
+	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+	bp->rsscos_nr_ctxs++;
+
+	/* configure default vnic, ring grp */
+	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	/* Enable RSS hashing on vnic */
+	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+				   vnic_id, rc);
+		}
+	}
+
+vnic_setup_err:
+	return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
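+	/* For accelerated RFS, each RX ring gets its own vnic (vnic 0
+	 * remains the default RSS vnic), each mapped to one ring group.
+	 */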
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		u16 vnic_id = i + 1;
+		u16 ring_id = i;
+
+		if (vnic_id >= bp->nr_vnics)
+			break;
+
+		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+				   vnic_id, rc);
+			break;
+		}
+		rc = bnxt_setup_vnic(bp, vnic_id);
+		if (rc)
+			break;
+	}
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static int bnxt_cfg_rx_mode(struct bnxt *);
+
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+	int rc = 0;
+
+	if (irq_re_init) {
+		rc = bnxt_hwrm_stat_ctx_alloc(bp);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+				   rc);
+			goto err_out;
+		}
+	}
+
+	rc = bnxt_hwrm_ring_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_hwrm_ring_grp_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+		goto err_out;
+	}
+
+	/* default vnic 0 */
+	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_setup_vnic(bp, 0);
+	if (rc)
+		goto err_out;
+
+	if (bp->flags & BNXT_FLAG_RFS) {
+		rc = bnxt_alloc_rfs_vnics(bp);
+		if (rc)
+			goto err_out;
+	}
+
+	if (bp->flags & BNXT_FLAG_TPA) {
+		rc = bnxt_set_tpa(bp, true);
+		if (rc)
+			goto err_out;
+	}
+
+	if (BNXT_VF(bp))
+		bnxt_update_vf_mac(bp);
+
+	/* Filter for default vnic 0 */
+	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+	if (rc) {
+		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+		goto err_out;
+	}
+	bp->vnic_info[0].uc_filter_count = 1;
+
+	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+				   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+		bp->vnic_info[0].rx_mask |=
+				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+	rc = bnxt_cfg_rx_mode(bp);
+	if (rc)
+		goto err_out;
+
+	rc = bnxt_hwrm_set_coal(bp);
+	if (rc)
+		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+			    rc);
+
+	return 0;
+
+err_out:
+	bnxt_hwrm_resource_free(bp, 0, true);
+
+	return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+	return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_init_rx_rings(bp);
+	bnxt_init_tx_rings(bp);
+	bnxt_init_ring_grps(bp, irq_re_init);
+	bnxt_init_vnics(bp);
+
+	return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+	int i;
+
+	atomic_set(&bp->intr_sem, 0);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+
+	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+	if (rc)
+		return rc;
+
+	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (bp->rx_nr_rings)
+		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+	if (!dev->rx_cpu_rmap)
+		rc = -ENOMEM;
+#endif
+
+	return rc;
+}
+
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+	struct msix_entry *msix_ent;
+	struct net_device *dev = bp->dev;
+	int i, total_vecs, rc = 0;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	bp->flags &= ~BNXT_FLAG_USING_MSIX;
+	total_vecs = bp->cp_nr_rings;
+
+	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!msix_ent)
+		return -ENOMEM;
+
+	for (i = 0; i < total_vecs; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+	if (total_vecs < 0) {
+		rc = -ENODEV;
+		goto msix_setup_exit;
+	}
+
+	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+	if (bp->irq_tbl) {
+		int tcs;
+
+		/* Trim rings based upon num of vectors allocated */
+		bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+		bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		tcs = netdev_get_num_tc(dev);
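+		/* With multiple traffic classes, split the TX rings evenly
+		 * across the TCs; if there are not enough rings for one per
+		 * TC, fall back to a single traffic class.
+		 */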
+		if (tcs > 1) {
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+			if (bp->tx_nr_rings_per_tc == 0) {
+				netdev_reset_tc(dev);
+				bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+			} else {
+				int i, off, count;
+
+				bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+				for (i = 0; i < tcs; i++) {
+					count = bp->tx_nr_rings_per_tc;
+					off = i * count;
+					netdev_set_tc_queue(dev, i, count, off);
+				}
+			}
+		}
+		bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			bp->irq_tbl[i].vector = msix_ent[i].vector;
+			snprintf(bp->irq_tbl[i].name, len,
+				 "%s-%s-%d", dev->name, "TxRx", i);
+			bp->irq_tbl[i].handler = bnxt_msix;
+		}
+		rc = bnxt_set_real_num_queues(bp);
+		if (rc)
+			goto msix_setup_exit;
+	} else {
+		rc = -ENOMEM;
+		goto msix_setup_exit;
+	}
+	bp->flags |= BNXT_FLAG_USING_MSIX;
+	kfree(msix_ent);
+	return 0;
+
+msix_setup_exit:
+	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+	pci_disable_msix(bp->pdev);
+	kfree(msix_ent);
+	return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+	int rc;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	if (netdev_get_num_tc(bp->dev))
+		netdev_reset_tc(bp->dev);
+
+	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+	if (!bp->irq_tbl) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	bp->rx_nr_rings = 1;
+	bp->tx_nr_rings = 1;
+	bp->cp_nr_rings = 1;
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	bp->irq_tbl[0].vector = bp->pdev->irq;
+	snprintf(bp->irq_tbl[0].name, len,
+		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
+	bp->irq_tbl[0].handler = bnxt_inta;
+	rc = bnxt_set_real_num_queues(bp);
+	return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+	int rc = 0;
+
+	if (bp->flags & BNXT_FLAG_MSIX_CAP)
+		rc = bnxt_setup_msix(bp);
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		/* fallback to INTA */
+		rc = bnxt_setup_inta(bp);
+	}
+	return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+	struct bnxt_irq *irq;
+	int i;
+
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+	bp->dev->rx_cpu_rmap = NULL;
+#endif
+	if (!bp->irq_tbl)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		irq = &bp->irq_tbl[i];
+		if (irq->requested)
+			free_irq(irq->vector, bp->bnapi[i]);
+		irq->requested = 0;
+	}
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+	int i, rc = 0;
+	unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+		flags = IRQF_SHARED;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+		if (rmap && (i < bp->rx_nr_rings)) {
+			rc = irq_cpu_rmap_add(rmap, irq->vector);
+			if (rc)
+				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+					    i);
+		}
+#endif
+		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+				 bp->bnapi[i]);
+		if (rc)
+			break;
+
+		irq->requested = 1;
+	}
+	return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+
+		napi_hash_del(&bnapi->napi);
+		netif_napi_del(&bnapi->napi);
+	}
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+
+	if (bp->flags & BNXT_FLAG_USING_MSIX) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			bnapi = bp->bnapi[i];
+			netif_napi_add(bp->dev, &bnapi->napi,
+				       bnxt_poll, 64);
+			napi_hash_add(&bnapi->napi);
+		}
+	} else {
+		bnapi = bp->bnapi[0];
+		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+		napi_hash_add(&bnapi->napi);
+	}
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		napi_disable(&bp->bnapi[i]->napi);
+		bnxt_disable_poll(bp->bnapi[i]);
+	}
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bnxt_enable_poll(bp->bnapi[i]);
+		napi_enable(&bp->bnapi[i]->napi);
+	}
+}
+
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	if (bp->bnapi) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			bnapi = bp->bnapi[i];
+			txr = &bnapi->tx_ring;
+			txq = netdev_get_tx_queue(bp->dev, i);
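+			/* Take the TX queue lock so any in-progress xmit
+			 * sees the CLOSING state before queues are stopped.
+			 */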
+			__netif_tx_lock(txq, smp_processor_id());
+			txr->dev_state = BNXT_DEV_STATE_CLOSING;
+			__netif_tx_unlock(txq);
+		}
+	}
+	/* Stop all TX queues */
+	netif_tx_disable(bp->dev);
+	netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		bnapi = bp->bnapi[i];
+		txr = &bnapi->tx_ring;
+		txq = netdev_get_tx_queue(bp->dev, i);
+		txr->dev_state = 0;
+	}
+	netif_tx_wake_all_queues(bp->dev);
+	if (bp->link_info.link_up)
+		netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+	if (bp->link_info.link_up) {
+		const char *duplex;
+		const char *flow_ctrl;
+		u16 speed;
+
+		netif_carrier_on(bp->dev);
+		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+			duplex = "full";
+		else
+			duplex = "half";
+		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+			flow_ctrl = "ON - receive & transmit";
+		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+			flow_ctrl = "ON - transmit";
+		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+			flow_ctrl = "ON - receive";
+		else
+			flow_ctrl = "none";
+		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+			    speed, duplex, flow_ctrl);
+	} else {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+	}
+}
+
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+	int rc = 0;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	struct hwrm_port_phy_qcfg_input req = {0};
+	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u8 link_up = link_info->link_up;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		return rc;
+	}
+
+	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+	link_info->phy_link_status = resp->link;
+	link_info->duplex =  resp->duplex;
+	link_info->pause = resp->pause;
+	link_info->auto_mode = resp->auto_mode;
+	link_info->auto_pause_setting = resp->auto_pause;
+	link_info->force_pause_setting = resp->force_pause;
+	link_info->duplex_setting = resp->duplex_setting;
+	if (link_info->phy_link_status == BNXT_LINK_LINK)
+		link_info->link_speed = le16_to_cpu(resp->link_speed);
+	else
+		link_info->link_speed = 0;
+	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+	link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+	link_info->phy_ver[0] = resp->phy_maj;
+	link_info->phy_ver[1] = resp->phy_min;
+	link_info->phy_ver[2] = resp->phy_bld;
+	link_info->media_type = resp->media_type;
+	link_info->transceiver = resp->transceiver_type;
+	link_info->phy_addr = resp->phy_addr;
+
+	/* TODO: need to add more logic to report VF link */
+	if (chng_link_state) {
+		if (link_info->phy_link_status == BNXT_LINK_LINK)
+			link_info->link_up = 1;
+		else
+			link_info->link_up = 0;
+		if (link_up != link_info->link_up)
+			bnxt_report_link(bp);
+	} else {
+		/* always report link down if not required to update link state */
+		link_info->link_up = 0;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+	} else {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+	}
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+				      struct hwrm_port_phy_cfg_input *req)
+{
+	u8 autoneg = bp->link_info.autoneg;
+	u16 fw_link_speed = bp->link_info.req_link_speed;
+	u32 advertising = bp->link_info.advertising;
+
+	if (autoneg & BNXT_AUTONEG_SPEED) {
+		req->auto_mode |=
+			PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+		req->enables |= cpu_to_le32(
+			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+		req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+		req->flags |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+	} else {
+		req->force_link_speed = cpu_to_le16(fw_link_speed);
+		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+	}
+
+	/* currently don't support half duplex */
+	req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+	req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+	/* tell chimp that the setting takes effect immediately */
+	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	bnxt_hwrm_set_pause_common(bp, &req);
+
+	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+	    bp->link_info.force_link_chng)
+		bnxt_hwrm_set_link_common(bp, &req);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+		/* Since changing the pause setting doesn't trigger any link
+		 * change event, the driver needs to update the current pause
+		 * result upon successful return of the phy_cfg command.
+		 */
+		bp->link_info.pause =
+		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+		bp->link_info.auto_pause_setting = 0;
+		if (!bp->link_info.force_link_chng)
+			bnxt_report_link(bp);
+	}
+	bp->link_info.force_link_chng = false;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	if (set_pause)
+		bnxt_hwrm_set_pause_common(bp, &req);
+
+	bnxt_hwrm_set_link_common(bp, &req);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+	int rc;
+	bool update_link = false;
+	bool update_pause = false;
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	rc = bnxt_update_link(bp, true);
+	if (rc) {
+		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+			   rc);
+		return rc;
+	}
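+	/* Compare the requested pause/speed/duplex settings against what
+	 * the firmware reported and only issue a phy_cfg when something
+	 * actually needs to change.
+	 */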
+	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+	    link_info->auto_pause_setting != link_info->req_flow_ctrl)
+		update_pause = true;
+	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+	    link_info->force_pause_setting != link_info->req_flow_ctrl)
+		update_pause = true;
+	if (link_info->req_duplex != link_info->duplex_setting)
+		update_link = true;
+	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+		if (BNXT_AUTO_MODE(link_info->auto_mode))
+			update_link = true;
+		if (link_info->req_link_speed != link_info->force_link_speed)
+			update_link = true;
+	} else {
+		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+			update_link = true;
+		if (link_info->advertising != link_info->auto_link_speeds)
+			update_link = true;
+		if (link_info->req_link_speed != link_info->auto_link_speed)
+			update_link = true;
+	}
+
+	if (update_link)
+		rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+	else if (update_pause)
+		rc = bnxt_hwrm_set_pause(bp);
+	if (rc) {
+		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* Common routine to pre-map certain register blocks to different GRC windows.
+ * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
+ * in the PF and 3 windows in the VF can be customized to map in different
+ * register blocks.
+ */
+static void bnxt_preset_reg_win(struct bnxt *bp)
+{
+	if (BNXT_PF(bp)) {
+		/* CAG registers map to GRC window #4 */
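+		/* Offset 12 selects window #4, assuming one 4-byte window
+		 * select register per window.
+		 */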
+		writel(BNXT_CAG_REG_BASE,
+		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
+	}
+}
+
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+	bnxt_preset_reg_win(bp);
+	netif_carrier_off(bp->dev);
+	if (irq_re_init) {
+		rc = bnxt_setup_int_mode(bp);
+		if (rc) {
+			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+				   rc);
+			return rc;
+		}
+	}
+	if ((bp->flags & BNXT_FLAG_RFS) &&
+	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		/* disable RFS if falling back to INTA */
+		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+		bp->flags &= ~BNXT_FLAG_RFS;
+	}
+
+	rc = bnxt_alloc_mem(bp, irq_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+		goto open_err_free_mem;
+	}
+
+	if (irq_re_init) {
+		bnxt_init_napi(bp);
+		rc = bnxt_request_irq(bp);
+		if (rc) {
+			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+			goto open_err;
+		}
+	}
+
+	bnxt_enable_napi(bp);
+
+	rc = bnxt_init_nic(bp, irq_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+		goto open_err;
+	}
+
+	if (link_re_init) {
+		rc = bnxt_update_phy_setting(bp);
+		if (rc)
+			goto open_err;
+	}
+
+	if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+		vxlan_get_rx_port(bp->dev);
+#endif
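+		/* 0x17c1 is 6081, the IANA-assigned GENEVE UDP port */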
+		if (!bnxt_hwrm_tunnel_dst_port_alloc(
+				bp, htons(0x17c1),
+				TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+			bp->nge_port_cnt = 1;
+	}
+
+	set_bit(BNXT_STATE_OPEN, &bp->state);
+	bnxt_enable_int(bp);
+	/* Enable TX queues */
+	bnxt_tx_enable(bp);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	return 0;
+
+open_err:
+	bnxt_disable_napi(bp);
+	bnxt_del_napi(bp);
+
+open_err_free_mem:
+	bnxt_free_skbs(bp);
+	bnxt_free_irq(bp);
+	bnxt_free_mem(bp, true);
+	return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+		dev_close(bp->dev);
+	}
+	return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+			   rc);
+		rc = -1;
+		return rc;
+	}
+	return __bnxt_open_nic(bp, true, true);
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+	if (!netif_running(bp->dev))
+		return;
+
+	bnxt_disable_int(bp);
+	for (i = 0; i < bp->cp_nr_rings; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+	if (bp->sriov_cfg) {
+		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+						      !bp->sriov_cfg,
+						      BNXT_SRIOV_CFG_WAIT_TMO);
+		if (rc)
+			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+	}
+#endif
+	/* Change device state to avoid TX queue wake-ups */
+	bnxt_tx_disable(bp);
+
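+	/* Drop the OPEN state first, then wait for any sp_task that is
+	 * already running so it cannot race with the teardown below.
+	 */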
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
+	smp_mb__after_atomic();
+	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+		msleep(20);
+
+	/* Flush rings before disabling interrupts */
+	bnxt_shutdown_nic(bp, irq_re_init);
+
+	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+	bnxt_disable_napi(bp);
+	bnxt_disable_int_sync(bp);
+	del_timer_sync(&bp->timer);
+	bnxt_free_skbs(bp);
+
+	if (irq_re_init) {
+		bnxt_free_irq(bp);
+		bnxt_del_napi(bp);
+	}
+	bnxt_free_mem(bp, irq_re_init);
+	return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	bnxt_close_nic(bp, true, true);
+	return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		/* fallthru */
+	case SIOCGMIIREG: {
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		return 0;
+	}
+
+	case SIOCSMIIREG:
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		return 0;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	u32 i;
+	struct bnxt *bp = netdev_priv(dev);
+
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+	if (!bp->bnapi)
+		return stats;
+
+	/* TODO check if we need to synchronize with bnxt_close path */
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+		struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+		stats->rx_missed_errors +=
+			le64_to_cpu(hw_stats->rx_discard_pkts);
+
+		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+		stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+	}
+
+	return stats;
+}
+
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	u8 *haddr;
+	int mc_count = 0;
+	bool update = false;
+	int off = 0;
+
+	netdev_for_each_mc_addr(ha, dev) {
+		if (mc_count >= BNXT_MAX_MC_ADDRS) {
+			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+			vnic->mc_list_count = 0;
+			return false;
+		}
+		haddr = ha->addr;
+		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+			update = true;
+		}
+		off += ETH_ALEN;
+		mc_count++;
+	}
+	if (mc_count)
+		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+	if (mc_count != vnic->mc_list_count) {
+		vnic->mc_list_count = mc_count;
+		update = true;
+	}
+	return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	int off = 0;
+
+	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+		return true;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+			return true;
+
+		off += ETH_ALEN;
+	}
+	return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	u32 mask = vnic->rx_mask;
+	bool mc_update = false;
+	bool uc_update;
+
+	if (!netif_running(dev))
+		return;
+
+	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+	/* Only allow PF to be in promiscuous mode */
+	if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+	uc_update = bnxt_uc_list_updated(bp);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+	} else {
+		mc_update = bnxt_mc_list_updated(bp, &mask);
+	}
+
+	if (mask != vnic->rx_mask || uc_update || mc_update) {
+		vnic->rx_mask = mask;
+
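+		/* HWRM commands can sleep, but .ndo_set_rx_mode may run in
+		 * atomic context, so defer the actual update to the sp
+		 * workqueue.
+		 */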
+		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+}
+
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	int i, off = 0, rc;
+	bool uc_update;
+
+	netif_addr_lock_bh(dev);
+	uc_update = bnxt_uc_list_updated(bp);
+	netif_addr_unlock_bh(dev);
+
+	if (!uc_update)
+		goto skip_uc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
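+	/* Filter 0 holds the device's own MAC address; only the extra
+	 * unicast filters (index 1 and up) are freed and re-programmed.
+	 */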
+	for (i = 1; i < vnic->uc_filter_count; i++) {
+		struct hwrm_cfa_l2_filter_free_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+				       -1);
+
+		req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	vnic->uc_filter_count = 1;
+
+	netif_addr_lock_bh(dev);
+	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+	} else {
+		netdev_for_each_uc_addr(ha, dev) {
+			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+			off += ETH_ALEN;
+			vnic->uc_filter_count++;
+		}
+	}
+	netif_addr_unlock_bh(dev);
+
+	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+		if (rc) {
+			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+				   rc);
+			vnic->uc_filter_count = i;
+			return rc;
+		}
+	}
+
+skip_uc:
+	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc)
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+			   rc);
+
+	return rc;
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u32 flags = bp->flags;
+	u32 changes;
+	int rc = 0;
+	bool re_init = false;
+	bool update_tpa = false;
+
+	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+		flags |= BNXT_FLAG_GRO;
+	if (features & NETIF_F_LRO)
+		flags |= BNXT_FLAG_LRO;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		flags |= BNXT_FLAG_STRIP_VLAN;
+
+	if (features & NETIF_F_NTUPLE)
+		flags |= BNXT_FLAG_RFS;
+
+	changes = flags ^ bp->flags;
+	if (changes & BNXT_FLAG_TPA) {
+		update_tpa = true;
+		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+		    (flags & BNXT_FLAG_TPA) == 0)
+			re_init = true;
+	}
+
+	if (changes & ~BNXT_FLAG_TPA)
+		re_init = true;
+
+	if (flags != bp->flags) {
+		u32 old_flags = bp->flags;
+
+		bp->flags = flags;
+
+		if (!netif_running(dev)) {
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+			return rc;
+		}
+
+		if (re_init) {
+			bnxt_close_nic(bp, false, false);
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+
+			return bnxt_open_nic(bp, false, false);
+		}
+		if (update_tpa) {
+			rc = bnxt_set_tpa(bp,
+					  (flags & BNXT_FLAG_TPA) ?
+					  true : false);
+			if (rc)
+				bp->flags = old_flags;
+		}
+	}
+	return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bnapi = bp->bnapi[i];
+		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
+		cpr = &bnapi->cp_ring;
+		if (netif_msg_drv(bp)) {
+			netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+				    i, txr->tx_ring_struct.fw_ring_id,
+				    txr->tx_prod, txr->tx_cons);
+			netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+				    i, rxr->rx_ring_struct.fw_ring_id,
+				    rxr->rx_prod,
+				    rxr->rx_agg_ring_struct.fw_ring_id,
+				    rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+			netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+				    i, cpr->cp_ring_struct.fw_ring_id,
+				    cpr->cp_raw_cons);
+		}
+	}
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+	bnxt_dbg_dump_states(bp);
+	if (netif_running(bp->dev)) {
+		bnxt_close_nic(bp, false, false);
+		bnxt_open_nic(bp, false, false);
+	}
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+	schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, bp->bnapi[i]);
+		enable_irq(irq->vector);
+	}
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+	struct bnxt *bp = (struct bnxt *)data;
+	struct net_device *dev = bp->dev;
+
+	if (!netif_running(dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+	int rc;
+
+	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	smp_mb__after_atomic();
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		return;
+	}
+
+	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_rx_mode(bp);
+
+	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_ntp_filters(bp);
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		rc = bnxt_update_link(bp, true);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
+	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+		bnxt_hwrm_exec_fwd_req(bp);
+	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_alloc(
+			bp, bp->vxlan_port,
+			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+		/* bnxt_reset_task() calls bnxt_close_nic() which waits
+		 * for BNXT_STATE_IN_SP_TASK to clear.
+		 */
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_lock();
+		bnxt_reset_task(bp);
+		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_unlock();
+	}
+
+	smp_mb__before_atomic();
+	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+}
+
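+/* One-time board setup: enable the PCI device, map BARs 0, 2 and 4, and
+ * initialize default ring sizes, coalescing parameters, the slow-path
+ * work item and the periodic timer.
+ */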
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	int rc;
+	struct bnxt *bp = netdev_priv(dev);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto init_err;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto init_err_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto init_err_disable;
+	}
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		rc = -EIO;
+		goto init_err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	bp->bar0 = pci_ioremap_bar(pdev, 0);
+	if (!bp->bar0) {
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar1 = pci_ioremap_bar(pdev, 2);
+	if (!bp->bar1) {
+		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar2 = pci_ioremap_bar(pdev, 4);
+	if (!bp->bar2) {
+		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+	spin_lock_init(&bp->ntp_fltr_lock);
+
+	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+	bp->coal_bufs = 20;
+	bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+	bp->coal_bufs_irq = 2;
+
+	init_timer(&bp->timer);
+	bp->timer.data = (unsigned long)bp;
+	bp->timer.function = bnxt_timer;
+	bp->current_interval = BNXT_TIMER_INTERVAL;
+
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
+
+	return 0;
+
+init_err_release:
+	if (bp->bar2) {
+		pci_iounmap(pdev, bp->bar2);
+		bp->bar2 = NULL;
+	}
+
+	if (bp->bar1) {
+		pci_iounmap(pdev, bp->bar1);
+		bp->bar1 = NULL;
+	}
+
+	if (bp->bar0) {
+		pci_iounmap(pdev, bp->bar0);
+		bp->bar0 = NULL;
+	}
+
+	pci_release_regions(pdev);
+
+init_err_disable:
+	pci_disable_device(pdev);
+
+init_err:
+	return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+#ifdef CONFIG_BNXT_SRIOV
+	if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
+		return -EADDRNOTAVAIL;
+#endif
+
+	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+		return 0;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev)) {
+		bnxt_close_nic(bp, false, false);
+		rc = bnxt_open_nic(bp, false, false);
+	}
+
+	return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (new_mtu < 60 || new_mtu > 9000)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		bnxt_close_nic(bp, false, false);
+
+	dev->mtu = new_mtu;
+	bnxt_set_ring_params(bp);
+
+	if (netif_running(dev))
+		return bnxt_open_nic(bp, false, false);
+
+	return 0;
+}
+
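+/* ndo_setup_tc: scale the TX ring count by the number of traffic classes,
+ * closing and re-opening the NIC to re-allocate hardware resources.
+ */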
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (tc > bp->max_tc) {
+		netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
+			   tc, bp->max_tc);
+		return -EINVAL;
+	}
+
+	if (netdev_get_num_tc(dev) == tc)
+		return 0;
+
+	if (tc) {
+		int max_rx_rings, max_tx_rings;
+
+		bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+		if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+			return -ENOMEM;
+	}
+
+	/* Need to close the device and do hw resource re-allocations */
+	if (netif_running(bp->dev))
+		bnxt_close_nic(bp, true, false);
+
+	if (tc) {
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+		netdev_set_num_tc(dev, tc);
+	} else {
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+		netdev_reset_tc(dev);
+	}
+	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (netif_running(bp->dev))
+		return bnxt_open_nic(bp, true, false);
+
+	return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
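+/* Two ntuple filters match when their IPv4 addresses, L4 ports, IP
+ * protocol, Ethernet protocol and source MAC address are all identical.
+ */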
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+			    struct bnxt_ntuple_filter *f2)
+{
+	struct flow_keys *keys1 = &f1->fkeys;
+	struct flow_keys *keys2 = &f2->fkeys;
+
+	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+	    keys1->ports.ports == keys2->ports.ports &&
+	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
+	    keys1->basic.n_proto == keys2->basic.n_proto &&
+	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+		return true;
+
+	return false;
+}
+
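+/* aRFS ndo_rx_flow_steer handler: dissect the flow, reject encapsulated
+ * and non-TCP/UDP IPv4 packets, and hand the new ntuple filter to the
+ * slow-path task for programming into hardware.
+ */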
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			      u16 rxq_index, u32 flow_id)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ntuple_filter *fltr, *new_fltr;
+	struct flow_keys *fkeys;
+	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+	int rc = 0, idx, bit_id;
+	struct hlist_head *head;
+
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
+	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+	if (!new_fltr)
+		return -ENOMEM;
+
+	fkeys = &new_fltr->fkeys;
+	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
+
+	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
+
+	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+	head = &bp->ntp_fltr_hash_tbl[idx];
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(fltr, head, hash) {
+		if (bnxt_fltr_match(fltr, new_fltr)) {
+			rcu_read_unlock();
+			rc = 0;
+			goto err_free;
+		}
+	}
+	rcu_read_unlock();
+
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+					 BNXT_NTP_FLTR_MAX_FLTR, 0);
+	if (bit_id < 0) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		rc = -ENOMEM;
+		goto err_free;
+	}
+
+	new_fltr->sw_id = (u16)bit_id;
+	new_fltr->flow_id = flow_id;
+	new_fltr->rxq = rxq_index;
+	hlist_add_head_rcu(&new_fltr->hash, head);
+	bp->ntp_fltr_count++;
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+
+	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+	schedule_work(&bp->sp_task);
+
+	return new_fltr->sw_id;
+
+err_free:
+	kfree(new_fltr);
+	return rc;
+}
+
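+/* Called from the slow-path task: program not-yet-valid ntuple filters
+ * into hardware and free entries that failed or that RPS reports as
+ * expired.
+ */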
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_ntuple_filter *fltr;
+		int rc;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+			bool del = false;
+
+			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+				if (rps_may_expire_flow(bp->dev, fltr->rxq,
+							fltr->flow_id,
+							fltr->sw_id)) {
+					bnxt_hwrm_cfa_ntuple_filter_free(bp,
+									 fltr);
+					del = true;
+				}
+			} else {
+				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+								       fltr);
+				if (rc)
+					del = true;
+				else
+					set_bit(BNXT_FLTR_VALID, &fltr->state);
+			}
+
+			if (del) {
+				spin_lock_bh(&bp->ntp_fltr_lock);
+				hlist_del_rcu(&fltr->hash);
+				bp->ntp_fltr_count--;
+				spin_unlock_bh(&bp->ntp_fltr_lock);
+				synchronize_rcu();
+				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+				kfree(fltr);
+			}
+		}
+	}
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				__be16 port)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	if (sa_family != AF_INET6 && sa_family != AF_INET)
+		return;
+
+	if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+		return;
+
+	bp->vxlan_port_cnt++;
+	if (bp->vxlan_port_cnt == 1) {
+		bp->vxlan_port = port;
+		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				__be16 port)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	if (sa_family != AF_INET6 && sa_family != AF_INET)
+		return;
+
+	if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+		bp->vxlan_port_cnt--;
+
+		if (bp->vxlan_port_cnt == 0) {
+			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+			schedule_work(&bp->sp_task);
+		}
+	}
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+	.ndo_open		= bnxt_open,
+	.ndo_start_xmit		= bnxt_start_xmit,
+	.ndo_stop		= bnxt_close,
+	.ndo_get_stats64	= bnxt_get_stats64,
+	.ndo_set_rx_mode	= bnxt_set_rx_mode,
+	.ndo_do_ioctl		= bnxt_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= bnxt_change_mac_addr,
+	.ndo_change_mtu		= bnxt_change_mtu,
+	.ndo_fix_features	= bnxt_fix_features,
+	.ndo_set_features	= bnxt_set_features,
+	.ndo_tx_timeout		= bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+	.ndo_get_vf_config	= bnxt_get_vf_config,
+	.ndo_set_vf_mac		= bnxt_set_vf_mac,
+	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
+	.ndo_set_vf_rate	= bnxt_set_vf_bw,
+	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
+	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bnxt_poll_controller,
+#endif
+	.ndo_setup_tc           = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
+#endif
+	.ndo_add_vxlan_port	= bnxt_add_vxlan_port,
+	.ndo_del_vxlan_port	= bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (BNXT_PF(bp))
+		bnxt_sriov_disable(bp);
+
+	unregister_netdev(dev);
+	cancel_work_sync(&bp->sp_task);
+	bp->sp_event = 0;
+
+	bnxt_free_hwrm_resources(bp);
+	pci_iounmap(pdev, bp->bar2);
+	pci_iounmap(pdev, bp->bar1);
+	pci_iounmap(pdev, bp->bar0);
+	free_netdev(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+	int rc = 0;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	char phy_ver[PHY_VER_STR_LEN];
+
+	rc = bnxt_update_link(bp, false);
+	if (rc) {
+		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
+	/* Initialize the ethtool settings copy with NVM settings */
+	if (BNXT_AUTO_MODE(link_info->auto_mode))
+		link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl = link_info->auto_pause_setting;
+	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		link_info->req_flow_ctrl = link_info->force_pause_setting;
+	}
+	link_info->req_duplex = link_info->duplex_setting;
+	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+		link_info->req_link_speed = link_info->auto_link_speed;
+	else
+		link_info->req_link_speed = link_info->force_link_speed;
+	link_info->advertising = link_info->auto_link_speeds;
+	snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+		 link_info->phy_ver[0],
+		 link_info->phy_ver[1],
+		 link_info->phy_ver[2]);
+	strcat(bp->fw_ver_str, phy_ver);
+	return rc;
+}
+
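+/* Return the MSI-X table size from PCI config space, or 1 if the device
+ * has no MSI-X capability.
+ */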
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+	u16 ctrl;
+
+	if (!pdev->msix_cap)
+		return 1;
+
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
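+/* Report the maximum usable RX/TX ring counts, bounded by the available
+ * IRQs, completion rings and stat contexts; RX is halved when aggregation
+ * rings are enabled.
+ */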
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+	int max_rings = 0;
+
+	if (BNXT_PF(bp)) {
+		*max_tx = bp->pf.max_pf_tx_rings;
+		*max_rx = bp->pf.max_pf_rx_rings;
+		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		*max_tx = bp->vf.max_tx_rings;
+		*max_rx = bp->vf.max_rx_rings;
+		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+	}
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		*max_rx >>= 1;
+
+	*max_rx = min_t(int, *max_rx, max_rings);
+	*max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int version_printed;
+	struct net_device *dev;
+	struct bnxt *bp;
+	int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+	if (version_printed++ == 0)
+		pr_info("%s", version);
+
+	max_irqs = bnxt_get_max_irq(pdev);
+	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+	if (!dev)
+		return -ENOMEM;
+
+	bp = netdev_priv(dev);
+
+	if (bnxt_vf_pciid(ent->driver_data))
+		bp->flags |= BNXT_FLAG_VF;
+
+	if (pdev->msix_cap) {
+		bp->flags |= BNXT_FLAG_MSIX_CAP;
+		if (BNXT_PF(bp))
+			bp->flags |= BNXT_FLAG_RFS;
+	}
+
+	rc = bnxt_init_board(pdev, dev);
+	if (rc < 0)
+		goto init_err_free;
+
+	dev->netdev_ops = &bnxt_netdev_ops;
+	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+	dev->ethtool_ops = &bnxt_ethtool_ops;
+
+	pci_set_drvdata(pdev, dev);
+
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			   NETIF_F_TSO | NETIF_F_TSO6 |
+			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		dev->hw_features |= NETIF_F_NTUPLE;
+
+	dev->hw_enc_features =
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+	init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+	rc = bnxt_alloc_hwrm_resources(bp);
+	if (rc)
+		goto init_err;
+
+	mutex_init(&bp->hwrm_cmd_lock);
+	bnxt_hwrm_ver_get(bp);
+
+	rc = bnxt_hwrm_func_drv_rgtr(bp);
+	if (rc)
+		goto init_err;
+
+	/* Get the MAX capabilities for this function */
+	rc = bnxt_hwrm_func_qcaps(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+			   rc);
+		rc = -1;
+		goto init_err;
+	}
+
+	rc = bnxt_hwrm_queue_qportcfg(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+			   rc);
+		rc = -1;
+		goto init_err;
+	}
+
+	bnxt_set_tpa_flags(bp);
+	bnxt_set_ring_params(bp);
+	dflt_rings = netif_get_num_default_rss_queues();
+	if (BNXT_PF(bp))
+		bp->pf.max_irqs = max_irqs;
+#if defined(CONFIG_BNXT_SRIOV)
+	else
+		bp->vf.max_irqs = max_irqs;
+#endif
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+	bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+		bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+	rc = bnxt_probe_phy(bp);
+	if (rc)
+		goto init_err;
+
+	rc = register_netdev(dev);
+	if (rc)
+		goto init_err;
+
+	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+		    board_info[ent->driver_data].name,
+		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+	return 0;
+
+init_err:
+	pci_iounmap(pdev, bp->bar0);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+init_err_free:
+	free_netdev(dev);
+	return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnxt_pci_tbl,
+	.probe		= bnxt_init_one,
+	.remove		= bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+	.sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 0000000..f199f4c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1092 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME		"bnxt_en"
+#define DRV_MODULE_VERSION	"0.1.24"
+
+#define DRV_VER_MAJ	0
+#define DRV_VER_MIN	1
+#define DRV_VER_UPD	24
+
+struct tx_bd {
+	__le32 tx_bd_len_flags_type;
+	#define TX_BD_TYPE					(0x3f << 0)
+	 #define TX_BD_TYPE_SHORT_TX_BD				 (0x00 << 0)
+	 #define TX_BD_TYPE_LONG_TX_BD				 (0x10 << 0)
+	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
+	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
+	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
+	 #define TX_BD_FLAGS_BD_CNT_SHIFT			 8
+	#define TX_BD_FLAGS_LHINT				(3 << 13)
+	 #define TX_BD_FLAGS_LHINT_SHIFT			 13
+	 #define TX_BD_FLAGS_LHINT_512_AND_SMALLER		 (0 << 13)
+	 #define TX_BD_FLAGS_LHINT_512_TO_1023			 (1 << 13)
+	 #define TX_BD_FLAGS_LHINT_1024_TO_2047			 (2 << 13)
+	 #define TX_BD_FLAGS_LHINT_2048_AND_LARGER		 (3 << 13)
+	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
+	#define TX_BD_LEN					(0xffff << 16)
+	 #define TX_BD_LEN_SHIFT				 16
+
+	u32 tx_bd_opaque;
+	__le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+	__le32 tx_bd_hsize_lflags;
+	#define TX_BD_FLAGS_TCP_UDP_CHKSUM			(1 << 0)
+	#define TX_BD_FLAGS_IP_CKSUM				(1 << 1)
+	#define TX_BD_FLAGS_NO_CRC				(1 << 2)
+	#define TX_BD_FLAGS_STAMP				(1 << 3)
+	#define TX_BD_FLAGS_T_IP_CHKSUM				(1 << 4)
+	#define TX_BD_FLAGS_LSO					(1 << 5)
+	#define TX_BD_FLAGS_IPID_FMT				(1 << 6)
+	#define TX_BD_FLAGS_T_IPID				(1 << 7)
+	#define TX_BD_HSIZE					(0xff << 16)
+	 #define TX_BD_HSIZE_SHIFT				 16
+
+	__le32 tx_bd_mss;
+	__le32 tx_bd_cfa_action;
+	#define TX_BD_CFA_ACTION				(0xffff << 16)
+	 #define TX_BD_CFA_ACTION_SHIFT				 16
+
+	__le32 tx_bd_cfa_meta;
+	#define TX_BD_CFA_META_MASK                             0xfffffff
+	#define TX_BD_CFA_META_VID_MASK                         0xfff
+	#define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
+	 #define TX_BD_CFA_META_PRI_SHIFT                        12
+	#define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
+	 #define TX_BD_CFA_META_TPID_SHIFT                       16
+	#define TX_BD_CFA_META_KEY                              (0xf << 28)
+	 #define TX_BD_CFA_META_KEY_SHIFT			 28
+	#define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
+};
+
+struct rx_bd {
+	__le32 rx_bd_len_flags_type;
+	#define RX_BD_TYPE					(0x3f << 0)
+	 #define RX_BD_TYPE_RX_PACKET_BD			 0x4
+	 #define RX_BD_TYPE_RX_BUFFER_BD			 0x5
+	 #define RX_BD_TYPE_RX_AGG_BD				 0x6
+	 #define RX_BD_TYPE_16B_BD_SIZE				 (0 << 4)
+	 #define RX_BD_TYPE_32B_BD_SIZE				 (1 << 4)
+	 #define RX_BD_TYPE_48B_BD_SIZE				 (2 << 4)
+	 #define RX_BD_TYPE_64B_BD_SIZE				 (3 << 4)
+	#define RX_BD_FLAGS_SOP					(1 << 6)
+	#define RX_BD_FLAGS_EOP					(1 << 7)
+	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
+	 #define RX_BD_FLAGS_1_BUFFER_PACKET			 (0 << 8)
+	 #define RX_BD_FLAGS_2_BUFFER_PACKET			 (1 << 8)
+	 #define RX_BD_FLAGS_3_BUFFER_PACKET			 (2 << 8)
+	 #define RX_BD_FLAGS_4_BUFFER_PACKET			 (3 << 8)
+	#define RX_BD_LEN					(0xffff << 16)
+	 #define RX_BD_LEN_SHIFT				 16
+
+	u32 rx_bd_opaque;
+	__le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+	__le32 tx_cmp_flags_type;
+	#define CMP_TYPE					(0x3f << 0)
+	 #define CMP_TYPE_TX_L2_CMP				 0
+	 #define CMP_TYPE_RX_L2_CMP				 17
+	 #define CMP_TYPE_RX_AGG_CMP				 18
+	 #define CMP_TYPE_RX_L2_TPA_START_CMP			 19
+	 #define CMP_TYPE_RX_L2_TPA_END_CMP			 21
+	 #define CMP_TYPE_STATUS_CMP				 32
+	 #define CMP_TYPE_REMOTE_DRIVER_REQ			 34
+	 #define CMP_TYPE_REMOTE_DRIVER_RESP			 36
+	 #define CMP_TYPE_ERROR_STATUS				 48
+	 #define CMPL_BASE_TYPE_STAT_EJECT			 (0x1aUL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_DONE			 (0x20UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_FWD_REQ			 (0x22UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_FWD_RESP			 (0x24UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT		 (0x2eUL << 0)
+
+	#define TX_CMP_FLAGS_ERROR				(1 << 6)
+	#define TX_CMP_FLAGS_PUSH				(1 << 7)
+
+	u32 tx_cmp_opaque;
+	__le32 tx_cmp_errors_v;
+	#define TX_CMP_V					(1 << 0)
+	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		 0
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		 2
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG	 4
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		 5
+	 #define TX_CMP_ERRORS_ZERO_LENGTH_PKT			 (1 << 4)
+	 #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			 (1 << 5)
+	 #define TX_CMP_ERRORS_DMA_ERROR			 (1 << 6)
+	 #define TX_CMP_ERRORS_HINT_TOO_SHORT			 (1 << 7)
+
+	__le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+	__le32 rx_cmp_len_flags_type;
+	#define RX_CMP_CMP_TYPE					(0x3f << 0)
+	#define RX_CMP_FLAGS_ERROR				(1 << 6)
+	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
+	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
+	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
+	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
+	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_IP				 (1 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_TCP				 (2 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_UDP				 (3 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_FCOE			 (4 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_ROCE			 (5 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS			 (8 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_PTP_W_TS			 (9 << 12)
+	#define RX_CMP_LEN					(0xffff << 16)
+	 #define RX_CMP_LEN_SHIFT				 16
+
+	u32 rx_cmp_opaque;
+	__le32 rx_cmp_misc_v1;
+	#define RX_CMP_V1					(1 << 0)
+	#define RX_CMP_AGG_BUFS					(0x1f << 1)
+	 #define RX_CMP_AGG_BUFS_SHIFT				 1
+	#define RX_CMP_RSS_HASH_TYPE				(0x7f << 9)
+	 #define RX_CMP_RSS_HASH_TYPE_SHIFT			 9
+	#define RX_CMP_PAYLOAD_OFFSET				(0xff << 16)
+	 #define RX_CMP_PAYLOAD_OFFSET_SHIFT			 16
+
+	__le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp)				\
+	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RSS_PROFILE_ID_MASK	0x1f
+
+#define RX_CMP_HASH_TYPE(rxcmp)					\
+	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+struct rx_cmp_ext {
+	__le32 rx_cmp_flags2;
+	#define RX_CMP_FLAGS2_IP_CS_CALC			0x1
+	#define RX_CMP_FLAGS2_L4_CS_CALC			(0x1 << 1)
+	#define RX_CMP_FLAGS2_T_IP_CS_CALC			(0x1 << 2)
+	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)
+	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)
+	__le32 rx_cmp_meta_data;
+	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff
+	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000
+	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16
+	__le32 rx_cmp_cfa_code_errors_v2;
+	#define RX_CMP_V					(1 << 0)
+	#define RX_CMPL_ERRORS_MASK				(0x7fff << 1)
+	 #define RX_CMPL_ERRORS_SFT				 1
+	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK		(0x7 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER		 (0x0 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT	 (0x1 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT		 (0x3 << 1)
+	#define RX_CMPL_ERRORS_IP_CS_ERROR			(0x1 << 4)
+	#define RX_CMPL_ERRORS_L4_CS_ERROR			(0x1 << 5)
+	#define RX_CMPL_ERRORS_T_IP_CS_ERROR			(0x1 << 6)
+	#define RX_CMPL_ERRORS_T_L4_CS_ERROR			(0x1 << 7)
+	#define RX_CMPL_ERRORS_CRC_ERROR			(0x1 << 8)
+	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK			(0x7 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR		 (0x0 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION	 (0x1 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN	 (0x2 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR	 (0x3 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR	 (0x4 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR	 (0x5 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL	 (0x6 << 9)
+	#define RX_CMPL_ERRORS_PKT_ERROR_MASK			(0xf << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR		 (0x0 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION	 (0x1 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN	 (0x2 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL		 (0x3 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR	 (0x4 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR	 (0x5 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN	 (0x6 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN	 (0x8 << 12)
+
+	#define RX_CMPL_CFA_CODE_MASK				(0xffff << 16)
+	 #define RX_CMPL_CFA_CODE_SFT				 16
+
+	__le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS						\
+	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS						\
+	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS						\
+	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1)						\
+	    (((rxcmp1)->rx_cmp_flags2 &	RX_CMP_L4_CS_BITS) &&		\
+	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1)						\
+	    ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &			\
+	     RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+	__le32 rx_agg_cmp_len_flags_type;
+	#define RX_AGG_CMP_TYPE					(0x3f << 0)
+	#define RX_AGG_CMP_LEN					(0xffff << 16)
+	 #define RX_AGG_CMP_LEN_SHIFT				 16
+	u32 rx_agg_cmp_opaque;
+	__le32 rx_agg_cmp_v;
+	#define RX_AGG_CMP_V					(1 << 0)
+	__le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+	__le32 rx_tpa_start_cmp_len_flags_type;
+	#define RX_TPA_START_CMP_TYPE				(0x3f << 0)
+	#define RX_TPA_START_CMP_FLAGS				(0x3ff << 6)
+	 #define RX_TPA_START_CMP_FLAGS_SHIFT			 6
+	#define RX_TPA_START_CMP_FLAGS_PLACEMENT		(0x7 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT		 7
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS	 (0x6 << 7)
+	#define RX_TPA_START_CMP_FLAGS_RSS_VALID		(0x1 << 10)
+	#define RX_TPA_START_CMP_FLAGS_ITYPES			(0xf << 12)
+	 #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT		 12
+	 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP		 (0x2 << 12)
+	#define RX_TPA_START_CMP_LEN				(0xffff << 16)
+	 #define RX_TPA_START_CMP_LEN_SHIFT			 16
+
+	u32 rx_tpa_start_cmp_opaque;
+	__le32 rx_tpa_start_cmp_misc_v1;
+	#define RX_TPA_START_CMP_V1				(0x1 << 0)
+	#define RX_TPA_START_CMP_RSS_HASH_TYPE			(0x7f << 9)
+	 #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT		 9
+	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
+	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
+
+	__le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start)				\
+	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
+	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start)				\
+	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
+	   RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
+	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_AGG_ID(rx_tpa_start)					\
+	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
+	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+	__le32 rx_tpa_start_cmp_flags2;
+	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC		(0x1 << 0)
+	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC		(0x1 << 1)
+	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC		(0x1 << 2)
+	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
+
+	__le32 rx_tpa_start_cmp_metadata;
+	__le32 rx_tpa_start_cmp_cfa_code_v2;
+	#define RX_TPA_START_CMP_V2				(0x1 << 0)
+	#define RX_TPA_START_CMP_CFA_CODE			(0xffff << 16)
+	 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT		 16
+	__le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+	__le32 rx_tpa_end_cmp_len_flags_type;
+	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
+	#define RX_TPA_END_CMP_FLAGS				(0x3ff << 6)
+	 #define RX_TPA_END_CMP_FLAGS_SHIFT			 6
+	#define RX_TPA_END_CMP_FLAGS_PLACEMENT			(0x7 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT		 7
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS		 (0x6 << 7)
+	#define RX_TPA_END_CMP_FLAGS_RSS_VALID			(0x1 << 10)
+	#define RX_TPA_END_CMP_FLAGS_ITYPES			(0xf << 12)
+	 #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT		 12
+	 #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP			 (0x2 << 12)
+	#define RX_TPA_END_CMP_LEN				(0xffff << 16)
+	 #define RX_TPA_END_CMP_LEN_SHIFT			 16
+
+	u32 rx_tpa_end_cmp_opaque;
+	__le32 rx_tpa_end_cmp_misc_v1;
+	#define RX_TPA_END_CMP_V1				(0x1 << 0)
+	#define RX_TPA_END_CMP_AGG_BUFS				(0x3f << 1)
+	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT			 1
+	#define RX_TPA_END_CMP_TPA_SEGS				(0xff << 8)
+	 #define RX_TPA_END_CMP_TPA_SEGS_SHIFT			 8
+	#define RX_TPA_END_CMP_PAYLOAD_OFFSET			(0xff << 16)
+	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
+	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
+	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
+
+	__le32 rx_tpa_end_cmp_tsdelta;
+	#define RX_TPA_END_GRO_TS				(0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end)					\
+	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
+	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end)					\
+	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
+	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
+	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &		\
+		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end)						\
+	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
+	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end)					\
+	((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+	__le32 rx_tpa_end_cmp_dup_acks;
+	#define RX_TPA_END_CMP_TPA_DUP_ACKS			(0xf << 0)
+
+	__le32 rx_tpa_end_cmp_seg_len;
+	#define RX_TPA_END_CMP_TPA_SEG_LEN			(0xffff << 0)
+
+	__le32 rx_tpa_end_cmp_errors_v2;
+	#define RX_TPA_END_CMP_V2				(0x1 << 0)
+	#define RX_TPA_END_CMP_ERRORS				(0x7fff << 1)
+	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
+
+	u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK						0xffffff
+#define DB_IDX_VALID						(0x1 << 26)
+#define DB_IRQ_DIS						(0x1 << 27)
+#define DB_KEY_TX						(0x0 << 28)
+#define DB_KEY_RX						(0x1 << 28)
+#define DB_KEY_CP						(0x2 << 28)
+#define DB_KEY_ST						(0x3 << 28)
+#define DB_KEY_TX_PUSH						(0x4 << 28)
+#define DB_LONG_TX_PUSH						(0x2 << 24)
+
+#define INVALID_HW_RING_ID	((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4		0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4	0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6		0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6	0x08
+
+/* The hardware supports certain page sizes.  Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT	12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT	PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT	13
+#else
+#define BNXT_PAGE_SHIFT	16
+#endif
+
+#define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE	45
+
+#define BNXT_NUM_TESTS(bp)	0
+
+#define BNXT_DEFAULT_RX_RING_SIZE	1023
+#define BNXT_DEFAULT_TX_RING_SIZE	512
+
+#define MAX_TPA		64
+
+#define MAX_RX_PAGES	8
+#define MAX_RX_AGG_PAGES	32
+#define MAX_TX_PAGES	8
+#define MAX_CP_PAGES	64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)
+
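+/* Each descriptor is 16 bytes, so a ring index splits into a page number
+ * (RX_RING/TX_RING/CP_RING) and an offset within that page
+ * (RX_IDX/TX_IDX/CP_IDX).  With 4K pages, RX_RING(300) == 1 and
+ * RX_IDX(300) == 44.
+ */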
+#define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))
+
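+/* The valid (V) bit in each completion toggles on every pass around the
+ * ring; an entry is consumed only when its V bit agrees with the phase
+ * derived from the raw consumer index and bp->cp_bit.
+ */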
+#define TX_CMP_VALID(txcmp, raw_cons)					\
+	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
+	 !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons)					\
+	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+	 !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons)				\
+	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==	\
+	 !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp)					\
+	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp)					\
+	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n)	((idx) + (n))
+#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT		500
+#define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK		0xffff
+#define HWRM_RESP_LEN_MASK		0xffff0000
+#define HWRM_RESP_LEN_SFT		16
+#define HWRM_RESP_VALID_MASK		0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE		128
+#define BNXT_HWRM_REQS_PER_PAGE		(BNXT_PAGE_SIZE /	\
+					 BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	u8			is_gso;
+	u8			is_push;
+	unsigned short		nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+	u8			*data;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+	struct page		*page;
+	dma_addr_t		mapping;
+};
+
+struct bnxt_ring_struct {
+	int			nr_pages;
+	int			page_size;
+	void			**pg_arr;
+	dma_addr_t		*dma_arr;
+
+	__le64			*pg_tbl;
+	dma_addr_t		pg_tbl_map;
+
+	int			vmem_size;
+	void			**vmem;
+
+	u16			fw_ring_id; /* Ring id filled by Chimp FW */
+	u8			queue_id;
+};
+
+struct tx_push_bd {
+	__le32			doorbell;
+	struct tx_bd		txbd1;
+	struct tx_bd_ext	txbd2;
+};
+
+struct bnxt_tx_ring_info {
+	u16			tx_prod;
+	u16			tx_cons;
+	void __iomem		*tx_doorbell;
+
+	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];
+	struct bnxt_sw_tx_bd	*tx_buf_ring;
+
+	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];
+
+	struct tx_push_bd	*tx_push;
+	dma_addr_t		tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING	0x1
+	u32			dev_state;
+
+	struct bnxt_ring_struct	tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+	u8			*data;
+	dma_addr_t		mapping;
+	u16			len;
+	unsigned short		gso_type;
+	u32			flags2;
+	u32			metadata;
+	enum pkt_hash_types	hash_type;
+	u32			rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+	u16			rx_prod;
+	u16			rx_agg_prod;
+	u16			rx_sw_agg_prod;
+	void __iomem		*rx_doorbell;
+	void __iomem		*rx_agg_doorbell;
+
+	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];
+	struct bnxt_sw_rx_bd	*rx_buf_ring;
+
+	struct rx_bd		*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+	struct bnxt_sw_rx_agg_bd	*rx_agg_ring;
+
+	unsigned long		*rx_agg_bmap;
+	u16			rx_agg_bmap_size;
+
+	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
+	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+	struct bnxt_tpa_info	*rx_tpa;
+
+	struct bnxt_ring_struct	rx_ring_struct;
+	struct bnxt_ring_struct	rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+	u32			cp_raw_cons;
+	void __iomem		*cp_doorbell;
+
+	struct tx_cmp		*cp_desc_ring[MAX_CP_PAGES];
+
+	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
+
+	struct ctx_hw_stats	*hw_stats;
+	dma_addr_t		hw_stats_map;
+	u32			hw_stats_ctx_id;
+	u64			rx_l4_csum_errors;
+
+	struct bnxt_ring_struct	cp_ring_struct;
+};
+
+struct bnxt_napi {
+	struct napi_struct	napi;
+	struct bnxt		*bp;
+
+	int			index;
+	struct bnxt_cp_ring_info	cp_ring;
+	struct bnxt_rx_ring_info	rx_ring;
+	struct bnxt_tx_ring_info	tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	atomic_t		poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+	BNXT_STATE_IDLE = 0,
+	BNXT_STATE_NAPI,
+	BNXT_STATE_POLL,
+	BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+	irq_handler_t	handler;
+	unsigned int	vector;
+	u8		requested;
+	char		name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX	0x1
+#define HWRM_RING_ALLOC_RX	0x2
+#define HWRM_RING_ALLOC_AGG	0x4
+#define HWRM_RING_ALLOC_CMPL	0x8
+
+#define INVALID_STATS_CTX_ID	-1
+
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK	0xffff0000
+#define HWRM_CMPL_RING_SFT	16
+	__le32	cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK	0xffff
+#define HWRM_SEQ_ID_INVALID -1
+#define HWRM_RESP_LEN_OFFSET	4
+#define HWRM_TARGET_FID_MASK	0xffff0000
+#define HWRM_TARGET_FID_SFT	16
+	__le32	target_id_seq_id;
+	__le64	resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+	u16	fw_stats_ctx;
+	u16	fw_grp_id;
+	u16	rx_fw_ring_id;
+	u16	agg_fw_ring_id;
+	u16	cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+	u16		fw_vnic_id; /* returned by Chimp during alloc */
+	u16		fw_rss_cos_lb_ctx;
+	u16		fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS	4
+	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+				/* index 0 always dev_addr */
+	u16		uc_filter_count;
+	u8		*uc_list;
+
+	u16		*fw_grp_ids;
+	u16		hash_type;
+	dma_addr_t	rss_table_dma_addr;
+	__le16		*rss_table;
+	dma_addr_t	rss_hash_key_dma_addr;
+	u64		*rss_hash_key;
+	u32		rx_mask;
+
+	u8		*mc_list;
+	int		mc_list_size;
+	int		mc_list_count;
+	dma_addr_t	mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS	16
+
+	u32		flags;
+#define BNXT_VNIC_RSS_FLAG	1
+#define BNXT_VNIC_RFS_FLAG	2
+#define BNXT_VNIC_MCAST_FLAG	4
+#define BNXT_VNIC_UCAST_FLAG	8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+	u16	fw_fid;
+	u8	mac_addr[ETH_ALEN];
+	u16	max_rsscos_ctxs;
+	u16	max_cp_rings;
+	u16	max_tx_rings;
+	u16	max_rx_rings;
+	u16	max_l2_ctxs;
+	u16	max_irqs;
+	u16	max_vnics;
+	u16	max_stat_ctxs;
+	u16	vlan;
+	u32	flags;
+#define BNXT_VF_QOS		0x1
+#define BNXT_VF_SPOOFCHK	0x2
+#define BNXT_VF_LINK_FORCED	0x4
+#define BNXT_VF_LINK_UP		0x8
+	u32	func_flags; /* func cfg flags */
+	u32	min_tx_rate;
+	u32	max_tx_rate;
+	void	*hwrm_cmd_req_addr;
+	dma_addr_t	hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID	1
+#define BNXT_FIRST_VF_FID	128
+	u32	fw_fid;
+	u8	port_id;
+	u8	mac_addr[ETH_ALEN];
+	u16	max_rsscos_ctxs;
+	u16	max_cp_rings;
+	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
+	u16	max_pf_tx_rings; /* runtime max tx rings owned by PF */
+	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
+	u16	max_pf_rx_rings; /* runtime max rx rings owned by PF */
+	u16	max_irqs;
+	u16	max_l2_ctxs;
+	u16	max_vnics;
+	u16	max_stat_ctxs;
+	u32	first_vf_id;
+	u16	active_vfs;
+	u16	max_vfs;
+	u32	max_encap_records;
+	u32	max_decap_records;
+	u32	max_tx_em_flows;
+	u32	max_tx_wm_flows;
+	u32	max_rx_em_flows;
+	u32	max_rx_wm_flows;
+	unsigned long	*vf_event_bmap;
+	u16	hwrm_cmd_req_pages;
+	void			*hwrm_cmd_req_addr[4];
+	dma_addr_t		hwrm_cmd_req_dma_addr[4];
+	struct bnxt_vf_info	*vf;
+};
+
+struct bnxt_ntuple_filter {
+	struct hlist_node	hash;
+	u8			src_mac_addr[ETH_ALEN];
+	struct flow_keys	fkeys;
+	__le64			filter_id;
+	u16			sw_id;
+	u16			rxq;
+	u32			flow_id;
+	unsigned long		state;
+#define BNXT_FLTR_VALID		0
+#define BNXT_FLTR_UPDATE	1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED				\
+	(ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |	\
+	 ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+	u8			media_type;
+	u8			transceiver;
+	u8			phy_addr;
+	u8			phy_link_status;
+#define BNXT_LINK_NO_LINK	PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL	PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK		PORT_PHY_QCFG_RESP_LINK_LINK
+	u8			wire_speed;
+	u8			loop_back;
+	u8			link_up;
+	u8			duplex;
+#define BNXT_LINK_DUPLEX_HALF	PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL	PORT_PHY_QCFG_RESP_DUPLEX_FULL
+	u8			pause;
+#define BNXT_LINK_PAUSE_TX	PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX	PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH	(PORT_PHY_QCFG_RESP_PAUSE_RX | \
+				 PORT_PHY_QCFG_RESP_PAUSE_TX)
+	u8			auto_pause_setting;
+	u8			force_pause_setting;
+	u8			duplex_setting;
+	u8			auto_mode;
+#define BNXT_AUTO_MODE(mode)	((mode) > BNXT_LINK_AUTO_NONE && \
+				 (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN		3
+	u8			phy_ver[PHY_VER_LEN];
+	u16			link_speed;
+#define BNXT_LINK_SPEED_100MB	PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB	PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB	PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB	PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB	PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+	u16			support_speeds;
+	u16			auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+	u16			auto_link_speed;
+	u16			force_link_speed;
+	u32			preemphasis;
+
+	/* copy of requested setting from ethtool cmd */
+	u8			autoneg;
+#define BNXT_AUTONEG_SPEED		1
+#define BNXT_AUTONEG_FLOW_CTRL		2
+	u8			req_duplex;
+	u8			req_flow_ctrl;
+	u16			req_link_speed;
+	u32			advertising;
+	bool			force_link_chng;
+	/* a copy of phy_qcfg output used to report link
+	 * info to VF
+	 */
+	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE	8
+
+struct bnxt_queue_info {
+	u8	queue_id;
+	u8	queue_profile;
+};
+
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT	0x400
+#define BNXT_CAG_REG_LEGACY_INT_STATUS	0x4014
+#define BNXT_CAG_REG_BASE		0x300000
+
+struct bnxt {
+	void __iomem		*bar0;
+	void __iomem		*bar1;
+	void __iomem		*bar2;
+
+	u32			reg_base;
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	atomic_t		intr_sem;
+
+	u32			flags;
+	#define BNXT_FLAG_DCB_ENABLED	0x1
+	#define BNXT_FLAG_VF		0x2
+	#define BNXT_FLAG_LRO		0x4
+#ifdef CONFIG_INET
+	#define BNXT_FLAG_GRO		0x8
+#else
+	/* Cannot support hardware GRO if CONFIG_INET is not set */
+	#define BNXT_FLAG_GRO		0x0
+#endif
+	#define BNXT_FLAG_TPA		(BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+	#define BNXT_FLAG_JUMBO		0x10
+	#define BNXT_FLAG_STRIP_VLAN	0x20
+	#define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+					 BNXT_FLAG_LRO)
+	#define BNXT_FLAG_USING_MSIX	0x40
+	#define BNXT_FLAG_MSIX_CAP	0x80
+	#define BNXT_FLAG_RFS		0x100
+	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
+					    BNXT_FLAG_RFS |		\
+					    BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
+
+	struct bnxt_napi	**bnapi;
+
+	u32			rx_buf_size;
+	u32			rx_buf_use_size;	/* useable size */
+	u32			rx_ring_size;
+	u32			rx_agg_ring_size;
+	u32			rx_copy_thresh;
+	u32			rx_ring_mask;
+	u32			rx_agg_ring_mask;
+	int			rx_nr_pages;
+	int			rx_agg_nr_pages;
+	int			rx_nr_rings;
+	int			rsscos_nr_ctxs;
+
+	u32			tx_ring_size;
+	u32			tx_ring_mask;
+	int			tx_nr_pages;
+	int			tx_nr_rings;
+	int			tx_nr_rings_per_tc;
+
+	int			tx_wake_thresh;
+	int			tx_push_thresh;
+	int			tx_push_size;
+
+	u32			cp_ring_size;
+	u32			cp_ring_mask;
+	u32			cp_bit;
+	int			cp_nr_pages;
+	int			cp_nr_rings;
+
+	int			num_stat_ctxs;
+	struct bnxt_ring_grp_info	*grp_info;
+	struct bnxt_vnic_info	*vnic_info;
+	int			nr_vnics;
+
+	u8			max_tc;
+	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
+
+	unsigned int		current_interval;
+#define BNXT_TIMER_INTERVAL	(HZ / 2)
+
+	struct timer_list	timer;
+
+	unsigned long		state;
+#define BNXT_STATE_OPEN		0
+#define BNXT_STATE_IN_SP_TASK	1
+
+	struct bnxt_irq	*irq_tbl;
+	u8			mac_addr[ETH_ALEN];
+
+	u32			msg_enable;
+
+	u16			hwrm_cmd_seq;
+	u32			hwrm_intr_seq_id;
+	void			*hwrm_cmd_resp_addr;
+	dma_addr_t		hwrm_cmd_resp_dma_addr;
+	void			*hwrm_dbg_resp_addr;
+	dma_addr_t		hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE	128
+	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
+	struct hwrm_ver_get_output	ver_resp;
+#define FW_VER_STR_LEN		32
+#define BC_HWRM_STR_LEN		21
+#define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+	char			fw_ver_str[FW_VER_STR_LEN];
+	__be16			vxlan_port;
+	u8			vxlan_port_cnt;
+	__le16			vxlan_fw_dst_port_id;
+	u8			nge_port_cnt;
+	__le16			nge_fw_dst_port_id;
+	u16			coal_ticks;
+	u16			coal_ticks_irq;
+	u16			coal_bufs;
+	u16			coal_bufs_irq;
+
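+/* Coalescing timer ticks are 80 ns each (12.5 ticks per usec), e.g.
+ * 4 usec maps to 50 ticks.
+ */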
+#define BNXT_USEC_TO_COAL_TIMER(x)	((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+	struct work_struct	sp_task;
+	unsigned long		sp_event;
+#define BNXT_RX_MASK_SP_EVENT		0
+#define BNXT_RX_NTP_FLTR_SP_EVENT	1
+#define BNXT_LINK_CHNG_SP_EVENT		2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
+#define BNXT_RESET_TASK_SP_EVENT	6
+#define BNXT_RST_RING_SP_EVENT		7
+
+	struct bnxt_pf_info	pf;
+#ifdef CONFIG_BNXT_SRIOV
+	int			nr_vfs;
+	struct bnxt_vf_info	vf;
+	wait_queue_head_t	sriov_cfg_wait;
+	bool			sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO	msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR	4096
+#define BNXT_NTP_FLTR_HASH_SIZE	512
+#define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
+	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+	spinlock_t		ntp_fltr_lock;	/* for hash table add, del */
+
+	unsigned long		*ntp_fltr_bmap;
+	int			ntp_fltr_count;
+
+	struct bnxt_link_info	link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				BNXT_STATE_NAPI);
+
+	return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				BNXT_STATE_POLL);
+
+	return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+	return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
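+/* Spin until neither NAPI nor busy polling owns the bnapi, then park it
+ * in the DISABLE state so no new owner can take it.
+ */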
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+	int old;
+
+	while (1) {
+		old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				     BNXT_STATE_DISABLE);
+		if (old == BNXT_STATE_IDLE)
+			break;
+		usleep_range(500, 5000);
+	}
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+	return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+	return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+	return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 0000000..45bd628
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(*coal));
+
+	coal->rx_coalesce_usecs =
+		max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+	coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+	coal->rx_coalesce_usecs_irq =
+		max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+	coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+	return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+	bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+	bp->coal_ticks_irq =
+		BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+	bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_coal(bp);
+
+	return rc;
+}
+
+#define BNXT_NUM_STATS	21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return BNXT_NUM_STATS * bp->cp_nr_rings;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *buf)
+{
+	u32 i, j = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+	memset(buf, 0, buf_size);
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+		int k;
+
+		for (k = 0; k < stat_fields; j++, k++)
+			buf[j] = le64_to_cpu(hw_stats[k]);
+		buf[j++] = cpr->rx_l4_csum_errors;
+	}
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u32 i;
+
+	switch (stringset) {
+	/* The number of strings must match BNXT_NUM_STATS defined above. */
+	case ETH_SS_STATS:
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			sprintf(buf, "[%d]: rx_ucast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_mcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_bcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_discards", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_drops", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_ucast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_mcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_bcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_ucast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_mcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_bcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_discards", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_drops", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_ucast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_mcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_bcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_events", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_aborts", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+			buf += ETH_GSTRING_LEN;
+		}
+		break;
+	default:
+		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+			   stringset);
+		break;
+	}
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS))
+		return -EINVAL;
+
+	if (netif_running(dev))
+		bnxt_close_nic(bp, false, false);
+
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+	bnxt_set_ring_params(bp);
+
+	if (netif_running(dev))
+		return bnxt_open_nic(bp, false, false);
+
+	return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+			      struct ethtool_channels *channel)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int max_rx_rings, max_tx_rings, tcs;
+
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	tcs = netdev_get_num_tc(dev);
+	if (tcs > 1)
+		max_tx_rings /= tcs;
+
+	channel->max_rx = max_rx_rings;
+	channel->max_tx = max_tx_rings;
+	channel->max_other = 0;
+	channel->max_combined = 0;
+	channel->rx_count = bp->rx_nr_rings;
+	channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+			     struct ethtool_channels *channel)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int max_rx_rings, max_tx_rings, tcs;
+	u32 rc = 0;
+
+	if (channel->other_count || channel->combined_count ||
+	    !channel->rx_count || !channel->tx_count)
+		return -EINVAL;
+
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	tcs = netdev_get_num_tc(dev);
+	if (tcs > 1)
+		max_tx_rings /= tcs;
+
+	if (channel->rx_count > max_rx_rings ||
+	    channel->tx_count > max_tx_rings)
+		return -EINVAL;
+
+	if (netif_running(dev)) {
+		if (BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * before PF unload
+			 */
+		}
+		rc = bnxt_close_nic(bp, true, false);
+		if (rc) {
+			netdev_err(bp->dev, "Set channel failure rc: %x\n",
+				   rc);
+			return rc;
+		}
+	}
+
+	bp->rx_nr_rings = channel->rx_count;
+	bp->tx_nr_rings_per_tc = channel->tx_count;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+	if (tcs > 1)
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (netif_running(dev)) {
+		rc = bnxt_open_nic(bp, true, false);
+		if ((!rc) && BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * to re-enable them
+			 */
+		}
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	int i, j = 0;
+
+	cmd->data = bp->ntp_fltr_count;
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct bnxt_ntuple_filter *fltr;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(fltr, head, hash) {
+			if (j == cmd->rule_cnt)
+				break;
+			rule_locs[j++] = fltr->sw_id;
+		}
+		rcu_read_unlock();
+		if (j == cmd->rule_cnt)
+			break;
+	}
+	cmd->rule_cnt = j;
+	return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fs =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct bnxt_ntuple_filter *fltr;
+	struct flow_keys *fkeys;
+	int i, rc = -EINVAL;
+
+	if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+		return rc;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(fltr, head, hash) {
+			if (fltr->sw_id == fs->location)
+				goto fltr_found;
+		}
+		rcu_read_unlock();
+	}
+	return rc;
+
+fltr_found:
+	fkeys = &fltr->fkeys;
+	if (fkeys->basic.ip_proto == IPPROTO_TCP)
+		fs->flow_type = TCP_V4_FLOW;
+	else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+		fs->flow_type = UDP_V4_FLOW;
+	else
+		goto fltr_err;
+
+	fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+	fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+	fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+	fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+	fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+	fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+	fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+	fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+	fs->ring_cookie = fltr->rxq;
+	rc = 0;
+
+fltr_err:
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			  u32 *rule_locs)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = bp->rx_nr_rings;
+		break;
+
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = bp->ntp_fltr_count;
+		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+		break;
+
+	case ETHTOOL_GRXCLSRLALL:
+		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+		break;
+
+	case ETHTOOL_GRXCLSRULE:
+		rc = bnxt_grxclsrule(bp, cmd);
+		break;
+
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+	return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+	return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+			 u8 *hfunc)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	int i = 0;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (indir)
+		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+			indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+	if (key)
+		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+	return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+	info->testinfo_len = BNXT_NUM_TESTS(bp);
+	/* TODO CHIMP_FW: eeprom dump details */
+	info->eedump_len = 0;
+	/* TODO CHIMP FW: reg dump details */
+	info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+	u16 fw_speeds = link_info->support_speeds;
+	u32 speed_mask = 0;
+
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+		speed_mask |= SUPPORTED_100baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+		speed_mask |= SUPPORTED_1000baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+		speed_mask |= SUPPORTED_2500baseX_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+		speed_mask |= SUPPORTED_10000baseT_Full;
+	/* TODO: support 25GB, 50GB with different cable type */
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+		speed_mask |= SUPPORTED_20000baseMLD2_Full |
+			SUPPORTED_20000baseKR2_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+		speed_mask |= SUPPORTED_40000baseKR4_Full |
+			SUPPORTED_40000baseCR4_Full |
+			SUPPORTED_40000baseSR4_Full |
+			SUPPORTED_40000baseLR4_Full;
+
+	return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+	u16 fw_speeds = link_info->auto_link_speeds;
+	u32 speed_mask = 0;
+
+	/* TODO: support 25GB, 40GB, 50GB with different cable type */
+	/* set the advertised speeds */
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+		speed_mask |= ADVERTISED_100baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+		speed_mask |= ADVERTISED_1000baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+		speed_mask |= ADVERTISED_2500baseX_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+		speed_mask |= ADVERTISED_10000baseT_Full;
+	/* TODO: how to advertise 20, 25, 40, 50GB with different cable types? */
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+		speed_mask |= ADVERTISED_20000baseMLD2_Full |
+			      ADVERTISED_20000baseKR2_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+		speed_mask |= ADVERTISED_40000baseKR4_Full |
+			      ADVERTISED_40000baseCR4_Full |
+			      ADVERTISED_40000baseSR4_Full |
+			      ADVERTISED_40000baseLR4_Full;
+	return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+	switch (fw_link_speed) {
+	case BNXT_LINK_SPEED_100MB:
+		return SPEED_100;
+	case BNXT_LINK_SPEED_1GB:
+		return SPEED_1000;
+	case BNXT_LINK_SPEED_2_5GB:
+		return SPEED_2500;
+	case BNXT_LINK_SPEED_10GB:
+		return SPEED_10000;
+	case BNXT_LINK_SPEED_20GB:
+		return SPEED_20000;
+	case BNXT_LINK_SPEED_25GB:
+		return SPEED_25000;
+	case BNXT_LINK_SPEED_40GB:
+		return SPEED_40000;
+	case BNXT_LINK_SPEED_50GB:
+		return SPEED_50000;
+	default:
+		return SPEED_UNKNOWN;
+	}
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u16 ethtool_speed;
+
+	cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+	if (link_info->auto_link_speeds)
+		cmd->supported |= SUPPORTED_Autoneg;
+
+	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+		cmd->advertising =
+			bnxt_fw_to_ethtool_advertised_spds(link_info);
+		cmd->advertising |= ADVERTISED_Autoneg;
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+		cmd->advertising = 0;
+	}
+	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+		    BNXT_LINK_PAUSE_BOTH) {
+			cmd->advertising |= ADVERTISED_Pause;
+			cmd->supported |= SUPPORTED_Pause;
+		} else {
+			cmd->advertising |= ADVERTISED_Asym_Pause;
+			cmd->supported |= SUPPORTED_Asym_Pause;
+			if (link_info->auto_pause_setting &
+			    BNXT_LINK_PAUSE_RX)
+				cmd->advertising |= ADVERTISED_Pause;
+		}
+	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+		    BNXT_LINK_PAUSE_BOTH) {
+			cmd->supported |= SUPPORTED_Pause;
+		} else {
+			cmd->supported |= SUPPORTED_Asym_Pause;
+			if (link_info->force_pause_setting &
+			    BNXT_LINK_PAUSE_RX)
+				cmd->supported |= SUPPORTED_Pause;
+		}
+	}
+
+	cmd->port = PORT_NONE;
+	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+		cmd->port = PORT_TP;
+		cmd->supported |= SUPPORTED_TP;
+		cmd->advertising |= ADVERTISED_TP;
+	} else {
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_FIBRE;
+
+		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+			cmd->port = PORT_DA;
+		else if (link_info->media_type ==
+			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+			cmd->port = PORT_FIBRE;
+	}
+
+	if (link_info->phy_link_status == BNXT_LINK_LINK) {
+		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+			cmd->duplex = DUPLEX_FULL;
+	} else {
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+	ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+	ethtool_cmd_speed_set(cmd, ethtool_speed);
+	if (link_info->transceiver ==
+		PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+		cmd->transceiver = XCVR_INTERNAL;
+	else
+		cmd->transceiver = XCVR_EXTERNAL;
+	cmd->phy_address = link_info->phy_addr;
+
+	return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+	switch (ethtool_speed) {
+	case SPEED_100:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+	case SPEED_1000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+	case SPEED_2500:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+	case SPEED_10000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+	case SPEED_20000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+	case SPEED_25000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+	case SPEED_40000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+	case SPEED_50000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+	default:
+		netdev_err(dev, "unsupported speed!\n");
+		break;
+	}
+	return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+	u16 fw_speed_mask = 0;
+
+	/* only support autoneg at speeds of 100, 1000, and 10000 */
+	if (advertising & (ADVERTISED_100baseT_Full |
+			   ADVERTISED_100baseT_Half)) {
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+	}
+	if (advertising & (ADVERTISED_1000baseT_Full |
+			   ADVERTISED_1000baseT_Half)) {
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+	}
+	if (advertising & ADVERTISED_10000baseT_Full)
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+	return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	int rc = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u32 speed, fw_advertising = 0;
+	bool set_pause = false;
+
+	if (BNXT_VF(bp))
+		return rc;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+			netdev_err(dev, "Media type doesn't support autoneg\n");
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+					 ADVERTISED_Autoneg |
+					 ADVERTISED_TP |
+					 ADVERTISED_Pause |
+					 ADVERTISED_Asym_Pause)) {
+			netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+				   cmd->advertising);
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+		if (fw_advertising & ~link_info->support_speeds) {
+			netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+				   cmd->advertising);
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		link_info->autoneg |= BNXT_AUTONEG_SPEED;
+		if (!fw_advertising)
+			link_info->advertising = link_info->support_speeds;
+		else
+			link_info->advertising = fw_advertising;
+		/* any change to autoneg will cause a link change, therefore the
+		 * driver should put back the original pause setting in autoneg
+		 */
+		set_pause = true;
+	} else {
+		/* TODO: half duplex is currently not supported */
+		if (cmd->duplex == DUPLEX_HALF) {
+			netdev_err(dev, "HALF DUPLEX is not supported!\n");
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		/* If we receive a request for an unknown duplex, assume full */
+		if (cmd->duplex == DUPLEX_UNKNOWN)
+			cmd->duplex = DUPLEX_FULL;
+		speed = ethtool_cmd_speed(cmd);
+		link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+		link_info->advertising = 0;
+	}
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+	return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	if (BNXT_VF(bp))
+		return;
+	epause->autoneg = !!(link_info->auto_pause_setting &
+			     BNXT_LINK_PAUSE_BOTH);
+	epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+	epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *epause)
+{
+	int rc = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	if (BNXT_VF(bp))
+		return rc;
+
+	if (epause->autoneg) {
+		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+	} else {
+		/* when transitioning from auto pause to forced pause,
+		 * force a link change
+		 */
+		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+			link_info->force_link_chng = true;
+		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+	}
+	if (epause->rx_pause)
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+	else
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+	if (epause->tx_pause)
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+	else
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_pause(bp);
+	return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	/* TODO: handle MF, VF, driver close case */
+	return bp->link_info.link_up;
+}
+
+static int bnxt_flash_nvram(struct net_device *dev,
+			    u16 dir_type,
+			    u16 dir_ordinal,
+			    u16 dir_ext,
+			    u16 dir_attr,
+			    const u8 *data,
+			    size_t data_len)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	struct hwrm_nvm_write_input req = {0};
+	dma_addr_t dma_handle;
+	u8 *kmem;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+	req.dir_type = cpu_to_le16(dir_type);
+	req.dir_ordinal = cpu_to_le16(dir_ordinal);
+	req.dir_ext = cpu_to_le16(dir_ext);
+	req.dir_attr = cpu_to_le16(dir_attr);
+	req.dir_data_length = cpu_to_le32(data_len);
+
+	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+				  GFP_KERNEL);
+	if (!kmem) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)data_len);
+		return -ENOMEM;
+	}
+	memcpy(kmem, data, data_len);
+	req.host_src_addr = cpu_to_le64(dma_handle);
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+	return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+			       u16 dir_type,
+			       const u8 *fw_data,
+			       size_t fw_size)
+{
+	int	rc = 0;
+	u16	code_type;
+	u32	stored_crc;
+	u32	calculated_crc;
+	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+	switch (dir_type) {
+	case BNX_DIR_TYPE_BOOTCODE:
+	case BNX_DIR_TYPE_BOOTCODE_2:
+		code_type = CODE_BOOT;
+		break;
+	default:
+		netdev_err(dev, "Unsupported directory entry type: %u\n",
+			   dir_type);
+		return -EINVAL;
+	}
+	if (fw_size < sizeof(struct bnxt_fw_header)) {
+		netdev_err(dev, "Invalid firmware file size: %u\n",
+			   (unsigned int)fw_size);
+		return -EINVAL;
+	}
+	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+		netdev_err(dev, "Invalid firmware signature: %08X\n",
+			   le32_to_cpu(header->signature));
+		return -EINVAL;
+	}
+	if (header->code_type != code_type) {
+		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+			   code_type, header->code_type);
+		return -EINVAL;
+	}
+	if (header->device != DEVICE_CUMULUS_FAMILY) {
+		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+			   DEVICE_CUMULUS_FAMILY, header->device);
+		return -EINVAL;
+	}
+	/* Confirm the CRC32 checksum of the file: */
+	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+					     sizeof(stored_crc)));
+	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+	if (calculated_crc != stored_crc) {
+		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+			   (unsigned long)stored_crc,
+			   (unsigned long)calculated_crc);
+		return -EINVAL;
+	}
+	/* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+			      0, 0, fw_data, fw_size);
+	if (rc == 0) {	/* Firmware update successful */
+		/* TODO: Notify processor it needs to reset itself
+		 */
+	}
+	return rc;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_CHIMP_PATCH:
+	case BNX_DIR_TYPE_BOOTCODE:
+	case BNX_DIR_TYPE_BOOTCODE_2:
+	case BNX_DIR_TYPE_APE_FW:
+	case BNX_DIR_TYPE_APE_PATCH:
+	case BNX_DIR_TYPE_KONG_FW:
+	case BNX_DIR_TYPE_KONG_PATCH:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_AVS:
+	case BNX_DIR_TYPE_EXP_ROM_MBA:
+	case BNX_DIR_TYPE_PCIE:
+	case BNX_DIR_TYPE_TSCF_UCODE:
+	case BNX_DIR_TYPE_EXT_PHY:
+	case BNX_DIR_TYPE_CCM:
+	case BNX_DIR_TYPE_ISCSI_BOOT:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+		bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+					 u16 dir_type,
+					 const char *filename)
+{
+	const struct firmware  *fw;
+	int			rc;
+
+	if (bnxt_dir_type_is_executable(dir_type) == false)
+		return -EINVAL;
+
+	rc = request_firmware(&fw, filename, &dev->dev);
+	if (rc != 0) {
+		netdev_err(dev, "Error %d requesting firmware file: %s\n",
+			   rc, filename);
+		return rc;
+	}
+	if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+	else
+		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+				      0, 0, fw->data, fw->size);
+	release_firmware(fw);
+	return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+					char *filename)
+{
+	netdev_err(dev, "packages are not yet supported\n");
+	return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+			     struct ethtool_flash *flash)
+{
+	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+		netdev_err(dev, "flashdev not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
+	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+		return bnxt_flash_package_from_file(dev, flash->data);
+
+	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	struct hwrm_nvm_get_dir_info_input req = {0};
+	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		*entries = le32_to_cpu(output->entries);
+		*length = le32_to_cpu(output->entry_length);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+	/* The -1 return value allows the entire 32-bit range of offsets to be
+	 * passed via the ethtool command-line utility.
+	 */
+	return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	u32 dir_entries;
+	u32 entry_length;
+	u8 *buf;
+	size_t buflen;
+	dma_addr_t dma_handle;
+	struct hwrm_nvm_get_dir_entries_input req = {0};
+
+	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+	if (rc != 0)
+		return rc;
+
+	/* Insert 2 bytes of directory info (count and size of entries) */
+	if (len < 2)
+		return -EINVAL;
+
+	*data++ = dir_entries;
+	*data++ = entry_length;
+	len -= 2;
+	memset(data, 0xff, len);
+
+	buflen = dir_entries * entry_length;
+	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+				 GFP_KERNEL);
+	if (!buf) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)buflen);
+		return -ENOMEM;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+	req.host_dest_addr = cpu_to_le64(dma_handle);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc == 0)
+		memcpy(data, buf, len > buflen ? buflen : len);
+	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+	return rc;
+}
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+			       u32 length, u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	u8 *buf;
+	dma_addr_t dma_handle;
+	struct hwrm_nvm_read_input req = {0};
+
+	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+				 GFP_KERNEL);
+	if (!buf) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)length);
+		return -ENOMEM;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+	req.host_dest_addr = cpu_to_le64(dma_handle);
+	req.dir_idx = cpu_to_le16(index);
+	req.offset = cpu_to_le32(offset);
+	req.len = cpu_to_le32(length);
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc == 0)
+		memcpy(data, buf, length);
+	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+	return rc;
+}
+
+static int bnxt_get_eeprom(struct net_device *dev,
+			   struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	u32 index;
+	u32 offset;
+
+	if (eeprom->offset == 0) /* special offset value to get directory */
+		return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
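+	/* Non-zero offsets encode the NVM directory index (1-based) in the
+	 * upper 8 bits and the byte offset within that item in the lower
+	 * 24 bits.
+	 */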
+	index = eeprom->offset >> 24;
+	offset = eeprom->offset & 0xffffff;
+
+	if (index == 0) {
+		netdev_err(dev, "unsupported index value: %d\n", index);
+		return -EINVAL;
+	}
+
+	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+	req.dir_idx = cpu_to_le16(index);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_set_eeprom(struct net_device *dev,
+			   struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u8 index, dir_op;
+	u16 type, ext, ordinal, attr;
+
+	if (!BNXT_PF(bp)) {
+		netdev_err(dev, "NVM write not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
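+	/* The upper 16 bits of the ethtool magic select the NVM directory
+	 * entry type; 0xffff requests a directory operation, with the
+	 * operation code in bits 8-15 and the 1-based entry index in
+	 * bits 0-7.
+	 */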
+	type = eeprom->magic >> 16;
+
+	if (type == 0xffff) { /* special value for directory operations */
+		index = eeprom->magic & 0xff;
+		dir_op = eeprom->magic >> 8;
+		if (index == 0)
+			return -EINVAL;
+		switch (dir_op) {
+		case 0x0e: /* erase */
+			if (eeprom->offset != ~eeprom->magic)
+				return -EINVAL;
+			return bnxt_erase_nvram_directory(dev, index - 1);
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type) == true)
+		return -EINVAL;
+	ext = eeprom->magic & 0xffff;
+	ordinal = eeprom->offset >> 16;
+	attr = eeprom->offset & 0xffff;
+
+	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+				eeprom->len);
+}
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+	.get_settings		= bnxt_get_settings,
+	.set_settings		= bnxt_set_settings,
+	.get_pauseparam		= bnxt_get_pauseparam,
+	.set_pauseparam		= bnxt_set_pauseparam,
+	.get_drvinfo		= bnxt_get_drvinfo,
+	.get_coalesce		= bnxt_get_coalesce,
+	.set_coalesce		= bnxt_set_coalesce,
+	.get_msglevel		= bnxt_get_msglevel,
+	.set_msglevel		= bnxt_set_msglevel,
+	.get_sset_count		= bnxt_get_sset_count,
+	.get_strings		= bnxt_get_strings,
+	.get_ethtool_stats	= bnxt_get_ethtool_stats,
+	.set_ringparam		= bnxt_set_ringparam,
+	.get_ringparam		= bnxt_get_ringparam,
+	.get_channels		= bnxt_get_channels,
+	.set_channels		= bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+	.get_rxnfc		= bnxt_get_rxnfc,
+#endif
+	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
+	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
+	.get_rxfh               = bnxt_get_rxfh,
+	.flash_device		= bnxt_flash_device,
+	.get_eeprom_len         = bnxt_get_eeprom_len,
+	.get_eeprom             = bnxt_get_eeprom,
+	.set_eeprom		= bnxt_set_eeprom,
+	.get_link		= bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 0000000..98fa81e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 0000000..e0aac65
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE     0x1a4d4342	/* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+	DEVICE_5702_3_4_FAMILY,		/* 0  - Denali, Vinson, K2 */
+	DEVICE_5705_FAMILY,		/* 1  - Bachelor */
+	DEVICE_SHASTA_FAMILY,		/* 2  - 5751 */
+	DEVICE_5706_FAMILY,		/* 3  - Teton */
+	DEVICE_5714_FAMILY,		/* 4  - Hamilton */
+	DEVICE_STANFORD_FAMILY,		/* 5  - 5755 */
+	DEVICE_STANFORD_ME_FAMILY,	/* 6  - 5756 */
+	DEVICE_SOLEDAD_FAMILY,		/* 7  - 5761[E] */
+	DEVICE_CILAI_FAMILY,		/* 8  - 57780/60/90/91 */
+	DEVICE_ASPEN_FAMILY,		/* 9  - 57781/85/61/65/91/95 */
+	DEVICE_ASPEN_PLUS_FAMILY,	/* 10 - 57786 */
+	DEVICE_LOGAN_FAMILY,		/* 11 - Any device in the Logan family
+					 */
+	DEVICE_LOGAN_5762,		/* 12 - Logan Enterprise (aka Columbia)
+					 */
+	DEVICE_LOGAN_57767,		/* 13 - Logan Client */
+	DEVICE_LOGAN_57787,		/* 14 - Logan Consumer */
+	DEVICE_LOGAN_5725,		/* 15 - Logan Server (TruManage-enabled)
+					 */
+	DEVICE_SAWTOOTH_FAMILY,		/* 16 - 5717/18 */
+	DEVICE_COTOPAXI_FAMILY,		/* 17 - 5719 */
+	DEVICE_SNAGGLETOOTH_FAMILY,	/* 18 - 5720 */
+	DEVICE_CUMULUS_FAMILY,		/* 19 - Cumulus/Whitney */
+	MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+	CODE_ASF1,		/* 0  - ASF VERSION 1.03 <deprecated> */
+	CODE_ASF2,		/* 1  - ASF VERSION 2.00 <deprecated> */
+	CODE_PASSTHRU,		/* 2  - PassThru         <deprecated> */
+	CODE_PT_SEC,		/* 3  - PassThru with security <deprecated> */
+	CODE_UMP,		/* 4  - UMP                     <deprecated> */
+	CODE_BOOT,		/* 5  - Bootcode */
+	CODE_DASH,		/* 6  - TruManage (DASH + ASF + PMCI)
+				 *	Management firmwares
+				 */
+	CODE_MCTP_PASSTHRU,	/* 7  - NCSI / MCTP Pass-through firmware */
+	CODE_PM_OFFLOAD,	/* 8  - Power-Management Proxy Offload firmwares
+				 */
+	CODE_MDNS_SD_OFFLOAD,	/* 9  - Multicast DNS Service Discovery Proxy
+				 *	Offload firmware
+				 */
+	CODE_DISC_OFFLOAD,	/* 10 - Discovery Offload firmware */
+	CODE_MUSTANG,		/* 11 - I2C Error reporting APE firmwares
+				 *	<deprecated>
+				 */
+	CODE_ARP_BATCH,		/* 12 - ARP Batch firmware */
+	CODE_SMASH,		/* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+				 *	Management firmware
+				 */
+	CODE_APE_DIAG,		/* 14 - APE Test Diag firmware */
+	CODE_APE_PATCH,		/* 15 - APE Patch firmware */
+	CODE_TANG_PATCH,	/* 16 - TANG Patch firmware */
+	CODE_KONG_FW,		/* 17 - KONG firmware */
+	CODE_KONG_PATCH,	/* 18 - KONG Patch firmware */
+	CODE_BONO_FW,		/* 19 - BONO firmware */
+	CODE_BONO_PATCH,	/* 20 - BONO Patch firmware */
+
+	MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+	MEDIA_COPPER,		/* 0 */
+	MEDIA_FIBER,		/* 1 */
+	MEDIA_NONE,		/* 2 */
+	MEDIA_COPPER_FIBER,	/* 3 */
+	MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+	__le32 signature;	/* contains the constant value of
+				 * BNXT_FIRMWARE_BIN_SIGNATURE
+				 */
+	u8 flags;		/* reserved for ChiMP use */
+	u8 code_type;		/* enum SUPPORTED_CODE */
+	u8 device;		/* enum SUPPORTED_FAMILY */
+	u8 media;		/* enum SUPPORTED_MEDIA */
+	u8 version[16];		/* the null-terminated version string
+				 * indicating the version of the file;
+				 * this is copied from the binary file's
+				 * version string
+				 */
+	u8 build;
+	u8 revision;
+	u8 minor_ver;
+	u8 major_ver;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 0000000..70fc825
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats  {
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 tpa_pkts;
+	__le64 tpa_bytes;
+	__le64 tpa_events;
+	__le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+	__le16 type;
+	#define EJECT_CMPL_TYPE_MASK				    0x3fUL
+	#define EJECT_CMPL_TYPE_SFT				    0
+	#define EJECT_CMPL_TYPE_STAT_EJECT			   (0x1aUL << 0)
+	__le16 len;
+	__le32 opaque;
+	__le32 v;
+	#define EJECT_CMPL_V					    0x1UL
+	__le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+	__le16 type;
+	#define HWRM_CMPL_TYPE_MASK				    0x3fUL
+	#define HWRM_CMPL_TYPE_SFT				    0
+	#define HWRM_CMPL_TYPE_HWRM_DONE			   (0x20UL << 0)
+	__le16 sequence_id;
+	__le32 unused_1;
+	__le32 v;
+	#define HWRM_CMPL_V					    0x1UL
+	__le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+	__le16 req_len_type;
+	#define HWRM_FWD_REQ_CMPL_TYPE_MASK			    0x3fUL
+	#define HWRM_FWD_REQ_CMPL_TYPE_SFT			    0
+	#define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ		   (0x22UL << 0)
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK			    0xffc0UL
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT			    6
+	__le16 source_id;
+	__le32 unused_0;
+	__le32 req_buf_addr_v[2];
+	#define HWRM_FWD_REQ_CMPL_V				    0x1UL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK		    0xfffffffeUL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT		    1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+	__le16 type;
+	#define HWRM_FWD_RESP_CMPL_TYPE_MASK			    0x3fUL
+	#define HWRM_FWD_RESP_CMPL_TYPE_SFT			    0
+	#define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP		   (0x24UL << 0)
+	__le16 source_id;
+	__le16 resp_len;
+	__le16 unused_1;
+	__le32 resp_buf_addr_v[2];
+	#define HWRM_FWD_RESP_CMPL_V				    0x1UL
+	#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK		    0xfffffffeUL
+	#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT		    1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK		    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT			    0
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT       (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE    (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE  (0x2UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  (0x3UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   (0x10UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     (0x11UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     (0x20UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       (0x20UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR		   (0x30UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR	   (0xffUL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_V			    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK		    0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT		    1
+	u8 unused_1[3];
+	__le32 event_data1;
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT  0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT     0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK  0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT   1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK  0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT   0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK  0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT   0
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V      0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK   0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT    0
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT  1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK     0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT      0
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK   0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK     0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT      0
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK   0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK       0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT	    0
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK     0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT      1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK		    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT		    0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR      (0x30UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V			    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK	    0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT	    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT  0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK	    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT	    0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+	__le32 event_data2;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK       0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT	    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR	0
+#define HWRM_VERSION_MINOR	7
+#define HWRM_VERSION_UPDATE	8
+
+#define HWRM_VERSION_STR	"0.7.8"
+/* The following is the signature for an HWRM message field that indicates not
+ * applicable (all F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE	((__le32)(-1))
+#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN    (176)  /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE	40
+#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
+/* Input (16 bytes) */
+struct input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+};
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+	__le16 req_type;
+	#define HWRM_VER_GET					   (0x0UL)
+	#define HWRM_FUNC_DISABLE				   (0x10UL)
+	#define HWRM_FUNC_RESET				   (0x11UL)
+	#define HWRM_FUNC_GETFID				   (0x12UL)
+	#define HWRM_FUNC_VF_ALLOC				   (0x13UL)
+	#define HWRM_FUNC_VF_FREE				   (0x14UL)
+	#define HWRM_FUNC_QCAPS				   (0x15UL)
+	#define HWRM_FUNC_QCFG					   (0x16UL)
+	#define HWRM_FUNC_CFG					   (0x17UL)
+	#define HWRM_FUNC_QSTATS				   (0x18UL)
+	#define HWRM_FUNC_CLR_STATS				   (0x19UL)
+	#define HWRM_FUNC_DRV_UNRGTR				   (0x1aUL)
+	#define HWRM_FUNC_VF_RESC_FREE				   (0x1bUL)
+	#define HWRM_FUNC_VF_VNIC_IDS_QUERY			   (0x1cUL)
+	#define HWRM_FUNC_DRV_RGTR				   (0x1dUL)
+	#define HWRM_FUNC_DRV_QVER				   (0x1eUL)
+	#define HWRM_FUNC_BUF_RGTR				   (0x1fUL)
+	#define HWRM_FUNC_VF_CFG				   (0x20UL)
+	#define HWRM_PORT_PHY_CFG				   (0x20UL)
+	#define HWRM_PORT_MAC_CFG				   (0x21UL)
+	#define HWRM_PORT_ENABLE				   (0x22UL)
+	#define HWRM_PORT_QSTATS				   (0x23UL)
+	#define HWRM_PORT_LPBK_QSTATS				   (0x24UL)
+	#define HWRM_PORT_CLR_STATS				   (0x25UL)
+	#define HWRM_PORT_LPBK_CLR_STATS			   (0x26UL)
+	#define HWRM_PORT_PHY_QCFG				   (0x27UL)
+	#define HWRM_PORT_MAC_QCFG				   (0x28UL)
+	#define HWRM_PORT_BLINK_LED				   (0x29UL)
+	#define HWRM_QUEUE_QPORTCFG				   (0x30UL)
+	#define HWRM_QUEUE_QCFG				   (0x31UL)
+	#define HWRM_QUEUE_CFG					   (0x32UL)
+	#define HWRM_QUEUE_BUFFERS_QCFG			   (0x33UL)
+	#define HWRM_QUEUE_BUFFERS_CFG				   (0x34UL)
+	#define HWRM_QUEUE_PFCENABLE_QCFG			   (0x35UL)
+	#define HWRM_QUEUE_PFCENABLE_CFG			   (0x36UL)
+	#define HWRM_QUEUE_PRI2COS_QCFG			   (0x37UL)
+	#define HWRM_QUEUE_PRI2COS_CFG				   (0x38UL)
+	#define HWRM_QUEUE_COS2BW_QCFG				   (0x39UL)
+	#define HWRM_QUEUE_COS2BW_CFG				   (0x3aUL)
+	#define HWRM_VNIC_ALLOC				   (0x40UL)
+	#define HWRM_VNIC_FREE					   (0x41UL)
+	#define HWRM_VNIC_CFG					   (0x42UL)
+	#define HWRM_VNIC_QCFG					   (0x43UL)
+	#define HWRM_VNIC_TPA_CFG				   (0x44UL)
+	#define HWRM_VNIC_TPA_QCFG				   (0x45UL)
+	#define HWRM_VNIC_RSS_CFG				   (0x46UL)
+	#define HWRM_VNIC_RSS_QCFG				   (0x47UL)
+	#define HWRM_VNIC_PLCMODES_CFG				   (0x48UL)
+	#define HWRM_VNIC_PLCMODES_QCFG			   (0x49UL)
+	#define HWRM_RING_ALLOC				   (0x50UL)
+	#define HWRM_RING_FREE					   (0x51UL)
+	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS		   (0x52UL)
+	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS		   (0x53UL)
+	#define HWRM_RING_RESET				   (0x5eUL)
+	#define HWRM_RING_GRP_ALLOC				   (0x60UL)
+	#define HWRM_RING_GRP_FREE				   (0x61UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC			   (0x70UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE			   (0x71UL)
+	#define HWRM_ARB_GRP_ALLOC				   (0x80UL)
+	#define HWRM_ARB_GRP_CFG				   (0x81UL)
+	#define HWRM_CFA_L2_FILTER_ALLOC			   (0x90UL)
+	#define HWRM_CFA_L2_FILTER_FREE			   (0x91UL)
+	#define HWRM_CFA_L2_FILTER_CFG				   (0x92UL)
+	#define HWRM_CFA_L2_SET_RX_MASK			   (0x93UL)
+	#define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING		   (0x94UL)
+	#define HWRM_CFA_TUNNEL_FILTER_ALLOC			   (0x95UL)
+	#define HWRM_CFA_TUNNEL_FILTER_FREE			   (0x96UL)
+	#define HWRM_CFA_ENCAP_RECORD_ALLOC			   (0x97UL)
+	#define HWRM_CFA_ENCAP_RECORD_FREE			   (0x98UL)
+	#define HWRM_CFA_NTUPLE_FILTER_ALLOC			   (0x99UL)
+	#define HWRM_CFA_NTUPLE_FILTER_FREE			   (0x9aUL)
+	#define HWRM_CFA_NTUPLE_FILTER_CFG			   (0x9bUL)
+	#define HWRM_TUNNEL_DST_PORT_QUERY			   (0xa0UL)
+	#define HWRM_TUNNEL_DST_PORT_ALLOC			   (0xa1UL)
+	#define HWRM_TUNNEL_DST_PORT_FREE			   (0xa2UL)
+	#define HWRM_STAT_CTX_ALLOC				   (0xb0UL)
+	#define HWRM_STAT_CTX_FREE				   (0xb1UL)
+	#define HWRM_STAT_CTX_QUERY				   (0xb2UL)
+	#define HWRM_STAT_CTX_CLR_STATS			   (0xb3UL)
+	#define HWRM_FW_RESET					   (0xc0UL)
+	#define HWRM_FW_QSTATUS				   (0xc1UL)
+	#define HWRM_EXEC_FWD_RESP				   (0xd0UL)
+	#define HWRM_REJECT_FWD_RESP				   (0xd1UL)
+	#define HWRM_FWD_RESP					   (0xd2UL)
+	#define HWRM_FWD_ASYNC_EVENT_CMPL			   (0xd3UL)
+	#define HWRM_TEMP_MONITOR_QUERY			   (0xe0UL)
+	#define HWRM_MGMT_L2_FILTER_ALLOC			   (0x100UL)
+	#define HWRM_MGMT_L2_FILTER_FREE			   (0x101UL)
+	#define HWRM_DBG_READ_DIRECT				   (0xff10UL)
+	#define HWRM_DBG_READ_INDIRECT				   (0xff11UL)
+	#define HWRM_DBG_WRITE_DIRECT				   (0xff12UL)
+	#define HWRM_DBG_WRITE_INDIRECT			   (0xff13UL)
+	#define HWRM_DBG_DUMP					   (0xff14UL)
+	#define HWRM_NVM_MODIFY				   (0xfff4UL)
+	#define HWRM_NVM_VERIFY_UPDATE				   (0xfff5UL)
+	#define HWRM_NVM_GET_DEV_INFO				   (0xfff6UL)
+	#define HWRM_NVM_ERASE_DIR_ENTRY			   (0xfff7UL)
+	#define HWRM_NVM_MOD_DIR_ENTRY				   (0xfff8UL)
+	#define HWRM_NVM_FIND_DIR_ENTRY			   (0xfff9UL)
+	#define HWRM_NVM_GET_DIR_ENTRIES			   (0xfffaUL)
+	#define HWRM_NVM_GET_DIR_INFO				   (0xfffbUL)
+	#define HWRM_NVM_RAW_DUMP				   (0xfffcUL)
+	#define HWRM_NVM_READ					   (0xfffdUL)
+	#define HWRM_NVM_WRITE					   (0xfffeUL)
+	#define HWRM_NVM_RAW_WRITE_BLK				   (0xffffUL)
+	__le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+	__le16 error_code;
+	#define HWRM_ERR_CODE_SUCCESS				   (0x0UL)
+	#define HWRM_ERR_CODE_FAIL				   (0x1UL)
+	#define HWRM_ERR_CODE_INVALID_PARAMS			   (0x2UL)
+	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED		   (0x3UL)
+	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR		   (0x4UL)
+	#define HWRM_ERR_CODE_INVALID_FLAGS			   (0x5UL)
+	#define HWRM_ERR_CODE_INVALID_ENABLES			   (0x6UL)
+	#define HWRM_ERR_CODE_HWRM_ERROR			   (0xfUL)
+	#define HWRM_ERR_CODE_UNKNOWN_ERR			   (0xfffeUL)
+	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED		   (0xffffUL)
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 opaque_0;
+	__le16 opaque_1;
+	u8 opaque_2;
+	u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+	__le64 tx_64b_frames;
+	__le64 tx_65b_127b_frames;
+	__le64 tx_128b_255b_frames;
+	__le64 tx_256b_511b_frames;
+	__le64 tx_512b_1023b_frames;
+	__le64 tx_1024b_1518_frames;
+	__le64 tx_good_vlan_frames;
+	__le64 tx_1519b_2047_frames;
+	__le64 tx_2048b_4095b_frames;
+	__le64 tx_4096b_9216b_frames;
+	__le64 tx_9217b_16383b_frames;
+	__le64 tx_good_frames;
+	__le64 tx_total_frames;
+	__le64 tx_ucast_frames;
+	__le64 tx_mcast_frames;
+	__le64 tx_bcast_frames;
+	__le64 tx_pause_frames;
+	__le64 tx_pfc_frames;
+	__le64 tx_jabber_frames;
+	__le64 tx_fcs_err_frames;
+	__le64 tx_control_frames;
+	__le64 tx_oversz_frames;
+	__le64 tx_single_dfrl_frames;
+	__le64 tx_multi_dfrl_frames;
+	__le64 tx_single_coll_frames;
+	__le64 tx_multi_coll_frames;
+	__le64 tx_late_coll_frames;
+	__le64 tx_excessive_coll_frames;
+	__le64 tx_frag_frames;
+	__le64 tx_err;
+	__le64 tx_tagged_frames;
+	__le64 tx_dbl_tagged_frames;
+	__le64 tx_runt_frames;
+	__le64 tx_fifo_underruns;
+	__le64 tx_pfc_ena_frames_pri0;
+	__le64 tx_pfc_ena_frames_pri1;
+	__le64 tx_pfc_ena_frames_pri2;
+	__le64 tx_pfc_ena_frames_pri3;
+	__le64 tx_pfc_ena_frames_pri4;
+	__le64 tx_pfc_ena_frames_pri5;
+	__le64 tx_pfc_ena_frames_pri6;
+	__le64 tx_pfc_ena_frames_pri7;
+	__le64 tx_eee_lpi_events;
+	__le64 tx_eee_lpi_duration;
+	__le64 tx_llfc_logical_msgs;
+	__le64 tx_hcfc_msgs;
+	__le64 tx_total_collisions;
+	__le64 tx_bytes;
+	__le64 tx_xthol_frames;
+	__le64 tx_stat_discard;
+	__le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+	__le64 rx_64b_frames;
+	__le64 rx_65b_127b_frames;
+	__le64 rx_128b_255b_frames;
+	__le64 rx_256b_511b_frames;
+	__le64 rx_512b_1023b_frames;
+	__le64 rx_1024b_1518_frames;
+	__le64 rx_good_vlan_frames;
+	__le64 rx_1519b_2047b_frames;
+	__le64 rx_2048b_4095b_frames;
+	__le64 rx_4096b_9216b_frames;
+	__le64 rx_9217b_16383b_frames;
+	__le64 rx_total_frames;
+	__le64 rx_ucast_frames;
+	__le64 rx_mcast_frames;
+	__le64 rx_bcast_frames;
+	__le64 rx_fcs_err_frames;
+	__le64 rx_ctrl_frames;
+	__le64 rx_pause_frames;
+	__le64 rx_pfc_frames;
+	__le64 rx_unsupported_opcode_frames;
+	__le64 rx_unsupported_da_pausepfc_frames;
+	__le64 rx_wrong_sa_frames;
+	__le64 rx_align_err_frames;
+	__le64 rx_oor_len_frames;
+	__le64 rx_code_err_frames;
+	__le64 rx_false_carrier_frames;
+	__le64 rx_ovrsz_frames;
+	__le64 rx_jbr_frames;
+	__le64 rx_mtu_err_frames;
+	__le64 rx_match_crc_frames;
+	__le64 rx_promiscuous_frames;
+	__le64 rx_tagged_frames;
+	__le64 rx_double_tagged_frames;
+	__le64 rx_trunc_frames;
+	__le64 rx_good_frames;
+	__le64 rx_pfc_xon2xoff_frames_pri0;
+	__le64 rx_pfc_xon2xoff_frames_pri1;
+	__le64 rx_pfc_xon2xoff_frames_pri2;
+	__le64 rx_pfc_xon2xoff_frames_pri3;
+	__le64 rx_pfc_xon2xoff_frames_pri4;
+	__le64 rx_pfc_xon2xoff_frames_pri5;
+	__le64 rx_pfc_xon2xoff_frames_pri6;
+	__le64 rx_pfc_xon2xoff_frames_pri7;
+	__le64 rx_pfc_ena_frames_pri0;
+	__le64 rx_pfc_ena_frames_pri1;
+	__le64 rx_pfc_ena_frames_pri2;
+	__le64 rx_pfc_ena_frames_pri3;
+	__le64 rx_pfc_ena_frames_pri4;
+	__le64 rx_pfc_ena_frames_pri5;
+	__le64 rx_pfc_ena_frames_pri6;
+	__le64 rx_pfc_ena_frames_pri7;
+	__le64 rx_sch_crc_err_frames;
+	__le64 rx_undrsz_frames;
+	__le64 rx_frag_frames;
+	__le64 rx_eee_lpi_events;
+	__le64 rx_eee_lpi_duration;
+	__le64 rx_llfc_physical_msgs;
+	__le64 rx_llfc_logical_msgs;
+	__le64 rx_llfc_msgs_with_crc_err;
+	__le64 rx_hcfc_msgs;
+	__le64 rx_hcfc_msgs_with_crc_err;
+	__le64 rx_bytes;
+	__le64 rx_runt_bytes;
+	__le64 rx_runt_frames;
+	__le64 rx_stat_discard;
+	__le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 hwrm_intf_maj;
+	u8 hwrm_intf_min;
+	u8 hwrm_intf_upd;
+	u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 hwrm_intf_maj;
+	u8 hwrm_intf_min;
+	u8 hwrm_intf_upd;
+	u8 hwrm_intf_rsvd;
+	u8 hwrm_fw_maj;
+	u8 hwrm_fw_min;
+	u8 hwrm_fw_bld;
+	u8 hwrm_fw_rsvd;
+	u8 ape_fw_maj;
+	u8 ape_fw_min;
+	u8 ape_fw_bld;
+	u8 ape_fw_rsvd;
+	u8 kong_fw_maj;
+	u8 kong_fw_min;
+	u8 kong_fw_bld;
+	u8 kong_fw_rsvd;
+	u8 tang_fw_maj;
+	u8 tang_fw_min;
+	u8 tang_fw_bld;
+	u8 tang_fw_rsvd;
+	u8 bono_fw_maj;
+	u8 bono_fw_min;
+	u8 bono_fw_bld;
+	u8 bono_fw_rsvd;
+	char hwrm_fw_name[16];
+	char ape_fw_name[16];
+	char kong_fw_name[16];
+	char tang_fw_name[16];
+	char bono_fw_name[16];
+	__le16 chip_num;
+	u8 chip_rev;
+	u8 chip_metal;
+	u8 chip_bond_id;
+	u8 unused_0;
+	__le16 max_req_win_len;
+	__le16 max_resp_len;
+	__le16 def_req_timeout;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID		    0x1UL
+	__le16 vf_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID		    0x1UL
+	__le16 vf_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_GETFID_REQ_ENABLES_PCI_ID			    0x1UL
+	__le16 pci_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID		    0x1UL
+	__le16 first_vf_id;
+	__le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 first_vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID		    0x1UL
+	__le16 first_vf_id;
+	__le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_CFG_REQ_ENABLES_MTU			    0x1UL
+	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN		    0x2UL
+	__le16 mtu;
+	__le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	__le16 port_id;
+	__le32 flags;
+	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	    0x1UL
+	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
+	u8 perm_mac_address[6];
+	__le16 max_rsscos_ctx;
+	__le16 max_cmpl_rings;
+	__le16 max_tx_rings;
+	__le16 max_rx_rings;
+	__le16 max_l2_ctxs;
+	__le16 max_vnics;
+	__le16 first_vf_id;
+	__le16 max_vfs;
+	__le16 max_stat_ctx;
+	__le32 max_encap_records;
+	__le32 max_decap_records;
+	__le32 max_tx_em_flows;
+	__le32 max_tx_wm_flows;
+	__le32 max_rx_em_flows;
+	__le32 max_rx_wm_flows;
+	__le32 max_mcast_filters;
+	__le32 max_flow_id;
+	__le32 max_hw_ring_grps;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 flags;
+	#define FUNC_CFG_REQ_FLAGS_PROM_MODE			    0x1UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK		    0x2UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK		    0x4UL
+	#define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH		    0x8UL
+	#define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH		    0x10UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE		    0x20UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_STP			    0x40UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP		    0x80UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2		    0x100UL
+	__le32 enables;
+	#define FUNC_CFG_REQ_ENABLES_MTU			    0x1UL
+	#define FUNC_CFG_REQ_ENABLES_MRU			    0x2UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS		    0x4UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS		    0x8UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS		    0x10UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS		    0x20UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS		    0x40UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS			    0x80UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS		    0x100UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x200UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN			    0x400UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR		    0x800UL
+	#define FUNC_CFG_REQ_ENABLES_MIN_BW			    0x1000UL
+	#define FUNC_CFG_REQ_ENABLES_MAX_BW			    0x2000UL
+	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4000UL
+	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE	    0x8000UL
+	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS		    0x10000UL
+	#define FUNC_CFG_REQ_ENABLES_EVB_MODE			    0x20000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS		    0x40000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS		    0x80000UL
+	__le16 mtu;
+	__le16 mru;
+	__le16 num_rsscos_ctxs;
+	__le16 num_cmpl_rings;
+	__le16 num_tx_rings;
+	__le16 num_rx_rings;
+	__le16 num_l2_ctxs;
+	__le16 num_vnics;
+	__le16 num_stat_ctxs;
+	__le16 num_hw_ring_grps;
+	u8 dflt_mac_addr[6];
+	__le16 dflt_vlan;
+	__be32 dflt_ip_addr[4];
+	__le32 min_bw;
+	__le32 max_bw;
+	__le16 async_event_cr;
+	u8 vlan_antispoof_mode;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK	   (0x0UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    (0x1UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+	u8 allowed_vlan_pris;
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK		   (0x0UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN      (0x1UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE  (0x2UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+	u8 evb_mode;
+	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB			   (0x0UL << 0)
+	#define FUNC_CFG_REQ_EVB_MODE_VEB			   (0x1UL << 0)
+	#define FUNC_CFG_REQ_EVB_MODE_VEPA			   (0x2UL << 0)
+	u8 unused_2;
+	__le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_err_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_err_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 rx_agg_pkts;
+	__le64 rx_agg_bytes;
+	__le64 rx_agg_events;
+	__le64 rx_agg_aborts;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 max_vnic_id_cnt;
+	__le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 vnic_id_cnt;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE		    0x1UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE		    0x2UL
+	__le32 enables;
+	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE		    0x1UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VER			    0x2UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP		    0x4UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD		    0x8UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD	    0x10UL
+	__le16 os_type;
+	u8 ver_maj;
+	u8 ver_min;
+	u8 ver_upd;
+	u8 unused_0;
+	__le16 unused_1;
+	__le32 timestamp;
+	__le32 unused_2;
+	__le32 vf_req_fwd[8];
+	__le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID		    0x1UL
+	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR		    0x2UL
+	__le16 vf_id;
+	__le16 req_buf_num_pages;
+	__le16 req_buf_page_size;
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B	   (0x4UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K		   (0xcUL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K		   (0xdUL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K	   (0x10UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M		   (0x16UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M		   (0x17UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G		   (0x1eUL << 0)
+	__le16 req_buf_len;
+	__le16 resp_buf_len;
+	u8 unused_0;
+	u8 unused_1;
+	__le64 req_buf_page_addr0;
+	__le64 req_buf_page_addr1;
+	__le64 req_buf_page_addr2;
+	__le64 req_buf_page_addr3;
+	__le64 req_buf_page_addr4;
+	__le64 req_buf_page_addr5;
+	__le64 req_buf_page_addr6;
+	__le64 req_buf_page_addr7;
+	__le64 req_buf_page_addr8;
+	__le64 req_buf_page_addr9;
+	__le64 error_buf_addr;
+	__le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID	    0x1UL
+	#define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID		    0x2UL
+	__le16 fid;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 os_type;
+	u8 ver_maj;
+	u8 ver_min;
+	u8 ver_upd;
+	u8 unused_0;
+	u8 unused_1;
+	u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY		    0x1UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN		    0x2UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE			    0x4UL
+	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG		    0x8UL
+	__le32 enables;
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		    0x1UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		    0x2UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE		    0x4UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED	    0x8UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
+	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED		    0x20UL
+	#define PORT_PHY_CFG_REQ_ENABLES_LPBK			    0x40UL
+	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS		    0x80UL
+	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE		    0x100UL
+	__le16 port_id;
+	__le16 force_link_speed;
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	u8 auto_mode;
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE		   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_MASK		   (0x4UL << 0)
+	u8 auto_duplex;
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF		   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH		   (0x2UL << 0)
+	u8 auto_pause;
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX			    0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX			    0x2UL
+	u8 unused_0;
+	__le16 auto_link_speed;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB		   (0x19UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	__le16 auto_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB	    0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD	    0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB	    0x20UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB	    0x40UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB	    0x80UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB	    0x100UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB	    0x200UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB	    0x400UL
+	u8 wirespeed;
+	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF			   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_WIRESPEED_ON			   (0x1UL << 0)
+	u8 lpbk;
+	#define PORT_PHY_CFG_REQ_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_LPBK_REMOTE			   (0x2UL << 0)
+	u8 force_pause;
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX		    0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX		    0x2UL
+	u8 unused_1;
+	__le32 preemphasis;
+	__le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 link;
+	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL			   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_LINK			   (0x2UL << 0)
+	u8 unused_0;
+	__le16 link_speed;
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB		   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	u8 duplex;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_HALF			   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_DUPLEX_FULL			   (0x1UL << 0)
+	u8 pause;
+	#define PORT_PHY_QCFG_RESP_PAUSE_TX			    0x1UL
+	#define PORT_PHY_QCFG_RESP_PAUSE_RX			    0x2UL
+	__le16 support_speeds;
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD	    0x1UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB	    0x2UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD	    0x4UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB		    0x8UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB		    0x10UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB	    0x20UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB		    0x40UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB		    0x80UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB		    0x100UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB		    0x200UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB		    0x400UL
+	__le16 force_link_speed;
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB	   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB	   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB	   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB	   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB	   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB	   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	u8 auto_mode;
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK		   (0x4UL << 0)
+	u8 auto_pause;
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX		    0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX		    0x2UL
+	__le16 auto_link_speed;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB	   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB	   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB	   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB	   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	__le16 auto_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
+	u8 wirespeed;
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON		   (0x1UL << 0)
+	u8 lpbk;
+	#define PORT_PHY_QCFG_RESP_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE			   (0x2UL << 0)
+	u8 force_pause;
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX		    0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX		    0x2UL
+	u8 duplex_setting;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL		   (0x1UL << 0)
+	__le32 preemphasis;
+	u8 phy_maj;
+	u8 phy_min;
+	u8 phy_bld;
+	u8 phy_type;
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4		   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4		   (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2		   (0x5UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4		   (0x6UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR		   (0x7UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET		   (0x8UL << 0)
+	u8 media_type;
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE		   (0x3UL << 0)
+	u8 transceiver_type;
+	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+	u8 phy_addr;
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK		    0x1fUL
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT		    0
+	u8 unused_2;
+	__le16 link_partner_adv_speeds;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
+	u8 link_partner_adv_auto_mode;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+	u8 link_partner_adv_pause;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
+	u8 unused_3;
+	u8 unused_4;
+	u8 unused_5;
+	u8 valid;
+};
+
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK		    0x1UL
+	#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE       0x2UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE	    0x8UL
+	__le32 enables;
+	#define PORT_MAC_CFG_REQ_ENABLES_IPG			    0x1UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			    0x2UL
+	#define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI     0x4UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI		    0x8UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
+	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI	    0x20UL
+	__le16 port_id;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_REQ_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_MAC_CFG_REQ_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_MAC_CFG_REQ_LPBK_REMOTE			   (0x2UL << 0)
+	u8 ivlan_pri2cos_map_pri;
+	u8 lcos_map_pri;
+	u8 tunnel_pri2cos_map_pri;
+	u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 mru;
+	__le16 mtu;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_RESP_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_MAC_CFG_RESP_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_MAC_CFG_RESP_LPBK_REMOTE			   (0x2UL << 0)
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC		    0x1UL
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2[3];
+	u8 unused_3;
+	__le64 tx_stat_host_addr;
+	__le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 lpbk_ucast_frames;
+	__le64 lpbk_mcast_frames;
+	__le64 lpbk_bcast_frames;
+	__le64 lpbk_ucast_bytes;
+	__le64 lpbk_mcast_bytes;
+	__le64 lpbk_bcast_bytes;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 num_blinks;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH			    0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 max_configurable_queues;
+	u8 max_configurable_lossless_queues;
+	u8 queue_cfg_allowed;
+	u8 queue_buffers_cfg_allowed;
+	u8 queue_pfcenable_cfg_allowed;
+	u8 queue_pri2cos_cfg_allowed;
+	u8 queue_cos2bw_cfg_allowed;
+	u8 queue_id0;
+	u8 queue_id0_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id1;
+	u8 queue_id1_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id2;
+	u8 queue_id2_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id3;
+	u8 queue_id3_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id4;
+	u8 queue_id4_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id5;
+	u8 queue_id5_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id6;
+	u8 queue_id6_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id7;
+	u8 queue_id7_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 valid;
+};
+
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_CFG_REQ_FLAGS_PATH			    0x1UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_TX			   (0x0UL << 0)
+	#define QUEUE_CFG_REQ_FLAGS_PATH_RX			   (0x1UL << 0)
+	__le32 enables;
+	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN			    0x1UL
+	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE		    0x2UL
+	__le32 queue_id;
+	__le32 dflt_len;
+	u8 service_profile;
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY		   (0x0UL << 0)
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS		   (0x1UL << 0)
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN		   (0xffUL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le32 enables;
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED		    0x1UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED		    0x2UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP		    0x4UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF		    0x8UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON		    0x10UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL		    0x20UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL		    0x40UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX		    0x80UL
+	__le32 queue_id;
+	__le32 reserved;
+	__le32 shared;
+	__le32 xoff;
+	__le32 xon;
+	__le32 full;
+	__le32 notfull;
+	__le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED   0x1UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED   0x2UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED   0x4UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED   0x8UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED   0x10UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED   0x20UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED   0x40UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED   0x80UL
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN		    0x2UL
+	__le32 enables;
+	u8 port_id;
+	u8 pri0_cos;
+	u8 pri1_cos;
+	u8 pri2_cos;
+	u8 pri3_cos;
+	u8 pri4_cos;
+	u8 pri5_cos;
+	u8 pri6_cos;
+	u8 pri7_cos;
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
+	__le16 port_id;
+	u8 queue_id0;
+	u8 unused_0;
+	__le32 queue_id0_min_bw;
+	__le32 queue_id0_max_bw;
+	u8 queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id0_pri_lvl;
+	u8 queue_id0_bw_weight;
+	u8 queue_id1;
+	__le32 queue_id1_min_bw;
+	__le32 queue_id1_max_bw;
+	u8 queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id1_pri_lvl;
+	u8 queue_id1_bw_weight;
+	u8 queue_id2;
+	__le32 queue_id2_min_bw;
+	__le32 queue_id2_max_bw;
+	u8 queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id2_pri_lvl;
+	u8 queue_id2_bw_weight;
+	u8 queue_id3;
+	__le32 queue_id3_min_bw;
+	__le32 queue_id3_max_bw;
+	u8 queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id3_pri_lvl;
+	u8 queue_id3_bw_weight;
+	u8 queue_id4;
+	__le32 queue_id4_min_bw;
+	__le32 queue_id4_max_bw;
+	u8 queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id4_pri_lvl;
+	u8 queue_id4_bw_weight;
+	u8 queue_id5;
+	__le32 queue_id5_min_bw;
+	__le32 queue_id5_max_bw;
+	u8 queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id5_pri_lvl;
+	u8 queue_id5_bw_weight;
+	u8 queue_id6;
+	__le32 queue_id6_min_bw;
+	__le32 queue_id6_max_bw;
+	u8 queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id6_pri_lvl;
+	u8 queue_id6_bw_weight;
+	u8 queue_id7;
+	__le32 queue_id7_min_bw;
+	__le32 queue_id7_max_bw;
+	u8 queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id7_pri_lvl;
+	u8 queue_id7_bw_weight;
+	u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT			    0x1UL
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 vnic_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 vnic_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_CFG_REQ_FLAGS_DEFAULT			    0x1UL
+	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE		    0x2UL
+	__le32 enables;
+	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP		    0x1UL
+	#define VNIC_CFG_REQ_ENABLES_RSS_RULE			    0x2UL
+	#define VNIC_CFG_REQ_ENABLES_COS_RULE			    0x4UL
+	#define VNIC_CFG_REQ_ENABLES_LB_RULE			    0x8UL
+	#define VNIC_CFG_REQ_ENABLES_MRU			    0x10UL
+	__le16 vnic_id;
+	__le16 dflt_ring_grp;
+	__le16 rss_rule;
+	__le16 cos_rule;
+	__le16 lb_rule;
+	__le16 mru;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_TPA_CFG_REQ_FLAGS_TPA			    0x1UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA		    0x2UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE		    0x4UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO			    0x8UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN		    0x10UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK		    0x40UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK		    0x80UL
+	__le32 enables;
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS		    0x1UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS		    0x2UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER		    0x4UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN		    0x8UL
+	__le16 vnic_id;
+	__le16 max_agg_segs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1		   (0x0UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2		   (0x1UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4		   (0x2UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8		   (0x3UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX		   (0x1fUL << 0)
+	__le16 max_aggs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1			   (0x0UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2			   (0x1UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4			   (0x2UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8			   (0x3UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16			   (0x4UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX			   (0x7UL << 0)
+	u8 unused_0;
+	u8 unused_1;
+	__le32 max_agg_timer;
+	__le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 hash_type;
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4		    0x1UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4		    0x2UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4		    0x4UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6		    0x8UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6		    0x10UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6		    0x20UL
+	__le32 unused_0;
+	__le64 ring_grp_tbl_addr;
+	__le64 hash_key_tbl_addr;
+	__le16 rss_ctx_idx;
+	__le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT	    0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4		    0x4UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6		    0x8UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE		    0x10UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE		    0x20UL
+	__le32 enables;
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
+	__le32 vnic_id;
+	__le16 jumbo_thresh;
+	__le16 hds_offset;
+	__le16 hds_threshold;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 rss_cos_lb_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 rss_cos_lb_ctx_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID	    0x1UL
+	#define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID		    0x2UL
+	#define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID		    0x4UL
+	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID	    0x8UL
+	#define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID		    0x10UL
+	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID		    0x20UL
+	u8 ring_type;
+	#define RING_ALLOC_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_STATUS		   (0x3UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	__le64 page_tbl_addr;
+	__le32 fbo;
+	u8 page_size;
+	u8 page_tbl_depth;
+	u8 unused_2;
+	u8 unused_3;
+	__le32 length;
+	__le16 logical_id;
+	__le16 cmpl_ring_id;
+	__le16 queue_id;
+	u8 unused_4;
+	u8 unused_5;
+	__le32 arb_grp_id;
+	__le16 input_number;
+	u8 unused_6;
+	u8 unused_7;
+	__le32 weight;
+	__le32 stat_ctx_id;
+	__le32 min_bw;
+	__le32 max_bw;
+	u8 int_mode;
+	#define RING_ALLOC_REQ_INT_MODE_LEGACY			   (0x0UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_MSI			   (0x1UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_MSIX			   (0x2UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_POLL			   (0x3UL << 0)
+	u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 ring_id;
+	__le16 logical_ring_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 ring_type;
+	#define RING_FREE_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_STATUS			   (0x3UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 ring_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 ring_id;
+	__le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 flags;
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+	__le16 num_cmpl_dma_aggr;
+	__le16 num_cmpl_dma_aggr_during_int;
+	__le16 cmpl_aggr_dma_tmr;
+	__le16 cmpl_aggr_dma_tmr_during_int;
+	__le16 int_lat_tmr_min;
+	__le16 int_lat_tmr_max;
+	__le16 num_cmpl_aggr_int;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 ring_id;
+	__le16 flags;
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+	__le16 num_cmpl_dma_aggr;
+	__le16 num_cmpl_dma_aggr_during_int;
+	__le16 cmpl_aggr_dma_tmr;
+	__le16 cmpl_aggr_dma_tmr_during_int;
+	__le16 int_lat_tmr_min;
+	__le16 int_lat_tmr_max;
+	__le16 num_cmpl_aggr_int;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 ring_type;
+	#define RING_RESET_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_STATUS		   (0x3UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 ring_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 cr;
+	__le16 rr;
+	__le16 ar;
+	__le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 ring_group_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 ring_group_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 input_number;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 arb_grp_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 arb_grp_id;
+	__le16 input_number;
+	__le16 tx_ring;
+	__le32 weight;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK		    0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP		    0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST	    0x8UL
+	__le32 enables;
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN	    0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN	    0x10UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR	    0x40UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN	    0x100UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN	    0x400UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE	    0x1000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID		    0x2000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE	    0x4000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID	    0x8000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+	u8 l2_addr[6];
+	u8 unused_0;
+	u8 unused_1;
+	u8 l2_addr_mask[6];
+	__le16 l2_ovlan;
+	__le16 l2_ovlan_mask;
+	__le16 l2_ivlan;
+	__le16 l2_ivlan_mask;
+	u8 unused_2;
+	u8 unused_3;
+	u8 t_l2_addr[6];
+	u8 unused_4;
+	u8 unused_5;
+	u8 t_l2_addr_mask[6];
+	__le16 t_l2_ovlan;
+	__le16 t_l2_ovlan_mask;
+	__le16 t_l2_ivlan;
+	__le16 t_l2_ivlan_mask;
+	u8 src_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT		   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF		   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF		   (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC		   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG		   (0x4UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE		   (0x5UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO		   (0x6UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG		   (0x7UL << 0)
+	u8 unused_6;
+	__le32 src_id;
+	u8 tunnel_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN	   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE	   (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE	   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE	   (0x5UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE	   (0x8UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     (0xffUL << 0)
+	u8 unused_7;
+	__le16 dst_vnic_id;
+	__le16 mirror_vnic_id;
+	u8 pri_hint;
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER	   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX		   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN		   (0x4UL << 0)
+	u8 unused_8;
+	__le32 unused_9;
+	__le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 l2_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP		    0x2UL
+	__le32 enables;
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID    0x1UL
+	__le64 l2_filter_id;
+	__le32 dst_vnic_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 dflt_vnic_id;
+	__le32 mask;
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST		    0x1UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST		    0x2UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST		    0x4UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST		    0x8UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS	    0x10UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST		    0x20UL
+	__le64 mc_tbl_addr;
+	__le32 num_mc_entries;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 dflt_vnic_id;
+	__le32 mirroring_flags;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+	__le16 vlan_id;
+	u8 bcast_domain;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+	u8 mcast_domain;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	__le32 enables;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR	    0x8UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI	    0x100UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+	__le64 l2_filter_id;
+	u8 l2_addr[6];
+	__le16 l2_ivlan;
+	__le32 l3_addr[4];
+	__le32 t_l3_addr[4];
+	u8 l3_addr_type;
+	u8 t_l3_addr_type;
+	u8 tunnel_type;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+	u8 unused_0;
+	__le32 vni;
+	__le32 dst_vnic_id;
+	__le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tunnel_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	u8 encap_type;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       (0x1UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       (0x2UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       (0x3UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP	   (0x4UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      (0x5UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS	   (0x6UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN	   (0x7UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       (0x8UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	__le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 encap_record_id;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP		    0x2UL
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x10000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+	__le64 l2_filter_id;
+	u8 src_macaddr[6];
+	__be16 ethertype;
+	u8 ipaddr_type;
+	u8 ip_protocol;
+	__le16 dst_vnic_id;
+	__le16 mirror_vnic_id;
+	u8 tunnel_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+	u8 pri_hint;
+	__be32 src_ipaddr[4];
+	__be32 src_ipaddr_mask[4];
+	__be32 dst_ipaddr[4];
+	__be32 dst_ipaddr_mask[4];
+	__be16 src_port;
+	__be16 src_port_mask;
+	__be16 dst_port;
+	__be16 dst_port_mask;
+	__le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 ntuple_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+	__le32 unused_0;
+	__le64 ntuple_filter_id;
+	__le32 new_dst_vnic_id;
+	__le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	__be16 tunnel_dst_port_val;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+	u8 unused_0;
+	__be16 tunnel_dst_port_val;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL    (0x0UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN	   (0x1UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE	   (0x2UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE	   (0x3UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       (0x5UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE	   (0x8UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL    (0xffUL << 0)
+	u8 unused_0;
+	__le16 tunnel_dst_port_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 stats_dma_addr;
+	__le32 update_period_ms;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_err_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_err_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 rx_agg_pkts;
+	__le64 rx_agg_bytes;
+	__le64 rx_agg_events;
+	__le64 rx_agg_aborts;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le32 enables;
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS	    0x1UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN		    0x2UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN		    0x4UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID	    0x8UL
+	u8 l2_address[6];
+	u8 unused_0;
+	u8 unused_1;
+	u8 l2_address_mask[6];
+	__le16 ovlan;
+	__le16 ovlan_mask;
+	__le16 ivlan;
+	__le16 ivlan_mask;
+	u8 unused_2;
+	u8 unused_3;
+	__le32 action_id;
+	u8 action_bypass;
+	#define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS		    0x1UL
+	u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 mgmt_l2_filter_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 mgmt_l2_filter_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le32 dest_addr;
+	__le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le16 dir_idx;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 offset;
+	__le32 len;
+	__le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 offset;
+	__le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 entries;
+	__le32 entry_length;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 dir_attr;
+	__le32 dir_data_length;
+	__le16 option;
+	__le16 flags;
+	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG	    0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le16 dir_idx;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 offset;
+	__le32 len;
+	__le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
+	__le16 dir_idx;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	u8 opt_ordinal;
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK	    0x3UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT		    0
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ		   (0x0UL << 0)
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE		   (0x1UL << 0)
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT		   (0x2UL << 0)
+	u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 dir_item_length;
+	__le32 dir_data_length;
+	__le32 fw_ver;
+	__le16 dir_ordinal;
+	__le16 dir_idx;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 dir_idx;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 manufacturer_id;
+	__le16 device_id;
+	__le32 sector_size;
+	__le32 nvram_size;
+	__le32 reserved_size;
+	__le32 available_size;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM		    0x1UL
+	__le16 dir_idx;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 dir_attr;
+	__le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[24];
+	__le16 encap_resp_target_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[24];
+	__le16 encap_resp_target_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_resp_target_id;
+	__le16 encap_resp_cmpl_ring;
+	__le16 encap_resp_len;
+	u8 unused_0;
+	u8 unused_1;
+	__le64 encap_resp_addr;
+	__le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_async_event_target_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2[3];
+	u8 unused_3;
+	__le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP		   (0x0UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE		   (0x1UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG		   (0x2UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO		   (0x3UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG		   (0x4UL << 0)
+	u8 selfrst_status;
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST	   (0x2UL << 0)
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       (0x2UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP	   (0x0UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE		   (0x1UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG		   (0x2UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO		   (0x3UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG		   (0x4UL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     (0x2UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 temp;
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 0000000..3cf3e1b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+	BNX_DIR_TYPE_UNUSED = 0,
+	BNX_DIR_TYPE_PKG_LOG = 1,
+	BNX_DIR_TYPE_CHIMP_PATCH = 3,
+	BNX_DIR_TYPE_BOOTCODE = 4,
+	BNX_DIR_TYPE_VPD = 5,
+	BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+	BNX_DIR_TYPE_AVS = 7,
+	BNX_DIR_TYPE_PCIE = 8,
+	BNX_DIR_TYPE_PORT_MACRO = 9,
+	BNX_DIR_TYPE_APE_FW = 10,
+	BNX_DIR_TYPE_APE_PATCH = 11,
+	BNX_DIR_TYPE_KONG_FW = 12,
+	BNX_DIR_TYPE_KONG_PATCH = 13,
+	BNX_DIR_TYPE_BONO_FW = 14,
+	BNX_DIR_TYPE_BONO_PATCH = 15,
+	BNX_DIR_TYPE_TANG_FW = 16,
+	BNX_DIR_TYPE_TANG_PATCH = 17,
+	BNX_DIR_TYPE_BOOTCODE_2 = 18,
+	BNX_DIR_TYPE_CCM = 19,
+	BNX_DIR_TYPE_PCI_CFG = 20,
+	BNX_DIR_TYPE_TSCF_UCODE = 21,
+	BNX_DIR_TYPE_ISCSI_BOOT = 22,
+	BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+	BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+	BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+	BNX_DIR_TYPE_EXT_PHY = 27,
+	BNX_DIR_TYPE_SHARED_CFG = 40,
+	BNX_DIR_TYPE_PORT_CFG = 41,
+	BNX_DIR_TYPE_FUNC_CFG = 42,
+	BNX_DIR_TYPE_MGMT_CFG = 48,
+	BNX_DIR_TYPE_MGMT_DATA = 49,
+	BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+	BNX_DIR_TYPE_MGMT_WEB_META = 51,
+	BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+	BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST			0
+
+#define BNX_DIR_EXT_INACTIVE			(1 << 0)
+#define BNX_DIR_EXT_UPDATE			(1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM			(1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM		(1 << 1)
+
+#endif				/* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 0000000..ea044bb
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,829 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
+		return -EINVAL;
+	}
+	if (!bp->pf.active_vfs) {
+		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
+		return -EINVAL;
+	}
+	if (vf_id >= bp->pf.max_vfs) {
+		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	bool old_setting = false;
+	u32 func_flags;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+	if (vf->flags & BNXT_VF_SPOOFCHK)
+		old_setting = true;
+	if (old_setting == setting)
+		return 0;
+
+	func_flags = vf->func_flags;
+	if (setting)
+		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+	else
+		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+	/* TODO: if the driver supports VLAN filtering on guest VLANs,
+	 * the spoof check should also include vlan anti-spoofing
+	 */
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(func_flags);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		vf->func_flags = func_flags;
+		if (setting)
+			vf->flags |= BNXT_VF_SPOOFCHK;
+		else
+			vf->flags &= ~BNXT_VF_SPOOFCHK;
+	}
+	return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+		       struct ifla_vf_info *ivi)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	ivi->vf = vf_id;
+	vf = &bp->pf.vf[vf_id];
+
+	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	ivi->max_tx_rate = vf->max_tx_rate;
+	ivi->min_tx_rate = vf->min_tx_rate;
+	ivi->vlan = vf->vlan;
+	ivi->qos = vf->flags & BNXT_VF_QOS;
+	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+	if (!(vf->flags & BNXT_VF_LINK_FORCED))
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->flags & BNXT_VF_LINK_UP)
+		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+	return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+	/* Reject broadcast or multicast MAC addresses; a zero MAC address
+	 * means the VF is allowed to use its own MAC address.
+	 */
+	if (is_multicast_ether_addr(mac)) {
+		netdev_err(dev, "Invalid VF ethernet address\n");
+		return -EINVAL;
+	}
+	vf = &bp->pf.vf[vf_id];
+
+	memcpy(vf->mac_addr, mac, ETH_ALEN);
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	u16 vlan_tag;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	/* TODO: proper handling of user priority needs to be implemented;
+	 * for now, fail the command if a priority is specified.
+	 */
+	if (vlan_id > 4095 || qos)
+		return -EINVAL;
+
+	vf = &bp->pf.vf[vf_id];
+	vlan_tag = vlan_id;
+	if (vlan_tag == vf->vlan)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.dflt_vlan = cpu_to_le16(vlan_tag);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		vf->vlan = vlan_tag;
+	return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+		   int max_tx_rate)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	u32 pf_link_speed;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+	if (max_tx_rate > pf_link_speed) {
+		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
+			    max_tx_rate, vf_id);
+		return -EINVAL;
+	}
+
+	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+			    min_tx_rate, vf_id);
+		return -EINVAL;
+	}
+	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+		return 0;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+	req.max_bw = cpu_to_le32(max_tx_rate);
+	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+	req.min_bw = cpu_to_le32(min_tx_rate);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		vf->min_tx_rate = min_tx_rate;
+		vf->max_tx_rate = max_tx_rate;
+	}
+	return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+
+	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+	switch (link) {
+	case IFLA_VF_LINK_STATE_AUTO:
+		vf->flags |= BNXT_VF_LINK_UP;
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		vf->flags |= BNXT_VF_LINK_FORCED;
+		break;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+		break;
+	default:
+		netdev_err(bp->dev, "Invalid link option\n");
+		rc = -EINVAL;
+		break;
+	}
+	/* CHIMP TODO: send msg to VF to update new link state */
+
+	return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+	int i;
+	struct bnxt_vf_info *vf;
+
+	for (i = 0; i < num_vfs; i++) {
+		vf = &bp->pf.vf[i];
+		memset(vf, 0, sizeof(*vf));
+		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
+{
+	int i, rc = 0;
+	struct bnxt_pf_info *pf = &bp->pf;
+	struct hwrm_func_vf_resc_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
+		req.vf_id = cpu_to_le16(i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+
+	kfree(bp->pf.vf_event_bmap);
+	bp->pf.vf_event_bmap = NULL;
+
+	for (i = 0; i < 4; i++) {
+		if (bp->pf.hwrm_cmd_req_addr[i]) {
+			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+					  bp->pf.hwrm_cmd_req_addr[i],
+					  bp->pf.hwrm_cmd_req_dma_addr[i]);
+			bp->pf.hwrm_cmd_req_addr[i] = NULL;
+		}
+	}
+
+	kfree(bp->pf.vf);
+	bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	struct pci_dev *pdev = bp->pdev;
+	u32 nr_pages, size, i, j, k = 0;
+
+	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+	if (!bp->pf.vf)
+		return -ENOMEM;
+
+	bnxt_set_vf_attr(bp, num_vfs);
+
+	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+	nr_pages = size / BNXT_PAGE_SIZE;
+	if (size & (BNXT_PAGE_SIZE - 1))
+		nr_pages++;
+
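+	/* Carve each DMA page into BNXT_HWRM_REQ_MAX_SIZE request slots,
+	 * one slot per VF.
+	 */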
+	for (i = 0; i < nr_pages; i++) {
+		bp->pf.hwrm_cmd_req_addr[i] =
+			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+					   &bp->pf.hwrm_cmd_req_dma_addr[i],
+					   GFP_KERNEL);
+
+		if (!bp->pf.hwrm_cmd_req_addr[i])
+			return -ENOMEM;
+
+		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+			struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+						j * BNXT_HWRM_REQ_MAX_SIZE;
+			vf->hwrm_cmd_req_dma_addr =
+				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+				BNXT_HWRM_REQ_MAX_SIZE;
+			k++;
+		}
+	}
+
+	/* Max 128 VFs */
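+	/* A 16-byte bitmap provides 128 bits, one pending-event bit per VF */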
+	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+	if (!bp->pf.vf_event_bmap)
+		return -ENOMEM;
+
+	bp->pf.hwrm_cmd_req_pages = nr_pages;
+	return 0;
+}
+
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_buf_rgtr_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Only called by the PF to reserve resources for VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+	u32 rc = 0, mtu, i;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt_pf_info *pf = &bp->pf;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+	/* Remaining rings are distributed equally among VFs for now */
+	/* TODO: the following workaround is needed to keep the total number
+	 * of vf_cp_rings from exceeding the number of HW ring groups. This
+	 * WA should be removed once the new HWRM provides the HW ring group
+	 * capability in hwrm_func_qcap.
+	 */
+	vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+	/* TODO: restore this logic below once the WA above is removed */
+	/* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+	vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+			      *num_vfs;
+	else
+		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+			      *num_vfs;
+	vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+				  FUNC_CFG_REQ_ENABLES_MRU |
+				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
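+	/* The MRU/MTU passed to firmware include the Ethernet header, FCS
+	 * and one VLAN tag.
+	 */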
+	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	req.mru = cpu_to_le16(mtu);
+	req.mtu = cpu_to_le16(mtu);
+
+	req.num_rsscos_ctxs = cpu_to_le16(1);
+	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.num_l2_ctxs = cpu_to_le16(4);
+	vf_vnics = 1;
+
+	req.num_vnics = cpu_to_le16(vf_vnics);
+	/* FIXME spec currently uses 1 bit for stats ctx */
+	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < *num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+		bp->pf.active_vfs = i + 1;
+		bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (!rc) {
+		bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+		if (bp->flags & BNXT_FLAG_AGG_RINGS)
+			bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+		else
+			bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+	}
+	return rc;
+}
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+	int rc = 0, vfs_supported;
+	int min_rx_rings, min_tx_rings, min_rss_ctxs;
+	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+	/* Check if we can enable the requested number of VFs. At a minimum
+	 * we require 1 RX and 1 TX ring for each VF. In this minimum
+	 * configuration, features like TPA will not be available.
+	 */
+	vfs_supported = *num_vfs;
+
+	while (vfs_supported) {
+		min_rx_rings = vfs_supported;
+		min_tx_rings = vfs_supported;
+		min_rss_ctxs = vfs_supported;
+
+		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+			    min_rx_rings)
+				rx_ok = 1;
+		} else {
+			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+			    min_rx_rings)
+				rx_ok = 1;
+		}
+
+		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+			tx_ok = 1;
+
+		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+			rss_ok = 1;
+
+		if (tx_ok && rx_ok && rss_ok)
+			break;
+
+		vfs_supported--;
+	}
+
+	if (!vfs_supported) {
+		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
+		return -EINVAL;
+	}
+
+	if (vfs_supported != *num_vfs) {
+		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+			    *num_vfs, vfs_supported);
+		*num_vfs = vfs_supported;
+	}
+
+	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+	if (rc)
+		goto err_out1;
+
+	/* Reserve resources for VFs */
+	rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+	if (rc)
+		goto err_out2;
+
+	/* Register buffers for VFs */
+	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	if (rc)
+		goto err_out2;
+
+	rc = pci_enable_sriov(bp->pdev, *num_vfs);
+	if (rc)
+		goto err_out2;
+
+	return 0;
+
+err_out2:
+	/* Free the resources reserved for the VFs */
+	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+
+err_out1:
+	bnxt_free_vf_resources(bp);
+
+	return rc;
+}
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+	u16 num_vfs = pci_num_vf(bp->pdev);
+
+	if (!num_vfs)
+		return;
+
+	if (pci_vfs_assigned(bp->pdev)) {
+		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
+			    num_vfs);
+	} else {
+		pci_disable_sriov(bp->pdev);
+		/* Free the HW resources reserved for the VFs */
+		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
+	}
+
+	bnxt_free_vf_resources(bp);
+
+	bp->pf.active_vfs = 0;
+	bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+	bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		netdev_warn(dev, "SRIOV is not allowed if the irq mode is not MSIX\n");
+		return 0;
+	}
+
+	rtnl_lock();
+	if (!netif_running(dev)) {
+		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
+		rtnl_unlock();
+		return 0;
+	}
+	bp->sriov_cfg = true;
+	rtnl_unlock();
+
+	if (pci_vfs_assigned(bp->pdev)) {
+		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
+		num_vfs = 0;
+		goto sriov_cfg_exit;
+	}
+
+	/* Check if the number of enabled VFs is the same as requested */
+	if (num_vfs && num_vfs == bp->pf.active_vfs)
+		goto sriov_cfg_exit;
+
+	/* If VFs already exist from a previous configuration, clean them up */
+	bnxt_sriov_disable(bp);
+	if (!num_vfs)
+		goto sriov_cfg_exit;
+
+	bnxt_sriov_enable(bp, &num_vfs);
+
+sriov_cfg_exit:
+	bp->sriov_cfg = false;
+	wake_up(&bp->sriov_cfg_wait);
+
+	return num_vfs;
+}
+
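+/* Forward a response prepared by the PF to the VF identified by target_id
+ * using the HWRM_FWD_RESP firmware command.
+ */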
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+			      void *encap_resp, __le64 encap_resp_addr,
+			      __le16 encap_resp_cpr, u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_fwd_resp_input req = {0};
+	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	req.encap_resp_len = cpu_to_le16(msg_size);
+	req.encap_resp_addr = encap_resp_addr;
+	req.encap_resp_cmpl_ring = encap_resp_cpr;
+	memcpy(req.encap_resp, encap_resp, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+		goto fwd_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+				  u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_reject_fwd_resp_input req = {0};
+	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+		goto fwd_err_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_err_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+				   u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_exec_fwd_resp_input req = {0};
+	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
+		goto exec_fwd_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+exec_fwd_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
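+/* A VF may only add an L2 filter for its PF-assigned MAC address; if no
+ * valid MAC was assigned, any address is accepted.
+ */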
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+	struct hwrm_cfa_l2_filter_alloc_input *req =
+		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+	if (!is_valid_ether_addr(vf->mac_addr) ||
+	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+	else
+		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
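+/* Answer a VF's PORT_PHY_QCFG query: report the real PF link state unless
+ * the VF link has been administratively forced up or down.
+ */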
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	int rc = 0;
+
+	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+		/* real link */
+		rc = bnxt_hwrm_exec_fwd_resp(
+			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+	} else {
+		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+		phy_qcfg_req =
+		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+		mutex_lock(&bp->hwrm_cmd_lock);
+		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+		       sizeof(phy_qcfg_resp));
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+		if (vf->flags & BNXT_VF_LINK_UP) {
+			/* if physical link is down, force link up on VF */
+			if (phy_qcfg_resp.link ==
+			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+				phy_qcfg_resp.link =
+					PORT_PHY_QCFG_RESP_LINK_LINK;
+				if (phy_qcfg_resp.auto_link_speed)
+					phy_qcfg_resp.link_speed =
+						phy_qcfg_resp.auto_link_speed;
+				else
+					phy_qcfg_resp.link_speed =
+						phy_qcfg_resp.force_link_speed;
+				phy_qcfg_resp.duplex =
+					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+				phy_qcfg_resp.pause =
+					(PORT_PHY_QCFG_RESP_PAUSE_TX |
+					 PORT_PHY_QCFG_RESP_PAUSE_RX);
+			}
+		} else {
+			/* force link down */
+			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+			phy_qcfg_resp.link_speed = 0;
+			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+			phy_qcfg_resp.pause = 0;
+		}
+		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+					phy_qcfg_req->resp_addr,
+					phy_qcfg_req->cmpl_ring,
+					sizeof(phy_qcfg_resp));
+	}
+	return rc;
+}
+
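+/* Validate and dispatch one HWRM command forwarded from a VF, keyed on the
+ * request type in the encapsulated header.
+ */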
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	int rc = 0;
+	struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+	u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+	switch (req_type) {
+	case HWRM_CFA_L2_FILTER_ALLOC:
+		rc = bnxt_vf_validate_set_mac(bp, vf);
+		break;
+	case HWRM_FUNC_CFG:
+		/* TODO: Validate whether the VF is allowed to change the MAC
+		 * address, MTU, number of rings, etc.
+		 */
+		rc = bnxt_hwrm_exec_fwd_resp(
+			bp, vf, sizeof(struct hwrm_func_cfg_input));
+		break;
+	case HWRM_PORT_PHY_QCFG:
+		rc = bnxt_vf_set_link(bp, vf);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
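+/* Walk the VF event bitmap and service every VF with a forwarded HWRM
+ * command pending.
+ */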
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+	/* Scan through VFs and process commands */
+	while (1) {
+		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+		if (vf_id >= active_vfs)
+			break;
+
+		clear_bit(vf_id, bp->pf.vf_event_bmap);
+		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+		i = vf_id + 1;
+	}
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+	struct hwrm_func_qcaps_input req = {0};
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+		goto update_vf_mac_exit;
+
+	if (!is_valid_ether_addr(resp->perm_mac_address))
+		goto update_vf_mac_exit;
+
+	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	/* overwrite netdev dev_addr with admin VF MAC */
+	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 0000000..c151280
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
new file mode 100644
index 0000000..b69dc58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -0,0 +1,5789 @@
+/* cnic.c: QLogic CNIC core network driver.
+ *
+ * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
+ * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
+#include <scsi/iscsi_if.h>
+
+#define BCM_CNIC	1
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "bnx2x/bnx2x.h"
+#include "bnx2x/bnx2x_reg.h"
+#include "bnx2x/bnx2x_fw_defs.h"
+#include "bnx2x/bnx2x_hsi.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
+#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define CNIC_MODULE_NAME	"cnic"
+
+static char version[] =
+	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+	      "Chen (zongxi@broadcom.com)");
+MODULE_DESCRIPTION("QLogic cnic Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
+static LIST_HEAD(cnic_dev_list);
+static LIST_HEAD(cnic_udev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+	return rcu_dereference_protected(cnic_ulp_tbl[type],
+					 lockdep_is_held(&cnic_lock));
+}
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_service_bnx2x(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static struct cnic_ops cnic_bnx2x_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2x,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static struct workqueue_struct *cnic_wq;
+
+static void cnic_shutdown_rings(struct cnic_dev *);
+static void cnic_init_rings(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_uio_dev *udev = uinfo->priv;
+	struct cnic_dev *dev;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (udev->uio_dev != -1)
+		return -EBUSY;
+
+	rtnl_lock();
+	dev = udev->dev;
+
+	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+
+	udev->uio_dev = iminor(inode);
+
+	cnic_shutdown_rings(dev);
+	cnic_init_rings(dev);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_uio_dev *udev = uinfo->priv;
+
+	udev->uio_dev = -1;
+	return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+	atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+	atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+	atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+	atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+	struct cnic_dev *cdev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(cdev, &cnic_dev_list, list) {
+		if (netdev == cdev->netdev) {
+			cnic_hold(cdev);
+			read_unlock(&cnic_dev_lock);
+			return cdev;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+	return NULL;
+}
+
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	info.cmd = DRV_CTL_CTX_WR_CMD;
+	io->cid_addr = cid_addr;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
+	io->offset = off;
+	io->dma_addr = addr;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_l2_ring *ring = &info.data.ring;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	if (start)
+		info.cmd = DRV_CTL_START_L2_CMD;
+	else
+		info.cmd = DRV_CTL_STOP_L2_CMD;
+
+	ring->cid = cid;
+	ring->client_id = cl_id;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	info.cmd = DRV_CTL_IO_WR_CMD;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	info.cmd = DRV_CTL_IO_RD_CMD;
+	io->offset = off;
+	ethdev->drv_ctl(dev->netdev, &info);
+	return io->data;
+}
+
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct fcoe_capabilities *fcoe_cap =
+		&info.data.register_data.fcoe_features;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	if (reg) {
+		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+	} else {
+		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+	}
+
+	info.data.ulp_type = ulp_type;
+	info.drv_state = state;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+	return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	memset(&info, 0, sizeof(struct drv_ctl_info));
+	info.cmd = cmd;
+	info.data.credit.credit_count = count;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
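+/* Map a hardware context id back to its L5 CID (the index into cp->ctx_tbl)
+ * by linear search.
+ */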
+static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
+{
+	u32 i;
+
+	if (!cp->ctx_tbl)
+		return -EINVAL;
+
+	for (i = 0; i < cp->max_cid_space; i++) {
+		if (cp->ctx_tbl[i].cid == cid) {
+			*l5_cid = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+			   struct cnic_sock *csk)
+{
+	struct iscsi_path path_req;
+	char *buf = NULL;
+	u16 len = 0;
+	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+	struct cnic_ulp_ops *ulp_ops;
+	struct cnic_uio_dev *udev = cp->udev;
+	int rc = 0, retry = 0;
+
+	if (!udev || udev->uio_dev == -1)
+		return -ENODEV;
+
+	if (csk) {
+		len = sizeof(path_req);
+		buf = (char *) &path_req;
+		memset(&path_req, 0, len);
+
+		msg_type = ISCSI_KEVENT_PATH_REQ;
+		path_req.handle = (u64) csk->l5_cid;
+		if (test_bit(SK_F_IPV6, &csk->flags)) {
+			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+			       sizeof(struct in6_addr));
+			path_req.ip_addr_len = 16;
+		} else {
+			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+			       sizeof(struct in_addr));
+			path_req.ip_addr_len = 4;
+		}
+		path_req.vlan_id = csk->vlan_id;
+		path_req.pmtu = csk->mtu;
+	}
+
+	while (retry < 3) {
+		rc = 0;
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+		if (ulp_ops)
+			rc = ulp_ops->iscsi_nl_send_msg(
+				cp->ulp_handle[CNIC_ULP_ISCSI],
+				msg_type, buf, len);
+		rcu_read_unlock();
+		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
+			break;
+
+		msleep(100);
+		retry++;
+	}
+	return rc;
+}
+
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+				  char *buf, u16 len)
+{
+	int rc = -EINVAL;
+
+	switch (msg_type) {
+	case ISCSI_UEVENT_PATH_UPDATE: {
+		struct cnic_local *cp;
+		u32 l5_cid;
+		struct cnic_sock *csk;
+		struct iscsi_path *path_resp;
+
+		if (len < sizeof(*path_resp))
+			break;
+
+		path_resp = (struct iscsi_path *) buf;
+		cp = dev->cnic_priv;
+		l5_cid = (u32) path_resp->handle;
+		if (l5_cid >= MAX_CM_SK_TBL_SZ)
+			break;
+
+		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
+			rc = -ENODEV;
+			break;
+		}
+		csk = &cp->csk_tbl[l5_cid];
+		csk_hold(csk);
+		if (cnic_in_use(csk) &&
+		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
+			csk->vlan_id = path_resp->vlan_id;
+
+			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
+			if (test_bit(SK_F_IPV6, &csk->flags))
+				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+				       sizeof(struct in6_addr));
+			else
+				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+				       sizeof(struct in_addr));
+
+			if (is_valid_ether_addr(csk->ha)) {
+				cnic_cm_set_pg(csk);
+			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+				cnic_cm_upcall(cp, csk,
+					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+				clear_bit(SK_F_CONNECT_START, &csk->flags);
+			}
+		}
+		csk_put(csk);
+		rc = 0;
+	}
+	}
+
+	return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		return 0;
+
+	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_atomic();
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+			msleep(1);
+
+		return 1;
+	}
+	return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_atomic();
+
+	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		msleep(1);
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		return 1;
+	}
+
+	return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl_prot(ulp_type)) {
+		pr_err("%s: Type %d has already been registered\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+	}
+	read_unlock(&cnic_dev_lock);
+
+	atomic_set(&ulp_ops->ref_count, 0);
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+	mutex_unlock(&cnic_lock);
+
+	/* Prevent race conditions with netdev_event */
+	rtnl_lock();
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_init(dev);
+	}
+	rtnl_unlock();
+
+	return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	if (!ulp_ops) {
+		pr_err("%s: Type %d has not been registered\n",
+		       __func__, ulp_type);
+		goto out_unlock;
+	}
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
+			pr_err("%s: Type %d still has devices registered\n",
+			       __func__, ulp_type);
+			read_unlock(&cnic_dev_lock);
+			goto out_unlock;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
+
+	mutex_unlock(&cnic_lock);
+	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		pr_warn("%s: Failed waiting for ref count to go to zero\n",
+			__func__);
+	return 0;
+
+out_unlock:
+	mutex_unlock(&cnic_lock);
+	return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+				void *ulp_ctx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
+		pr_err("%s: Driver with type %d has not been registered\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EAGAIN;
+	}
+	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
+		pr_err("%s: Type %d has already been registered to this device\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+	cp->ulp_handle[ulp_type] = ulp_ctx;
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+	cnic_hold(dev);
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+	mutex_unlock(&cnic_lock);
+
+	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+	mutex_lock(&cnic_lock);
+	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
+		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
+		cnic_put(dev);
+	} else {
+		pr_err("%s: device not registered to this ulp type %d\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&cnic_lock);
+
+	if (ulp_type == CNIC_ULP_FCOE)
+		dev->fcoe_cap = NULL;
+
+	synchronize_rcu();
+
+	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+	       i < 20) {
+		msleep(100);
+		i++;
+	}
+	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+
+	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+	else
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
+			    u32 next)
+{
+	id_tbl->start = start_id;
+	id_tbl->max = size;
+	id_tbl->next = next;
+	spin_lock_init(&id_tbl->lock);
+	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+	if (!id_tbl->table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+	kfree(id_tbl->table);
+	id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	int ret = -1;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return ret;
+
+	spin_lock(&id_tbl->lock);
+	if (!test_bit(id, id_tbl->table)) {
+		set_bit(id, id_tbl->table);
+		ret = 0;
+	}
+	spin_unlock(&id_tbl->lock);
+	return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+	u32 id;
+
+	spin_lock(&id_tbl->lock);
+	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+	if (id >= id_tbl->max) {
+		id = -1;
+		if (id_tbl->next != 0) {
+			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+			if (id >= id_tbl->next)
+				id = -1;
+		}
+	}
+
+	if (id < id_tbl->max) {
+		set_bit(id, id_tbl->table);
+		id_tbl->next = (id + 1) & (id_tbl->max - 1);
+		id += id_tbl->start;
+	}
+
+	spin_unlock(&id_tbl->lock);
+
+	return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	if (id == -1)
+		return;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return;
+
+	clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+
+	if (!dma->pg_arr)
+		return;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		if (dma->pg_arr[i]) {
+			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
+					  dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma->pg_arr[i] = NULL;
+		}
+	}
+	if (dma->pgtbl) {
+		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+				  dma->pgtbl, dma->pgtbl_map);
+		dma->pgtbl = NULL;
+	}
+	kfree(dma->pg_arr);
+	dma->pg_arr = NULL;
+	dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in big endian format. */
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+		page_table++;
+	}
+}
+
+static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each entry needs to be in little endian format. */
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+		page_table++;
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+	}
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+			  int pages, int use_pg_tbl)
+{
+	int i, size;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+	if (dma->pg_arr == NULL)
+		return -ENOMEM;
+
+	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+	dma->num_pages = pages;
+
+	for (i = 0; i < pages; i++) {
+		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
+						    CNIC_PAGE_SIZE,
+						    &dma->pg_map_arr[i],
+						    GFP_ATOMIC);
+		if (dma->pg_arr[i] == NULL)
+			goto error;
+	}
+	if (!use_pg_tbl)
+		return 0;
+
+	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+			  ~(CNIC_PAGE_SIZE - 1);
+	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+					&dma->pgtbl_map, GFP_ATOMIC);
+	if (dma->pgtbl == NULL)
+		goto error;
+
+	cp->setup_pgtbl(dev, dma);
+
+	return 0;
+
+error:
+	cnic_free_dma(dev, dma);
+	return -ENOMEM;
+}
+
+static void cnic_free_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					  cp->ctx_arr[i].ctx,
+					  cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+}
+
+static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
+{
+	if (udev->l2_buf) {
+		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
+				  udev->l2_buf, udev->l2_buf_map);
+		udev->l2_buf = NULL;
+	}
+
+	if (udev->l2_ring) {
+		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+				  udev->l2_ring, udev->l2_ring_map);
+		udev->l2_ring = NULL;
+	}
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+	uio_unregister_device(&udev->cnic_uinfo);
+
+	__cnic_free_uio_rings(udev);
+
+	pci_dev_put(udev->pdev);
+	kfree(udev);
+}
+
+static void cnic_free_uio(struct cnic_uio_dev *udev)
+{
+	if (!udev)
+		return;
+
+	write_lock(&cnic_dev_lock);
+	list_del_init(&udev->list);
+	write_unlock(&cnic_dev_lock);
+	__cnic_free_uio(udev);
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+
+	if (udev) {
+		udev->dev = NULL;
+		cp->udev = NULL;
+		if (udev->uio_dev == -1)
+			__cnic_free_uio_rings(udev);
+	}
+
+	cnic_free_context(dev);
+	kfree(cp->ctx_arr);
+	cp->ctx_arr = NULL;
+	cp->ctx_blks = 0;
+
+	cnic_free_dma(dev, &cp->gbl_buf_info);
+	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kwq_16_data_info);
+	cnic_free_dma(dev, &cp->kcq2.dma);
+	cnic_free_dma(dev, &cp->kcq1.dma);
+	kfree(cp->iscsi_tbl);
+	cp->iscsi_tbl = NULL;
+	kfree(cp->ctx_tbl);
+	cp->ctx_tbl = NULL;
+
+	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
+	cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
+		int i, k, arr_size;
+
+		cp->ctx_blk_size = CNIC_PAGE_SIZE;
+		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
+		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+			   sizeof(struct cnic_ctx);
+		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+		if (cp->ctx_arr == NULL)
+			return -ENOMEM;
+
+		k = 0;
+		for (i = 0; i < 2; i++) {
+			u32 j, reg, off, lo, hi;
+
+			if (i == 0)
+				off = BNX2_PG_CTX_MAP;
+			else
+				off = BNX2_ISCSI_CTX_MAP;
+
+			reg = cnic_reg_rd_ind(dev, off);
+			lo = reg >> 16;
+			hi = reg & 0xffff;
+			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+				cp->ctx_arr[k].cid = j;
+		}
+
+		cp->ctx_blks = k;
+		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+			cp->ctx_blks = 0;
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < cp->ctx_blks; i++) {
+			cp->ctx_arr[i].ctx =
+				dma_alloc_coherent(&dev->pcidev->dev,
+						   CNIC_PAGE_SIZE,
+						   &cp->ctx_arr[i].mapping,
+						   GFP_KERNEL);
+			if (cp->ctx_arr[i].ctx == NULL)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
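+/* On bnx2x, the last KCQE slot of each page holds the next-page pointer, so
+ * index arithmetic must skip over it.
+ */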
+static u16 cnic_bnx2x_next_idx(u16 idx)
+{
+	idx++;
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+
+	return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+	return idx;
+}
+
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
+			  bool use_pg_tbl)
+{
+	int err, i, use_page_tbl = 0;
+	struct kcqe **kcq;
+
+	if (use_pg_tbl)
+		use_page_tbl = 1;
+
+	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
+	if (err)
+		return err;
+
+	kcq = (struct kcqe **) info->dma.pg_arr;
+	info->kcq = kcq;
+
+	info->next_idx = cnic_bnx2_next_idx;
+	info->hw_idx = cnic_bnx2_hw_idx;
+	if (use_pg_tbl)
+		return 0;
+
+	info->next_idx = cnic_bnx2x_next_idx;
+	info->hw_idx = cnic_bnx2x_hw_idx;
+
+	for (i = 0; i < KCQ_PAGE_CNT; i++) {
+		struct bnx2x_bd_chain_next *next =
+			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
+		int j = i + 1;
+
+		if (j >= KCQ_PAGE_CNT)
+			j = 0;
+		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
+		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
+	}
+	return 0;
+}
+
+static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
+{
+	struct cnic_local *cp = udev->dev->cnic_priv;
+
+	if (udev->l2_ring)
+		return 0;
+
+	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
+	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+					   &udev->l2_ring_map,
+					   GFP_KERNEL | __GFP_COMP);
+	if (!udev->l2_ring)
+		return -ENOMEM;
+
+	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+					  &udev->l2_buf_map,
+					  GFP_KERNEL | __GFP_COMP);
+	if (!udev->l2_buf) {
+		__cnic_free_uio_rings(udev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev;
+
+	list_for_each_entry(udev, &cnic_udev_list, list) {
+		if (udev->pdev == dev->pcidev) {
+			udev->dev = dev;
+			if (__cnic_alloc_uio_rings(udev, pages)) {
+				udev->dev = NULL;
+				return -ENOMEM;
+			}
+			cp->udev = udev;
+			return 0;
+		}
+	}
+
+	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
+	if (!udev)
+		return -ENOMEM;
+
+	udev->uio_dev = -1;
+
+	udev->dev = dev;
+	udev->pdev = dev->pcidev;
+
+	if (__cnic_alloc_uio_rings(udev, pages))
+		goto err_udev;
+
+	list_add(&udev->list, &cnic_udev_list);
+
+	pci_dev_get(udev->pdev);
+
+	cp->udev = udev;
+
+	return 0;
+
+ err_udev:
+	kfree(udev);
+	return -ENOMEM;
+}
+
+static int cnic_init_uio(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	struct uio_info *uinfo;
+	int ret = 0;
+
+	if (!udev)
+		return -ENOMEM;
+
+	uinfo = &udev->cnic_uinfo;
+
+	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
+	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+						     TX_MAX_TSS_RINGS + 1);
+		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
+					CNIC_PAGE_MASK;
+		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+		else
+			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+
+		uinfo->name = "bnx2_cnic";
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
+		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
+			CNIC_PAGE_MASK;
+		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+
+		uinfo->name = "bnx2x_cnic";
+	}
+
+	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
+	uinfo->mem[2].size = udev->l2_ring_size;
+	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
+	uinfo->mem[3].size = udev->l2_buf_size;
+	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->version = CNIC_MODULE_VERSION;
+	uinfo->irq = UIO_IRQ_CUSTOM;
+
+	uinfo->open = cnic_uio_open;
+	uinfo->release = cnic_uio_close;
+
+	if (udev->uio_dev == -1) {
+		if (!uinfo->priv) {
+			uinfo->priv = udev;
+
+			ret = uio_register_device(&udev->pdev->dev, uinfo);
+		}
+	} else {
+		cnic_init_rings(dev);
+	}
+
+	return ret;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+
+	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_context(dev);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_uio_rings(dev, 2);
+	if (ret)
+		goto error;
+
+	ret = cnic_init_uio(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return ret;
+}
+
+static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ctx_blk_size = cp->ethdev->ctx_blk_size;
+	int total_mem, blks, i;
+
+	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
+	blks = total_mem / ctx_blk_size;
+	if (total_mem % ctx_blk_size)
+		blks++;
+
+	if (blks > cp->ethdev->ctx_tbl_len)
+		return -ENOMEM;
+
+	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
+	if (cp->ctx_arr == NULL)
+		return -ENOMEM;
+
+	cp->ctx_blks = blks;
+	cp->ctx_blk_size = ctx_blk_size;
+	if (!CHIP_IS_E1(bp))
+		cp->ctx_align = 0;
+	else
+		cp->ctx_align = ctx_blk_size;
+
+	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
+
+	for (i = 0; i < blks; i++) {
+		cp->ctx_arr[i].ctx =
+			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					   &cp->ctx_arr[i].mapping,
+					   GFP_KERNEL);
+		if (cp->ctx_arr[i].ctx == NULL)
+			return -ENOMEM;
+
+		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
+			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
+				cnic_free_context(dev);
+				cp->ctx_blk_size += cp->ctx_align;
+				i = -1;
+				continue;
+			}
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_cid = ethdev->starting_cid;
+	int i, j, n, ret, pages;
+	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->iscsi_start_cid = start_cid;
+	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		cp->max_cid_space += dev->max_fcoe_conn;
+		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+		if (!cp->fcoe_init_cid)
+			cp->fcoe_init_cid = 0x10;
+	}
+
+	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
+				GFP_KERNEL);
+	if (!cp->iscsi_tbl)
+		goto error;
+
+	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
+				cp->max_cid_space, GFP_KERNEL);
+	if (!cp->ctx_tbl)
+		goto error;
+
+	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
+	}
+
+	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
+	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		CNIC_PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
+	if (ret)
+		return -ENOMEM;
+
+	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
+		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
+
+		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
+		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
+						   off;
+
+		if ((i % n) == (n - 1))
+			j++;
+	}
+
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
+	if (ret)
+		goto error;
+
+	if (CNIC_SUPPORTS_FCOE(bp)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
+		if (ret)
+			goto error;
+	}
+
+	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_bnx2x_context(dev);
+	if (ret)
+		goto error;
+
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		return 0;
+
+	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+
+	cp->l2_rx_ring_size = 15;
+
+	ret = cnic_alloc_uio_rings(dev, 4);
+	if (ret)
+		goto error;
+
+	ret = cnic_init_uio(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return -ENOMEM;
+}
+
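+/* Number of free entries in the bnx2 kernel work queue ring. */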
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+	return cp->max_kwq_idx -
+		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				  u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kwqe *prod_qe;
+	u16 prod, sw_prod, i;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	if (num_wqes > cnic_kwq_avail(cp) &&
+	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
+		spin_unlock_bh(&cp->cnic_ulp_lock);
+		return -EAGAIN;
+	}
+
+	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+	prod = cp->kwq_prod_idx;
+	sw_prod = prod & MAX_KWQ_IDX;
+	for (i = 0; i < num_wqes; i++) {
+		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+		prod++;
+		sw_prod = prod & MAX_KWQ_IDX;
+	}
+	cp->kwq_prod_idx = prod;
+
+	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+	return 0;
+}
+
+static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
+				   union l5cm_specific_data *l5_data)
+{
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	dma_addr_t map;
+
+	map = ctx->kwqe_data_mapping;
+	l5_data->phy_address.lo = (u64) map & 0xffffffff;
+	l5_data->phy_address.hi = (u64) map >> 32;
+	return ctx->kwqe_data;
+}
+
+static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
+				u32 type, union l5cm_specific_data *l5_data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct l5cm_spe kwqe;
+	struct kwqe_16 *kwq[1];
+	u16 type_16;
+	int ret;
+
+	kwqe.hdr.conn_and_cmd_data =
+		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
+			     BNX2X_HW_CID(bp, cid)));
+
+	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+		   SPE_HDR_FUNCTION_ID;
+
+	kwqe.hdr.type = cpu_to_le16(type_16);
+	kwqe.hdr.reserved1 = 0;
+	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
+	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
+
+	kwq[0] = (struct kwqe_16 *) &kwqe;
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+
+	if (ret == 1)
+		return 0;
+
+	return ret;
+}
+
+static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
+				   struct kcqe *cqes[], u32 num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (likely(ulp_ops)) {
+		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+					  cqes, num_cqes);
+	}
+	rcu_read_unlock();
+}
+
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+				       int en_tcp_dack)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+	u16 tstorm_flags = 0;
+
+	if (time_stamps) {
+		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+	}
+	if (en_tcp_dack)
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
+
+static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
+	int hq_bds, pages;
+	u32 pfid = bp->pfid;
+
+	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
+	cp->num_ccells = req1->num_ccells_per_conn;
+	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
+			      cp->num_iscsi_tasks;
+	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
+			BNX2X_ISCSI_R2TQE_SIZE;
+	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	cp->num_cqs = req1->num_cqs;
+
+	if (!dev->max_iscsi_conn)
+		return 0;
+
+	/* init Tstorm RAM */
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  CNIC_PAGE_SIZE);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+
+	/* init Ustorm RAM */
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
+		  req1->rq_buffer_size);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  CNIC_PAGE_SIZE);
+	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Xstorm RAM */
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  CNIC_PAGE_SIZE);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+		  hq_bds);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Cstorm RAM */
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  CNIC_PAGE_SIZE);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+		  hq_bds);
+
+	cnic_bnx2x_set_tcp_options(dev,
+			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 pfid = bp->pfid;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	if (!dev->max_iscsi_conn) {
+		kcqe.completion_status =
+			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
+		goto done;
+	}
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+
+done:
+	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return 0;
+}
+
+static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
+		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+		cnic_free_dma(dev, &iscsi->hq_info);
+		cnic_free_dma(dev, &iscsi->r2tq_info);
+		cnic_free_dma(dev, &iscsi->task_array_info);
+		cnic_free_id(&cp->cid_tbl, ctx->cid);
+	} else {
+		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
+	}
+
+	ctx->cid = 0;
+}
+
+static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	u32 cid;
+	int ret, pages;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+		if (cid == -1) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ctx->cid = cid;
+		return 0;
+	}
+
+	cid = cnic_alloc_new_id(&cp->cid_tbl);
+	if (cid == -1) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ctx->cid = cid;
+	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+	return ret;
+}
+
+static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
+				struct regpair *ctx_addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
+	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
+	unsigned long align_off = 0;
+	dma_addr_t ctx_map;
+	void *ctx;
+
+	if (cp->ctx_align) {
+		unsigned long mask = cp->ctx_align - 1;
+
+		if (cp->ctx_arr[blk].mapping & mask)
+			align_off = cp->ctx_align -
+				    (cp->ctx_arr[blk].mapping & mask);
+	}
+	ctx_map = cp->ctx_arr[blk].mapping + align_off +
+		(off * BNX2X_CONTEXT_MEM_SIZE);
+	ctx = cp->ctx_arr[blk].ctx + align_off +
+	      (off * BNX2X_CONTEXT_MEM_SIZE);
+	if (init)
+		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
+
+	ctx_addr->lo = ctx_map & 0xffffffff;
+	ctx_addr->hi = (u64) ctx_map >> 32;
+	return ctx;
+}
+
+static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct iscsi_kwqe_conn_offload1 *req1 =
+			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	struct iscsi_kwqe_conn_offload2 *req2 =
+			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	struct iscsi_kwqe_conn_offload3 *req3;
+	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+	u32 cid = ctx->cid;
+	u32 hw_cid = BNX2X_HW_CID(bp, cid);
+	struct iscsi_context *ictx;
+	struct regpair context_addr;
+	int i, j, n = 2, n_max;
+	u8 port = BP_PORT(bp);
+
+	ctx->ctx_flags = 0;
+	if (!req2->num_additional_wqes)
+		return -EINVAL;
+
+	n_max = req2->num_additional_wqes + 2;
+
+	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
+	if (ictx == NULL)
+		return -ENOMEM;
+
+	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+
+	ictx->xstorm_ag_context.hq_prod = 1;
+
+	ictx->xstorm_st_context.iscsi.first_burst_length =
+		ISCSI_DEF_FIRST_BURST_LEN;
+	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
+		ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
+		req1->sq_page_table_addr_lo;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
+		req1->sq_page_table_addr_hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
+		iscsi->hq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
+		iscsi->hq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+		ETH_P_8021Q;
+	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
+
+		port = 0;
+	}
+	ictx->xstorm_st_context.common.flags =
+		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+	ictx->xstorm_st_context.common.flags =
+		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
+
+	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
+	/* TSTORM requires the base address of RQ DB & not PTE */
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
+		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
+	ictx->tstorm_st_context.tcp.flags2 |=
+		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
+	ictx->tstorm_st_context.tcp.ooo_support_mode =
+		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
+
+	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
+
+	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
+		req2->rq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
+		req1->cq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
+		req1->cq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
+	ictx->ustorm_st_context.task_pbe_cache_index =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->ustorm_st_context.task_pdu_cache_index =
+		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
+
+	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
+		if (j == 3) {
+			if (n >= n_max)
+				break;
+			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+			j = 0;
+		}
+		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
+			req3->qp_first_pte[j].hi;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
+			req3->qp_first_pte[j].lo;
+	}
+
+	ictx->ustorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.tce_phy_addr.lo =
+		iscsi->task_array_info.pgtbl[0];
+	ictx->ustorm_st_context.tce_phy_addr.hi =
+		iscsi->task_array_info.pgtbl[1];
+	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
+	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->ustorm_st_context.negotiated_rx_and_flags |=
+		ISCSI_DEF_MAX_BURST_LEN;
+	ictx->ustorm_st_context.negotiated_rx |=
+		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
+		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
+
+	ictx->cstorm_st_context.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
+	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
+	ictx->cstorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	/* CSTORM and USTORM initialization is different, CSTORM requires
+	 * CQ DB base & not PTE addr
+	 */
+	ictx->cstorm_st_context.cq_db_base.lo =
+		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
+	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
+	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
+	for (i = 0; i < cp->num_cqs; i++) {
+		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+	}
+
+	ictx->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	ictx->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num, int *work)
+{
+	struct iscsi_kwqe_conn_offload1 *req1;
+	struct iscsi_kwqe_conn_offload2 *req2;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_context *ctx;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+	u32 l5_cid;
+	int ret = 0;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	if ((num - 2) < req2->num_additional_wqes) {
+		*work = num;
+		return -EINVAL;
+	}
+	*work = 2 + req2->num_additional_wqes;
+
+	l5_cid = req1->iscsi_conn_id;
+	if (l5_cid >= MAX_ISCSI_TBL_SZ)
+		return -EINVAL;
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
+		kcqe.completion_status =
+			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
+		goto done;
+	}
+
+	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
+		atomic_dec(&cp->iscsi_conn);
+		goto done;
+	}
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		atomic_dec(&cp->iscsi_conn);
+		ret = 0;
+		goto done;
+	}
+	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
+	if (ret < 0) {
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+		atomic_dec(&cp->iscsi_conn);
+		goto done;
+	}
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
+
+done:
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_update *req =
+		(struct iscsi_kwqe_conn_update *) kwqe;
+	void *data;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
+	int ret;
+
+	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
+		return -EINVAL;
+
+	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!data)
+		return -ENOMEM;
+
+	memcpy(data, kwqe, sizeof(struct kwqe));
+
+	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
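+/* Issue a common CFC delete ramrod for the connection's hardware CID and
+ * wait (bounded by CNIC_RAMROD_TMO) for its completion.  Returns -EBUSY if
+ * the completion reported a CID error.
+ */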
+static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 hw_cid;
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+	memset(&l5_data, 0, sizeof(l5_data));
+	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
+
+	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
+
+	if (ret == 0) {
+		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
+		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_destroy *req =
+		(struct iscsi_kwqe_conn_destroy *) kwqe;
+	u32 l5_cid = req->reserved0;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret = 0;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto skip_cfc_delete;
+
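+	/* If the context timestamp is less than 2 seconds old, defer the CFC
+	 * delete to the delayed delete_task instead of issuing the ramrod
+	 * now; the clamp keeps delta sane if the subtraction wraps.
+	 */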
+	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
+
+		if (delta > (2 * HZ))
+			delta = 0;
+
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
+		goto destroy_reply;
+	}
+
+	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
+
+skip_cfc_delete:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	if (!ret) {
+		atomic_dec(&cp->iscsi_conn);
+		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	}
+
+destroy_reply:
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = req->context_id;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return 0;
+}
+
+static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
+				      struct l4_kwq_connect_req1 *kwqe1,
+				      struct l4_kwq_connect_req3 *kwqe3,
+				      struct l5cm_active_conn_buffer *conn_buf)
+{
+	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer *xstorm_buf =
+		&conn_buf->xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer *tstorm_buf =
+		&conn_buf->tstorm_conn_buffer;
+	struct regpair context_addr;
+	u32 cid = BNX2X_SW_CID(kwqe1->cid);
+	struct in6_addr src_ip, dst_ip;
+	int i;
+	u32 *addrp;
+
+	addrp = (u32 *) &conn_addr->local_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	addrp = (u32 *) &conn_addr->remote_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
+
+	xstorm_buf->context_addr.hi = context_addr.hi;
+	xstorm_buf->context_addr.lo = context_addr.lo;
+	xstorm_buf->mss = 0xffff;
+	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
+	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
+		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
+	xstorm_buf->pseudo_header_checksum =
+		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
+
+	if (kwqe3->ka_timeout) {
+		tstorm_buf->ka_enable = 1;
+		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
+		tstorm_buf->ka_interval = kwqe3->ka_interval;
+		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
+	}
+	tstorm_buf->max_rt_time = 0xffffffff;
+}
+
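+/* Program the device MAC address into the XSTORM and TSTORM internal
+ * memories used by the iSCSI firmware: XSTORM takes the bytes in order,
+ * TSTORM takes them reversed across its LSB/MID/MSB word offsets.
+ */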
+static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 pfid = bp->pfid;
+	u8 *mac = dev->mac_addr;
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
+
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[4]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[2]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[0]);
+}
+
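+/* Handle an L4 CONNECT1 request.  Two KWQEs are consumed for IPv4 and three
+ * for IPv6; the connection address and storm buffers are built in the
+ * kwqe-16 data buffer obtained from cnic_get_kwqe_16_data() and a
+ * TCP_CONNECT ramrod is submitted.
+ */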
+static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
+			      u32 num, int *work)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct l4_kwq_connect_req1 *kwqe1 =
+		(struct l4_kwq_connect_req1 *) wqes[0];
+	struct l4_kwq_connect_req3 *kwqe3;
+	struct l5cm_active_conn_buffer *conn_buf;
+	struct l5cm_conn_addr_params *conn_addr;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid = kwqe1->pg_cid;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
+		*work = 3;
+	else
+		*work = 2;
+
+	if (num < *work) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "conn_buf size too big\n");
+		return -ENOMEM;
+	}
+	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!conn_buf)
+		return -ENOMEM;
+
+	memset(conn_buf, 0, sizeof(*conn_buf));
+
+	conn_addr = &conn_buf->conn_addr_buf;
+	conn_addr->remote_addr_0 = csk->ha[0];
+	conn_addr->remote_addr_1 = csk->ha[1];
+	conn_addr->remote_addr_2 = csk->ha[2];
+	conn_addr->remote_addr_3 = csk->ha[3];
+	conn_addr->remote_addr_4 = csk->ha[4];
+	conn_addr->remote_addr_5 = csk->ha[5];
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
+		struct l4_kwq_connect_req2 *kwqe2 =
+			(struct l4_kwq_connect_req2 *) wqes[1];
+
+		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
+		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
+		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
+
+		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
+		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
+		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
+		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
+	}
+	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
+
+	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
+	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
+	conn_addr->local_tcp_port = kwqe1->src_port;
+	conn_addr->remote_tcp_port = kwqe1->dst_port;
+
+	conn_addr->pmtu = kwqe3->pmtu;
+	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
+
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
+			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+}
+
+static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->host_opaque;
+	kcqe.pg_cid = req->host_opaque;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->pg_host_opaque;
+	kcqe.pg_cid = req->pg_cid;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_stat *req;
+	struct fcoe_stat_ramrod_params *fcoe_stat;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_stat *) kwqe;
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
+
+	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_stat)
+		return -ENOMEM;
+
+	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 cid;
+	struct fcoe_init_ramrod_params *fcoe_init;
+	struct fcoe_kwqe_init1 *req1;
+	struct fcoe_kwqe_init2 *req2;
+	struct fcoe_kwqe_init3 *req3;
+	union l5cm_specific_data l5_data;
+
+	if (num < 3) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+		*work = 1;
+		return -EINVAL;
+	}
+	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+		*work = 2;
+		return -EINVAL;
+	}
+
+	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_init size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_init)
+		return -ENOMEM;
+
+	memset(fcoe_init, 0, sizeof(*fcoe_init));
+	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
+
+	fcoe_init->sb_num = cp->status_blk_num;
+	fcoe_init->eq_prod = MAX_KCQ_IDX;
+	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+	cp->kcq2.sw_prod_idx = 0;
+
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	*work = 3;
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret = 0;
+	u32 cid = -1, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct fcoe_kwqe_conn_offload1 *req1;
+	struct fcoe_kwqe_conn_offload2 *req2;
+	struct fcoe_kwqe_conn_offload3 *req3;
+	struct fcoe_kwqe_conn_offload4 *req4;
+	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+	struct cnic_context *ctx;
+	struct fcoe_context *fctx;
+	struct regpair ctx_addr;
+	union l5cm_specific_data l5_data;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (num < 4) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+	*work = 4;
+
+	l5_cid = req1->fcoe_conn_id;
+	if (l5_cid >= dev->max_fcoe_conn)
+		goto err_reply;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto err_reply;
+
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		ret = 0;
+		goto err_reply;
+	}
+	cid = ctx->cid;
+
+	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+	if (fctx) {
+		u32 hw_cid = BNX2X_HW_CID(bp, cid);
+		u32 val;
+
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->xstorm_ag_context.cdu_reserved = val;
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->ustorm_ag_context.cdu_usage = val;
+	}
+	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_offload size too big\n");
+		goto err_reply;
+	}
+	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_offload)
+		goto err_reply;
+
+	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+	cid = BNX2X_HW_CID(bp, cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+
+err_reply:
+	if (cid != -1)
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_enable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_enable)
+		return -ENOMEM;
+
+	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= dev->max_fcoe_conn)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_disable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_disable)
+		return -ENOMEM;
+
+	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_destroy *req;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= dev->max_fcoe_conn)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (ret == 0) {
+		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
+		if (ctx->wait_cond)
+			kcqe.completion_status = 0;
+	}
+
+	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
+
+	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.fcoe_conn_id = req->conn_id;
+	kcqe.fcoe_conn_context_id = cid;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
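+/* Wait for every context from start_cid onwards to finish any pending
+ * delayed delete and for its OFFLD_START flag to clear, polling with short
+ * sleeps and warning about any CID that is still not deleted.
+ */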
+static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 i;
+
+	for (i = start_cid; i < cp->max_cid_space; i++) {
+		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int j;
+
+		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			msleep(10);
+
+		for (j = 0; j < 5; j++) {
+			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+				break;
+			msleep(20);
+		}
+
+		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+			netdev_warn(dev->netdev, "CID %x not deleted\n",
+				   ctx->cid);
+	}
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_destroy *req;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ret;
+	u32 cid;
+
+	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
+
+	req = (struct fcoe_kwqe_destroy *) kwqe;
+	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
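+/* Build a synthetic completion for a KWQE that failed to be submitted
+ * (typically during parity error recovery) and deliver it to the owning
+ * ULP with a PARITY_ERROR status so the upper layer can clean up quickly.
+ */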
+static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kcqe kcqe;
+	struct kcqe *cqes[1];
+	u32 cid;
+	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+	u32 kcqe_op;
+	int ulp_type;
+
+	cid = kwqe->kwqe_info0;
+	memset(&kcqe, 0, sizeof(kcqe));
+
+	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
+		u32 l5_cid = 0;
+
+		ulp_type = CNIC_ULP_FCOE;
+		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
+			struct fcoe_kwqe_conn_enable_disable *req;
+
+			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
+			cid = req->context_id;
+			l5_cid = req->conn_id;
+		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
+			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
+		} else {
+			return;
+		}
+		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
+		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
+		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
+		kcqe.kcqe_info2 = cid;
+		kcqe.kcqe_info0 = l5_cid;
+
+	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+		ulp_type = CNIC_ULP_ISCSI;
+		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
+			cid = kwqe->kwqe_info1;
+
+		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
+		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
+		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
+		kcqe.kcqe_info2 = cid;
+		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
+
+	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
+		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
+
+		ulp_type = CNIC_ULP_L4;
+		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
+		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
+			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		else
+			return;
+
+		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
+				    KCQE_FLAGS_LAYER_MASK_L4;
+		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
+		l4kcqe->cid = cid;
+		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
+	} else {
+		return;
+	}
+
+	cqes[0] = &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
+}
+
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+					 struct kwqe *wqes[], u32 num_wqes)
+{
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case ISCSI_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_INIT2:
+			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
+						     num_wqes - i, &work);
+			break;
+		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
+			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CONNECT1:
+			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
+						 &work);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CLOSE:
+			ret = cnic_bnx2x_close(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_RESET:
+			ret = cnic_bnx2x_reset(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
+			ret = cnic_bnx2x_offload_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
+			ret = cnic_bnx2x_update_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
+			ret = 0;
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0) {
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+
+			/* Possibly bnx2x parity error, send completion
+			 * to ulp drivers with error code to speed up
+			 * cleanup and reset recovery.
+			 */
+			if (ret == -EIO || ret == -EAGAIN)
+				cnic_bnx2x_kwqe_err(dev, kwqe);
+		}
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+					struct kwqe *wqes[], u32 num_wqes)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+		return -EINVAL;
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case FCOE_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_ENABLE_CONN:
+			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DISABLE_CONN:
+			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY:
+			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_STAT:
+			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0) {
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+
+			/* Possibly bnx2x parity error, send completion
+			 * to ulp drivers with error code to speed up
+			 * cleanup and reset recovery.
+			 */
+			if (ret == -EIO || ret == -EAGAIN)
+				cnic_bnx2x_kwqe_err(dev, kwqe);
+		}
+		i += work;
+	}
+	return 0;
+}
+
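+/* Top-level KWQE submission entry point for bnx2x devices: route the batch
+ * to the iSCSI/L4 handler or the FCoE handler based on the layer code of
+ * the first KWQE.
+ */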
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int ret = -EINVAL;
+	u32 layer_code;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!num_wqes)
+		return 0;
+
+	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+	switch (layer_code) {
+	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+	case KWQE_FLAGS_LAYER_MASK_L4:
+	case KWQE_FLAGS_LAYER_MASK_L2:
+		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+		break;
+
+	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+		break;
+	}
+	return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+		return KCQE_FLAGS_LAYER_MASK_L4;
+
+	return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
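+/* Deliver completed KCQEs to the upper-layer drivers.  Consecutive KCQEs
+ * belonging to the same protocol layer are batched into one
+ * indicate_kcqes() call; ramrod completions are counted and their SPQ
+ * credits returned via cnic_spq_completion() afterwards.
+ */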
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j, comp = 0;
+
+	i = 0;
+	j = 1;
+	while (num_cqes) {
+		struct cnic_ulp_ops *ulp_ops;
+		int ulp_type;
+		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
+
+		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+			comp++;
+
+		while (j < num_cqes) {
+			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
+				break;
+
+			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+				comp++;
+			j++;
+		}
+
+		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+			ulp_type = CNIC_ULP_RDMA;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+			ulp_type = CNIC_ULP_FCOE;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+			ulp_type = CNIC_ULP_L4;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+			goto end;
+		else {
+			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
+				   kcqe_op_flag);
+			goto end;
+		}
+
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+		if (likely(ulp_ops)) {
+			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+						  cp->completed_kcq + i, j);
+		}
+		rcu_read_unlock();
+end:
+		num_cqes -= j;
+		i += j;
+		j = 1;
+	}
+	if (unlikely(comp))
+		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
+}
+
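+/* Collect new KCQEs, from the software producer index up to the hardware
+ * producer, into cp->completed_kcq[].  Entries flagged KCQE_FLAGS_NEXT are
+ * kept together with their successors so only complete multi-entry events
+ * are returned; sw_prod_idx is advanced past the returned entries.
+ */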
+static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 i, ri, hw_prod, last;
+	struct kcqe *kcqe;
+	int kcqe_cnt = 0, last_cnt = 0;
+
+	i = ri = last = info->sw_prod_idx;
+	ri &= MAX_KCQ_IDX;
+	hw_prod = *info->hw_prod_idx_ptr;
+	hw_prod = info->hw_idx(hw_prod);
+
+	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		cp->completed_kcq[kcqe_cnt++] = kcqe;
+		i = info->next_idx(i);
+		ri = i & MAX_KCQ_IDX;
+		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+			last_cnt = kcqe_cnt;
+			last = i;
+		}
+	}
+
+	info->sw_prod_idx = last;
+	return last_cnt;
+}
+
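+/* Scan the bnx2x L2 receive completion queue in the UIO ring for
+ * CLIENT_SETUP / HALT ramrod completions and return how many were found;
+ * cnic_chk_pkt_rings() uses the count to clear the L2_WAIT flag.
+ */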
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+	u16 hw_cons, sw_cons;
+	struct cnic_uio_dev *udev = cp->udev;
+	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
+	u32 cmd;
+	int comp = 0;
+
+	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+		return 0;
+
+	hw_cons = *cp->rx_cons_ptr;
+	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+		hw_cons++;
+
+	sw_cons = cp->rx_cons;
+	while (sw_cons != hw_cons) {
+		u8 cqe_fp_flags;
+
+		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+			    cmd == RAMROD_CMD_ID_ETH_HALT)
+				comp++;
+		}
+		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+	}
+	return comp;
+}
+
+static void cnic_chk_pkt_rings(struct cnic_local *cp)
+{
+	u16 rx_cons, tx_cons;
+	int comp = 0;
+
+	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	rx_cons = *cp->rx_cons_ptr;
+	tx_cons = *cp->tx_cons_ptr;
+	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			comp = cnic_l2_completion(cp);
+
+		cp->tx_cons = tx_cons;
+		cp->rx_cons = rx_cons;
+
+		if (cp->udev)
+			uio_event_notify(&cp->udev->cnic_uinfo);
+	}
+	if (comp)
+		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+}
+
+static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
+	int kcqe_cnt;
+
+	/* status block index must be read before reading other fields */
+	rmb();
+	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		status_idx = (u16) *cp->kcq1.status_idx_ptr;
+		/* status block index must be read first */
+		rmb();
+		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+	}
+
+	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
+
+	cnic_chk_pkt_rings(cp);
+
+	return status_idx;
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		struct status_block *sblk = status_blk;
+
+		return sblk->status_idx;
+	}
+
+	return cnic_service_bnx2_queues(dev);
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->last_status_idx = cnic_service_bnx2_queues(dev);
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_doirq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
+		prefetch(cp->status_blk.gen);
+		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+		tasklet_schedule(&cp->cnic_irq_task);
+	}
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+	struct cnic_dev *dev = dev_instance;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (cp->ack_int)
+		cp->ack_int(dev);
+
+	cnic_doirq(dev);
+
+	return IRQ_HANDLED;
+}
+
+static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
+				      u16 index, u8 op, u8 update)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
+}
+
+static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
+			    u16 index, u8 op, u8 update)
+{
+	struct igu_regular cmd_data;
+	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
+
+	cmd_data.sb_id_and_flags =
+		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
+		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+		(update << IGU_REGULAR_BUPDATE_SHIFT) |
+		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
+
+	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
+}
+
+static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
+			   IGU_INT_DISABLE, 0);
+}
+
+static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
+			IGU_INT_DISABLE, 0);
+}
+
+static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
+			   IGU_INT_ENABLE, 1);
+}
+
+static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
+			IGU_INT_ENABLE, 1);
+}
+
+static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
+{
+	u32 last_status = *info->status_idx_ptr;
+	int kcqe_cnt;
+
+	/* status block index must be read before reading the KCQ */
+	rmb();
+	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that sblk fields can change. */
+		barrier();
+
+		last_status = *info->status_idx_ptr;
+		/* status block index must be read before reading the KCQ */
+		rmb();
+	}
+	return last_status;
+}
+
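+/* Tasklet servicing the bnx2x KCQs.  KCQ1 is drained first; on chips that
+ * also support FCoE, KCQ2 is drained as well, and the status block is only
+ * re-armed through the IGU once no new completions arrived in between.
+ */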
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 status_idx, new_status_idx;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return;
+
+	while (1) {
+		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+		CNIC_WR16(dev, cp->kcq1.io_addr,
+			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+
+		if (!CNIC_SUPPORTS_FCOE(bp)) {
+			cp->arm_int(dev, status_idx);
+			break;
+		}
+
+		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+		if (new_status_idx != status_idx)
+			continue;
+
+		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+			  MAX_KCQ_IDX);
+
+		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
+				status_idx, IGU_INT_ENABLE, 1);
+
+		break;
+	}
+}
+
+static int cnic_service_bnx2x(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		cnic_doirq(dev);
+
+	cnic_chk_pkt_rings(cp);
+
+	return 0;
+}
+
+static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
+{
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (if_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+	mutex_lock(&cnic_lock);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+					    lockdep_is_held(&cnic_lock));
+	if (!ulp_ops) {
+		mutex_unlock(&cnic_lock);
+		return;
+	}
+	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+	mutex_unlock(&cnic_lock);
+
+	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
+		cnic_ulp_stop_one(cp, if_type);
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
+		if (!ulp_ops || !ulp_ops->cnic_start) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
+
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+	}
+}
+
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+	int rc;
+
+	mutex_lock(&cnic_lock);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+					    lockdep_is_held(&cnic_lock));
+	if (ulp_ops && ulp_ops->cnic_get_stats)
+		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+	else
+		rc = -ENODEV;
+	mutex_unlock(&cnic_lock);
+	return rc;
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+	struct cnic_dev *dev = data;
+	int ulp_type = CNIC_ULP_ISCSI;
+
+	switch (info->cmd) {
+	case CNIC_CTL_STOP_CMD:
+		cnic_hold(dev);
+
+		cnic_ulp_stop(dev);
+		cnic_stop_hw(dev);
+
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_START_CMD:
+		cnic_hold(dev);
+
+		if (!cnic_start_hw(dev))
+			cnic_ulp_start(dev);
+
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_STOP_ISCSI_CMD: {
+		struct cnic_local *cp = dev->cnic_priv;
+		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
+		break;
+	}
+	case CNIC_CTL_COMPLETION_CMD: {
+		struct cnic_ctl_completion *comp = &info->data.comp;
+		u32 cid = BNX2X_SW_CID(comp->cid);
+		u32 l5_cid;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+			break;
+
+		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
+			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+			if (unlikely(comp->error)) {
+				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+				netdev_err(dev->netdev,
+					   "CID %x CFC delete comp error %x\n",
+					   cid, comp->error);
+			}
+
+			ctx->wait_cond = 1;
+			wake_up(&ctx->waitq);
+		}
+		break;
+	}
+	case CNIC_CTL_FCOE_STATS_GET_CMD:
+		ulp_type = CNIC_ULP_FCOE;
+		/* fall through */
+	case CNIC_CTL_ISCSI_STATS_GET_CMD:
+		cnic_hold(dev);
+		cnic_copy_ulp_stats(dev, ulp_type);
+		cnic_put(dev);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl_prot(i);
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_init(dev);
+
+		ulp_put(ulp_ops);
+	}
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl_prot(i);
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
+
+		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_exit(dev);
+
+		ulp_put(ulp_ops);
+	}
+}
+
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_offload_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+	l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->sa0 = dev->mac_addr[0];
+	l4kwqe->sa1 = dev->mac_addr[1];
+	l4kwqe->sa2 = dev->mac_addr[2];
+	l4kwqe->sa3 = dev->mac_addr[3];
+	l4kwqe->sa4 = dev->mac_addr[4];
+	l4kwqe->sa5 = dev->mac_addr[5];
+
+	l4kwqe->etype = ETH_P_IP;
+	l4kwqe->ipid_start = DEF_IPID_START;
+	l4kwqe->host_opaque = csk->l5_cid;
+
+	if (csk->vlan_id) {
+		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+		l4kwqe->vlan_tag = csk->vlan_id;
+		l4kwqe->l2hdr_nbytes += 4;
+	}
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_update_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+	l4kwqe->pg_cid = csk->pg_cid;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->pg_host_opaque = csk->l5_cid;
+	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_upload *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->pg_cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
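+/* Build and submit the CONNECT1/2/3 KWQEs for a TCP connection request.
+ * IPv6 connections use all three KWQEs, IPv4 only CONNECT1 and CONNECT3;
+ * the MSS is derived from the path MTU minus the IP and TCP header sizes.
+ */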
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_connect_req1 *l4kwqe1;
+	struct l4_kwq_connect_req2 *l4kwqe2;
+	struct l4_kwq_connect_req3 *l4kwqe3;
+	struct kwqe *wqes[3];
+	u8 tcp_flags = 0;
+	int num_wqes = 2;
+
+	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+	l4kwqe3->flags =
+		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+	l4kwqe3->ka_timeout = csk->ka_timeout;
+	l4kwqe3->ka_interval = csk->ka_interval;
+	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+	l4kwqe3->tos = csk->tos;
+	l4kwqe3->ttl = csk->ttl;
+	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+	l4kwqe3->pmtu = csk->mtu;
+	l4kwqe3->rcv_buf = csk->rcv_buf;
+	l4kwqe3->snd_buf = csk->snd_buf;
+	l4kwqe3->seed = csk->seed;
+
+	wqes[0] = (struct kwqe *) l4kwqe1;
+	if (test_bit(SK_F_IPV6, &csk->flags)) {
+		wqes[1] = (struct kwqe *) l4kwqe2;
+		wqes[2] = (struct kwqe *) l4kwqe3;
+		num_wqes = 3;
+
+		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+		l4kwqe2->flags =
+			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+			       sizeof(struct tcphdr);
+	} else {
+		wqes[1] = (struct kwqe *) l4kwqe3;
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+			       sizeof(struct tcphdr);
+	}
+
+	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+	l4kwqe1->flags =
+		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+	l4kwqe1->cid = csk->cid;
+	l4kwqe1->pg_cid = csk->pg_cid;
+	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+	if (csk->tcp_flags & SK_TCP_NAGLE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+	if (csk->tcp_flags & SK_TCP_SACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+	l4kwqe1->tcp_flags = tcp_flags;
+
+	return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_close_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_reset_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+			  u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_sock *csk1;
+
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return -EINVAL;
+
+	if (cp->ctx_tbl) {
+		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+			return -EAGAIN;
+	}
+
+	csk1 = &cp->csk_tbl[l5_cid];
+	if (atomic_read(&csk1->ref_count))
+		return -EAGAIN;
+
+	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+		return -EBUSY;
+
+	csk1->dev = dev;
+	csk1->cid = cid;
+	csk1->l5_cid = l5_cid;
+	csk1->ulp_type = ulp_type;
+	csk1->context = context;
+
+	csk1->ka_timeout = DEF_KA_TIMEOUT;
+	csk1->ka_interval = DEF_KA_INTERVAL;
+	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+	csk1->tos = DEF_TOS;
+	csk1->ttl = DEF_TTL;
+	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+	csk1->rcv_buf = DEF_RCV_BUF;
+	csk1->snd_buf = DEF_SND_BUF;
+	csk1->seed = DEF_SEED;
+	csk1->tcp_flags = 0;
+
+	*csk = csk1;
+	return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+	if (csk->src_port) {
+		struct cnic_dev *dev = csk->dev;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
+		csk->src_port = 0;
+	}
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+		cnic_cm_upload_pg(csk);
+		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	}
+	cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	csk_hold(csk);
+	clear_bit(SK_F_INUSE, &csk->flags);
+	smp_mb__after_atomic();
+	while (atomic_read(&csk->ref_count) != 1)
+		msleep(1);
+	cnic_cm_cleanup(csk);
+
+	csk->flags = 0;
+	csk_put(csk);
+	return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+				struct net_device **vlan_dev)
+{
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		*vlan_dev = vlan_dev_real_dev(dev);
+		return vlan_dev_vlan_id(dev);
+	}
+	*vlan_dev = dev;
+	return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_INET)
+	struct rtable *rt;
+
+	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+	if (!IS_ERR(rt)) {
+		*dst = &rt->dst;
+		return 0;
+	}
+	return PTR_ERR(rt);
+#else
+	return -ENETUNREACH;
+#endif
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+			     struct dst_entry **dst)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	fl6.daddr = dst_addr->sin6_addr;
+	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+		fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+	*dst = ip6_route_output(&init_net, NULL, &fl6);
+	if ((*dst)->error) {
+		dst_release(*dst);
+		*dst = NULL;
+		return -ENETUNREACH;
+	} else
+		return 0;
+#endif
+
+	return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+					   int ulp_type)
+{
+	struct cnic_dev *dev = NULL;
+	struct dst_entry *dst;
+	struct net_device *netdev = NULL;
+	int err = -ENETUNREACH;
+
+	if (dst_addr->sin_family == AF_INET)
+		err = cnic_get_v4_route(dst_addr, &dst);
+	else if (dst_addr->sin_family == AF_INET6) {
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *) dst_addr;
+
+		err = cnic_get_v6_route(dst_addr6, &dst);
+	} else
+		return NULL;
+
+	if (err)
+		return NULL;
+
+	if (!dst->dev)
+		goto done;
+
+	cnic_get_vlan(dst->dev, &netdev);
+
+	dev = cnic_from_netdev(netdev);
+
+done:
+	dst_release(dst);
+	if (dev)
+		cnic_put(dev);
+	return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
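+/* Resolve the route for a cnic socket: look up the IPv4/IPv6 destination,
+ * record the destination address and port, pick up the VLAN id and MTU from
+ * the egress device when it maps back to this cnic device, and reserve a
+ * local TCP port from csk_port_tbl (the caller-supplied port if it falls in
+ * the CNIC range, otherwise a newly allocated one).
+ */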
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	int is_v6, rc = 0;
+	struct dst_entry *dst = NULL;
+	struct net_device *realdev;
+	__be16 local_port;
+	u32 port_id;
+
+	if (saddr->local.v6.sin6_family == AF_INET6 &&
+	    saddr->remote.v6.sin6_family == AF_INET6)
+		is_v6 = 1;
+	else if (saddr->local.v4.sin_family == AF_INET &&
+		 saddr->remote.v4.sin_family == AF_INET)
+		is_v6 = 0;
+	else
+		return -EINVAL;
+
+	clear_bit(SK_F_IPV6, &csk->flags);
+
+	if (is_v6) {
+		set_bit(SK_F_IPV6, &csk->flags);
+		cnic_get_v6_route(&saddr->remote.v6, &dst);
+
+		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+		       sizeof(struct in6_addr));
+		csk->dst_port = saddr->remote.v6.sin6_port;
+		local_port = saddr->local.v6.sin6_port;
+
+	} else {
+		cnic_get_v4_route(&saddr->remote.v4, &dst);
+
+		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+		csk->dst_port = saddr->remote.v4.sin_port;
+		local_port = saddr->local.v4.sin_port;
+	}
+
+	csk->vlan_id = 0;
+	csk->mtu = dev->netdev->mtu;
+	if (dst && dst->dev) {
+		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
+		if (realdev == dev->netdev) {
+			csk->vlan_id = vlan;
+			csk->mtu = dst_mtu(dst);
+		}
+	}
+
+	port_id = be16_to_cpu(local_port);
+	if (port_id >= CNIC_LOCAL_PORT_MIN &&
+	    port_id < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
+			port_id = 0;
+	} else
+		port_id = 0;
+
+	if (!port_id) {
+		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (port_id == -1) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		local_port = cpu_to_be16(port_id);
+	}
+	csk->src_port = local_port;
+
+err_out:
+	dst_release(dst);
+	return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+	csk->state = 0;
+	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+	clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	int err = 0;
+
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		return -EOPNOTSUPP;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+		return -EINVAL;
+
+	cnic_init_csk_state(csk);
+
+	err = cnic_get_route(csk, saddr);
+	if (err)
+		goto err_out;
+
+	err = cnic_resolve_addr(csk, saddr);
+	if (!err)
+		return 0;
+
+err_out:
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_abort_prep(csk))
+		return cnic_cm_abort_req(csk);
+
+	/* Getting here means that we haven't started connect, or
+	 * connect was not successful, or it has been reset by the target.
+	 */
+
+	cp->close_conn(csk, opcode);
+	if (csk->state != opcode) {
+		/* Wait for remote reset sequence to complete */
+		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			msleep(1);
+
+		return -EALREADY;
+	}
+
+	return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_close_prep(csk)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		return cnic_cm_close_req(csk);
+	} else {
+		/* Wait for remote reset sequence to complete */
+		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			msleep(1);
+
+		return -EALREADY;
+	}
+	return 0;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+			   u8 opcode)
+{
+	struct cnic_ulp_ops *ulp_ops;
+	int ulp_type = csk->ulp_type;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (ulp_ops) {
+		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+			ulp_ops->cm_connect_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			ulp_ops->cm_close_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+			ulp_ops->cm_remote_abort(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+			ulp_ops->cm_abort_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+			ulp_ops->cm_remote_close(csk);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+	if (cnic_offld_prep(csk)) {
+		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			cnic_cm_update_pg(csk);
+		else
+			cnic_cm_offload_pg(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 l5_cid = kcqe->pg_host_opaque;
+	u8 opcode = kcqe->op_code;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+	csk_hold(csk);
+	if (!cnic_in_use(csk))
+		goto done;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		goto done;
+	}
+	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
+	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk,
+			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+		goto done;
+	}
+
+	csk->pg_cid = kcqe->pg_cid;
+	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	cnic_cm_conn_req(csk);
+
+done:
+	csk_put(csk);
+}
+
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	ctx->timestamp = jiffies;
+	ctx->wait_cond = 1;
+	wake_up(&ctx->waitq);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+	u8 opcode = l4kcqe->op_code;
+	u32 l5_cid;
+	struct cnic_sock *csk;
+
+	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+		cnic_process_fcoe_term_conn(dev, kcqe);
+		return;
+	}
+	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		cnic_cm_process_offld_pg(dev, l4kcqe);
+		return;
+	}
+
+	l5_cid = l4kcqe->conn_id;
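+	/* Ramrod completions (opcode bit 0x80 set) report the L5 CID in
+	 * the cid field rather than in conn_id.
+	 */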
+	if (opcode & 0x80)
+		l5_cid = l4kcqe->cid;
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return;
+
+	csk = &cp->csk_tbl[l5_cid];
+	csk_hold(csk);
+
+	if (!cnic_in_use(csk)) {
+		csk_put(csk);
+		return;
+	}
+
+	switch (opcode) {
+	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
+		if (l4kcqe->status != 0) {
+			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+			cnic_cm_upcall(cp, csk,
+				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+		}
+		break;
+	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+		if (l4kcqe->status == 0)
+			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+		else if (l4kcqe->status ==
+			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
+			set_bit(SK_F_HW_ERR, &csk->flags);
+
+		smp_mb__before_atomic();
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+
+	case L5CM_RAMROD_CMD_ID_CLOSE: {
+		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+		if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+			netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+				    l4kcqe->status, l5kcqe->completion_status);
+			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+			/* Fall through */
+		} else {
+			break;
+		}
+	}
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
+			set_bit(SK_F_HW_ERR, &csk->flags);
+
+		cp->close_conn(csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+		/* after we already sent CLOSE_REQ */
+		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
+		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
+		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
+		else
+			cnic_cm_upcall(cp, csk, opcode);
+		break;
+	}
+	csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+	struct cnic_dev *dev = data;
+	int i;
+
+	for (i = 0; i < num; i++)
+		cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+	.indicate_kcqes		= cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	kfree(cp->csk_tbl);
+	cp->csk_tbl = NULL;
+	cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 port_id;
+
+	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+			      GFP_KERNEL);
+	if (!cp->csk_tbl)
+		return -ENOMEM;
+
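+	/* Seed the local port ID table at a random offset within the range */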
+	port_id = prandom_u32();
+	port_id %= CNIC_LOCAL_PORT_RANGE;
+	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+			     CNIC_LOCAL_PORT_MIN, port_id)) {
+		cnic_cm_free_mem(dev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		/* Unsolicited RESET_COMP or RESET_RECEIVED */
+		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+		csk->state = opcode;
+	}
+
+	/* Accept the event if any of the following is true:
+	 * 1. The event opcode matches the expected event in csk->state.
+	 * 2. The expected event is CLOSE_COMP or RESET_COMP, in which case
+	 *    any event is accepted.
+	 * 3. The expected event is 0, meaning the connection was never
+	 *    established, in which case we accept the opcode from cm_abort.
+	 */
+	if (opcode == csk->state || csk->state == 0 ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
+			if (csk->state == 0)
+				csk->state = opcode;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+		cnic_cm_upcall(cp, csk, opcode);
+		return;
+	}
+
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	cnic_close_conn(csk);
+	csk->state = opcode;
+	cnic_cm_upcall(cp, csk, opcode);
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+	u32 seed;
+
+	seed = prandom_u32();
+	cnic_ctx_wr(dev, 45, 0, seed);
+	return 0;
+}
+
+static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
+	union l5cm_specific_data l5_data;
+	u32 cmd = 0;
+	int close_complete = 0;
+
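+	/* Normal tear-down sequence: a RESET/CLOSE completion triggers a
+	 * SEARCHER_DELETE ramrod, its completion triggers TERMINATE_OFFLOAD,
+	 * and that completion finishes the close.  HW errors and sockets
+	 * that never completed PG offload skip the ramrods.
+	 */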
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		if (cnic_ready_to_close(csk, opcode)) {
+			if (test_bit(SK_F_HW_ERR, &csk->flags))
+				close_complete = 1;
+			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+			else
+				close_complete = 1;
+		}
+		break;
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+		break;
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		close_complete = 1;
+		break;
+	}
+	if (cmd) {
+		memset(&l5_data, 0, sizeof(l5_data));
+
+		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
+				    &l5_data);
+	} else if (close_complete) {
+		ctx->timestamp = jiffies;
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, csk->state);
+	}
+}
+
+static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!cp->ctx_tbl)
+		return;
+
+	if (!netif_running(dev->netdev))
+		return;
+
+	cnic_bnx2x_delete_wait(dev, 0);
+
+	cancel_delayed_work(&cp->delete_task);
+	flush_workqueue(cnic_wq);
+
+	if (atomic_read(&cp->iscsi_conn) != 0)
+		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+			    atomic_read(&cp->iscsi_conn));
+}
+
+static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 pfid = bp->pfid;
+	u32 port = BP_PORT(bp);
+
+	cnic_init_bnx2x_mac(dev);
+	cnic_bnx2x_set_tcp_options(dev, 0, 1);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
+
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
+		DEF_MAX_DA_COUNT);
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
+		DEF_MAX_CWND);
+	return 0;
+}
+
+static void cnic_delete_task(struct work_struct *work)
+{
+	struct cnic_local *cp;
+	struct cnic_dev *dev;
+	u32 i;
+	int need_resched = 0;
+
+	cp = container_of(work, struct cnic_local, delete_task.work);
+	dev = cp->dev;
+
+	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
+		struct drv_ctl_info info;
+
+		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+
+		memset(&info, 0, sizeof(struct drv_ctl_info));
+		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
+		cp->ethdev->drv_ctl(dev->netdev, &info);
+	}
+
+	for (i = 0; i < cp->max_cid_space; i++) {
+		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int err;
+
+		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
+		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			continue;
+
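+		/* Give the connection at least 2 seconds after its last
+		 * timestamp before destroying it; otherwise re-check later.
+		 */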
+		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+			need_resched = 1;
+			continue;
+		}
+
+		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			continue;
+
+		err = cnic_bnx2x_destroy_ramrod(dev, i);
+
+		cnic_free_bnx2x_conn_resc(dev, i);
+		if (!err) {
+			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+				atomic_dec(&cp->iscsi_conn);
+
+			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+		}
+	}
+
+	if (need_resched)
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(10));
+
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int err;
+
+	err = cnic_cm_alloc_mem(dev);
+	if (err)
+		return err;
+
+	err = cp->start_cm(dev);
+
+	if (err)
+		goto err_out;
+
+	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
+
+	dev->cm_create = cnic_cm_create;
+	dev->cm_destroy = cnic_cm_destroy;
+	dev->cm_connect = cnic_cm_connect;
+	dev->cm_abort = cnic_cm_abort;
+	dev->cm_close = cnic_cm_close;
+	dev->cm_select_dev = cnic_cm_select_dev;
+
+	cp->ulp_handle[CNIC_ULP_L4] = dev;
+	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+	return 0;
+
+err_out:
+	cnic_cm_free_mem(dev);
+	return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	if (!cp->csk_tbl)
+		return 0;
+
+	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+		struct cnic_sock *csk = &cp->csk_tbl[i];
+
+		clear_bit(SK_F_INUSE, &csk->flags);
+		cnic_cm_cleanup(csk);
+	}
+	cnic_cm_free_mem(dev);
+
+	return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+	u32 cid_addr;
+	int i;
+
+	cid_addr = GET_CID_ADDR(cid);
+
+	for (i = 0; i < CTX_SIZE; i += 4)
+		cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret = 0, i;
+	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
+		return 0;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		int j;
+		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+		u32 val;
+
+		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
+
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) cp->ctx_arr[i].mapping >> 32);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
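+		/* Poll for up to ~50 usec for the chip to ack the page
+		 * table write request.
+		 */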
+		for (j = 0; j < 10; j++) {
+
+			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		cp->disable_int_sync(dev);
+		tasklet_kill(&cp->cnic_irq_task);
+		free_irq(ethdev->irq_arr[0].vector, dev);
+	}
+}
+
+static int cnic_request_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
+	if (err)
+		tasklet_disable(&cp->cnic_irq_task);
+
+	return err;
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		int err, i = 0;
+		int sblk_num = cp->status_blk_num;
+		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
+		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
+			     (unsigned long) dev);
+		err = cnic_request_irq(dev);
+		if (err)
+			return err;
+
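+		/* Force status block updates until the completion producer
+		 * index reads back as 0.
+		 */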
+		while (cp->status_blk.bnx2->status_completion_producer_index &&
+		       i < 10) {
+			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+				1 << (11 + sblk_num));
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (cp->status_blk.bnx2->status_completion_producer_index) {
+			cnic_free_irq(dev);
+			goto failed;
+		}
+
+	} else {
+		struct status_block *sblk = cp->status_blk.gen;
+		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+		int i = 0;
+
+		while (sblk->status_completion_producer_index && i < 10) {
+			CNIC_WR(dev, BNX2_HC_COMMAND,
+				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (sblk->status_completion_producer_index)
+			goto failed;
+
+	}
+	return 0;
+
+failed:
+	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
+	return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+	synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct cnic_uio_dev *udev = cp->udev;
+	u32 cid_addr, tx_cid, sb_id;
+	u32 val, offset0, offset1, offset2, offset3;
+	int i;
+	struct bnx2_tx_bd *txbd;
+	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+	struct status_block *s_blk = cp->status_blk.gen;
+
+	sb_id = cp->status_blk_num;
+	tx_cid = 20;
+	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+		tx_cid = TX_TSS_CID + sb_id - 1;
+		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+			(TX_TSS_CID << 7));
+		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+	}
+	cp->tx_cons = *cp->tx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(tx_cid);
+	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
+		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+		for (i = 0; i < PHY_CTX_SIZE; i += 4)
+			cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		cnic_init_context(dev, tx_cid);
+		cnic_init_context(dev, tx_cid + 1);
+
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+	txbd = udev->l2_ring;
+
+	buf_map = udev->l2_buf_map;
+	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
+		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) ring_map >> 32;
+	cnic_ctx_wr(dev, cid_addr, offset2, val);
+	txbd->tx_bd_haddr_hi = val;
+
+	val = (u64) ring_map & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, offset3, val);
+	txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct cnic_uio_dev *udev = cp->udev;
+	u32 cid_addr, sb_id, val, coal_reg, coal_val;
+	int i;
+	struct bnx2_rx_bd *rxbd;
+	struct status_block *s_blk = cp->status_blk.gen;
+	dma_addr_t ring_map = udev->l2_ring_map;
+
+	sb_id = cp->status_blk_num;
+	cnic_init_context(dev, 2);
+	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+	coal_reg = BNX2_HC_COMMAND;
+	coal_val = CNIC_RD(dev, coal_reg);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+		coal_reg = BNX2_HC_COALESCE_NOW;
+		coal_val = 1 << (11 + sb_id);
+	}
+	i = 0;
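+	/* Force status block updates until the hardware reports a
+	 * non-zero rx consumer index.
+	 */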
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+		CNIC_WR(dev, coal_reg, coal_val);
+		udelay(10);
+		i++;
+		barrier();
+	}
+	cp->rx_cons = *cp->rx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(2);
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+	if (sb_id == 0)
+		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
+	else
+		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
+	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->rx_bd_len = cp->l2_single_buf_size;
+		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+	rxbd->rx_bd_haddr_hi = val;
+
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+	rxbd->rx_bd_haddr_lo = val;
+
+	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct kwqe *wqes[1], l2kwqe;
+
+	memset(&l2kwqe, 0, sizeof(l2kwqe));
+	wqes[0] = &l2kwqe;
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
+			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
+			       KWQE_OPCODE_SHIFT) | 2;
+	dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 val;
+
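+	/* Read the iSCSI MAC address for this function from shared memory */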
+	val = cp->func << 2;
+
+	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
+		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct status_block *sblk = cp->status_blk.gen;
+	u32 val, kcq_cid_addr, kwq_cid_addr;
+	int err;
+
+	cnic_set_bnx2_mac(dev);
+
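+	/* Set the kernel bypass block size in the MQ according to the host
+	 * page size, capped at 4K.
+	 */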
+	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	if (CNIC_PAGE_BITS > 12)
+		val |= (12 - 8)  << 4;
+	else
+		val |= (CNIC_PAGE_BITS - 8)  << 4;
+
+	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+	err = cnic_setup_5709_context(dev, 1);
+	if (err)
+		return err;
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->max_kwq_idx = MAX_KWQ_IDX;
+	cp->kwq_prod_idx = 0;
+	cp->kwq_con_idx = 0;
+	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
+		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+	else
+		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+	/* Initialize the kernel work queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kwq_info.pgtbl_map;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->kcq1.sw_prod_idx = 0;
+	cp->kcq1.hw_prod_idx_ptr =
+		&sblk->status_completion_producer_index;
+
+	cp->kcq1.status_idx_ptr = &sblk->status_idx;
+
+	/* Initialize the kernel complete queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kcq1.dma.pgtbl_map;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->int_num = 0;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *msblk = cp->status_blk.bnx2;
+		u32 sb_id = cp->status_blk_num;
+		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&msblk->status_completion_producer_index;
+		cp->kcq1.status_idx_ptr = &msblk->status_idx;
+		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
+		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+	}
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+	/* Enable Command Scheduler notification when we write to either
+	 * the Send Queue or Receive Queue producer indexes of the kernel
+	 * bypass contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	err = cnic_init_bnx2_irq(dev);
+	if (err) {
+		netdev_err(dev->netdev, "cnic_init_irq failed\n");
+		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+		return err;
+	}
+
+	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
+
+	return 0;
+}
+
+static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_offset = ethdev->ctx_tbl_offset;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		struct cnic_ctx *ctx = &cp->ctx_arr[i];
+		dma_addr_t map = ctx->mapping;
+
+		if (cp->ctx_align) {
+			unsigned long mask = cp->ctx_align - 1;
+
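+			/* Round the DMA address up to the ctx_align boundary */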
+			map = (map + mask) & ~mask;
+		}
+
+		cnic_ctx_tbl_wr(dev, start_offset + i, map);
+	}
+}
+
+static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err = 0;
+
+	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
+		     (unsigned long) dev);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+		err = cnic_request_irq(dev);
+
+	return err;
+}
+
+static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
+						u16 sb_id, u8 sb_index,
+						u8 disable)
+{
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+
+	u32 addr = BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+			offsetof(struct hc_status_block_data_e1x, index_data) +
+			sizeof(struct hc_index_data)*sb_index +
+			offsetof(struct hc_index_data, flags);
+	u16 flags = CNIC_RD16(dev, addr);
+	/* Clear the HC enabled bit, then set it unless we are disabling */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
+		  HC_INDEX_DATA_HC_ENABLED);
+	CNIC_WR16(dev, addr, flags);
+}
+
+static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u8 sb_id = cp->status_blk_num;
+
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+			offsetof(struct hc_status_block_data_e1x, index_data) +
+			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
+			offsetof(struct hc_index_data, timeout), 64 / 4);
+	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
+}
+
+static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
+{
+}
+
+static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
+				    struct client_init_ramrod_data *data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_uio_dev *udev = cp->udev;
+	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
+	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+	int i;
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
+	u32 val;
+
+	memset(txbd, 0, CNIC_PAGE_SIZE);
+
+	buf_map = udev->l2_buf_map;
+	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+		struct eth_tx_parse_bd_e1x *pbd_e1x =
+			&((txbd + 1)->parse_bd_e1x);
+		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
+		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
+
+		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+		reg_bd->addr_hi = start_bd->addr_hi;
+		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
+		start_bd->nbytes = cpu_to_le16(0x10);
+		start_bd->nbd = cpu_to_le16(3);
+		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
+		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+
+		if (BNX2X_CHIP_IS_E2_PLUS(bp))
+			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
+				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+		else
+			pbd_e1x->global_data = (UNICAST_ADDRESS <<
+				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
+	}
+
+	val = (u64) ring_map >> 32;
+	txbd->next_bd.addr_hi = cpu_to_le32(val);
+
+	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) ring_map & 0xffffffff;
+	txbd->next_bd.addr_lo = cpu_to_le32(val);
+
+	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
+
+	/* Other ramrod params */
+	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
+	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
+
+	/* reset xstorm per client statistics */
+	if (cli < MAX_STAT_COUNTER_ID) {
+		data->general.statistics_zero_flg = 1;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_counter_id = cli;
+	}
+
+	cp->tx_cons_ptr =
+		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
+}
+
+static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
+				    struct client_init_ramrod_data *data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_uio_dev *udev = cp->udev;
+	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
+				CNIC_PAGE_SIZE);
+	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
+				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
+	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+	int i;
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
+	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
+	u32 val;
+	dma_addr_t ring_map = udev->l2_ring_map;
+
+	/* General data */
+	data->general.client_id = cli;
+	data->general.activate_flg = 1;
+	data->general.sp_client_id = cli;
+	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->general.func_id = bp->pfid;
+
+	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+	}
+
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
+	rxbd->addr_hi = cpu_to_le32(val);
+	data->rx.bd_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
+	rxbd->addr_lo = cpu_to_le32(val);
+	data->rx.bd_page_base.lo = cpu_to_le32(val);
+
+	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
+	rxcqe->addr_hi = cpu_to_le32(val);
+	data->rx.cqe_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
+	rxcqe->addr_lo = cpu_to_le32(val);
+	data->rx.cqe_page_base.lo = cpu_to_le32(val);
+
+	/* Other ramrod params */
+	data->rx.client_qzone_id = cl_qzone_id;
+	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
+	data->rx.status_block_id = BNX2X_DEF_SB_ID;
+
+	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
+
+	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
+	data->rx.outer_vlan_removal_enable_flg = 1;
+	data->rx.silent_vlan_removal_flg = 1;
+	data->rx.silent_vlan_value = 0;
+	data->rx.silent_vlan_mask = 0xffff;
+
+	cp->rx_cons_ptr =
+		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
+	cp->rx_cons = *cp->rx_cons_ptr;
+}
+
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 pfid = bp->pfid;
+
+	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+	cp->kcq1.sw_prod_idx = 0;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	} else {
+		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+		cp->kcq2.sw_prod_idx = 0;
+		cp->kcq2.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+		cp->kcq2.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+}
+
+static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int func, ret;
+	u32 pfid;
+
+	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
+	cp->func = bp->pf_num;
+
+	func = CNIC_FUNC(cp);
+	pfid = bp->pfid;
+
+	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
+			       cp->iscsi_start_cid, 0);
+
+	if (ret)
+		return -ENOMEM;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
+					cp->fcoe_start_cid, 0);
+
+		if (ret)
+			return -ENOMEM;
+	}
+
+	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+	cnic_init_bnx2x_kcq(dev);
+
+	/* Only 1 EQ */
+	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
+		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
+		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
+		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
+		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
+		HC_INDEX_ISCSI_EQ_CONS);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
+		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
+		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
+
+	cnic_setup_bnx2x_context(dev);
+
+	ret = cnic_init_bnx2x_irq(dev);
+	if (ret)
+		return ret;
+
+	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
+	return 0;
+}
+
+static void cnic_init_rings(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	struct cnic_uio_dev *udev = cp->udev;
+
+	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_init_bnx2_tx_ring(dev);
+		cnic_init_bnx2_rx_ring(dev);
+		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
+		u32 cl_qzone_id;
+		struct client_init_ramrod_data *data;
+		union l5cm_specific_data l5_data;
+		struct ustorm_eth_rx_producers rx_prods = {0};
+		u32 off, i, *cid_ptr;
+
+		rx_prods.bd_prod = 0;
+		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
+		barrier();
+
+		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
+
+		off = BAR_USTRORM_INTMEM +
+			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
+			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
+			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
+
+		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
+			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+
+		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
+		data = udev->l2_buf;
+		cid_ptr = udev->l2_buf + 12;
+
+		memset(data, 0, sizeof(*data));
+
+		cnic_init_bnx2x_tx_ring(dev, data);
+		cnic_init_bnx2x_rx_ring(dev, data);
+
+		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
+		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
+
+		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+			cid, ETH_CONNECTION_TYPE, &l5_data);
+
+		i = 0;
+		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+		       ++i < 10)
+			msleep(1);
+
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			netdev_err(dev->netdev,
+				"iSCSI CLIENT_SETUP did not complete\n");
+		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+		cnic_ring_ctl(dev, cid, cli, 1);
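+		/* Publish the L2 CID and doorbell parameters to the
+		 * userspace UIO driver via the shared buffer.
+		 */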
+		*cid_ptr = cid >> 4;
+		*(cid_ptr + 1) = cid * bp->db_size;
+		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
+	}
+}
+
+static void cnic_shutdown_rings(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	void *rx_ring;
+
+	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_shutdown_bnx2_rx_ring(dev);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
+		union l5cm_specific_data l5_data;
+		int i;
+
+		cnic_ring_ctl(dev, cid, cli, 0);
+
+		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
+		l5_data.phy_address.lo = cli;
+		l5_data.phy_address.hi = 0;
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
+			cid, ETH_CONNECTION_TYPE, &l5_data);
+		i = 0;
+		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+		       ++i < 10)
+			msleep(1);
+
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			netdev_err(dev->netdev,
+				"iSCSI CLIENT_HALT did not complete\n");
+		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+
+		memset(&l5_data, 0, sizeof(l5_data));
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+			cid, NONE_CONNECTION_TYPE, &l5_data);
+		msleep(10);
+	}
+	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+	memset(rx_ring, 0, CNIC_PAGE_SIZE);
+}
+
+static int cnic_register_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+		return 0;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err)
+		netdev_err(dev->netdev, "register_cnic failed\n");
+
+	/* Read the iSCSI config again.  On some bnx2x devices, the iSCSI
+	 * config can change after the firmware is downloaded.
+	 */
+	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		dev->max_iscsi_conn = 0;
+
+	return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!ethdev)
+		return;
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
+
+	dev->regview = ethdev->io_base;
+	pci_dev_get(dev->pcidev);
+	cp->func = PCI_FUNC(dev->pcidev->devfn);
+	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+	err = cp->alloc_resc(dev);
+	if (err) {
+		netdev_err(dev->netdev, "allocate resource failure\n");
+		goto err1;
+	}
+
+	err = cp->start_hw(dev);
+	if (err)
+		goto err1;
+
+	err = cnic_cm_open(dev);
+	if (err)
+		goto err1;
+
+	set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+	cp->enable_int(dev);
+
+	return 0;
+
+err1:
+	cp->free_resc(dev);
+	pci_dev_put(dev->pcidev);
+	return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+	cnic_disable_bnx2_int_sync(dev);
+
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cnic_setup_5709_context(dev, 0);
+	cnic_free_irq(dev);
+
+	cnic_free_resc(dev);
+}
+
+
+static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
+	u32 sb_id = cp->status_blk_num;
+	u32 idx_off, syn_off;
+
+	cnic_free_irq(dev);
+
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		idx_off = offsetof(struct hc_status_block_e2, index_values) +
+			  (hc_index * sizeof(u16));
+
+		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
+	} else {
+		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
+			  (hc_index * sizeof(u16));
+
+		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
+	}
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
+		  idx_off, 0);
+
+	*cp->kcq1.hw_prod_idx_ptr = 0;
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
+	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+		int i = 0;
+
+		/* Need to wait for the ring shutdown event to complete
+		 * before clearing the CNIC_UP flag.
+		 */
+		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
+			msleep(100);
+			i++;
+		}
+		cnic_shutdown_rings(dev);
+		cp->stop_cm(dev);
+		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
+		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		synchronize_rcu();
+		cnic_cm_shutdown(dev);
+		cp->stop_hw(dev);
+		pci_dev_put(dev->pcidev);
+	}
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+	int i = 0;
+
+	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+		msleep(100);
+		i++;
+	}
+	if (atomic_read(&dev->ref_count) != 0)
+		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
+
+	netdev_info(dev->netdev, "Removed CNIC device\n");
+	dev_put(dev->netdev);
+	kfree(dev);
+}
+
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+				struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ret;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;     /* bnx2x is down */
+
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+		return -EINVAL;
+
+	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+	return ret;
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+				       struct pci_dev *pdev)
+{
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	int alloc_size;
+
+	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (cdev == NULL)
+		return NULL;
+
+	cdev->netdev = dev;
+	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+	cdev->register_device = cnic_register_device;
+	cdev->unregister_device = cnic_unregister_device;
+	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
+
+	cp = cdev->cnic_priv;
+	cp->dev = cdev;
+	cp->l2_single_buf_size = 0x400;
+	cp->l2_rx_ring_size = 3;
+
+	spin_lock_init(&cp->cnic_ulp_lock);
+
+	netdev_info(dev, "Added CNIC device\n");
+
+	return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *ethdev = NULL;
+
+	if (bp->cnic_probe)
+		ethdev = (bp->cnic_probe)(dev);
+
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	pci_dev_get(pdev);
+	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+	    (pdev->revision < 0x10)) {
+		pci_dev_put(pdev);
+		goto cnic_err;
+	}
+	pci_dev_put(pdev);
+
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL)
+		goto cnic_err;
+
+	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+	cp->chip_id = ethdev->chip_id;
+
+	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+
+	cp->cnic_ops = &cnic_bnx2_ops;
+	cp->start_hw = cnic_start_bnx2_hw;
+	cp->stop_hw = cnic_stop_bnx2_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl;
+	cp->alloc_resc = cnic_alloc_bnx2_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2_hw;
+	cp->enable_int = cnic_enable_bnx2_int;
+	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+	cp->close_conn = cnic_close_bnx2_conn;
+	return cdev;
+
+cnic_err:
+	dev_put(dev);
+	return NULL;
+}
+
+static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *ethdev = NULL;
+
+	if (bp->cnic_probe)
+		ethdev = bp->cnic_probe(dev);
+
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL) {
+		dev_put(dev);
+		return NULL;
+	}
+
+	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+	cp->chip_id = ethdev->chip_id;
+
+	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (CNIC_SUPPORTS_FCOE(bp)) {
+		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
+	}
+
+	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
+		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
+	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
+
+	cp->cnic_ops = &cnic_bnx2x_ops;
+	cp->start_hw = cnic_start_bnx2x_hw;
+	cp->stop_hw = cnic_stop_bnx2x_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl_le;
+	cp->alloc_resc = cnic_alloc_bnx2x_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2x_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
+	cp->enable_int = cnic_enable_bnx2x_int;
+	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
+	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
+		cp->ack_int = cnic_ack_bnx2x_e2_msix;
+		cp->arm_int = cnic_arm_bnx2x_e2_msix;
+	} else {
+		cp->ack_int = cnic_ack_bnx2x_msix;
+		cp->arm_int = cnic_arm_bnx2x_msix;
+	}
+	cp->close_conn = cnic_close_bnx2x_conn;
+	return cdev;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+	struct ethtool_drvinfo drvinfo;
+	struct cnic_dev *cdev = NULL;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+		if (!strcmp(drvinfo.driver, "bnx2"))
+			cdev = init_bnx2_cnic(dev);
+		if (!strcmp(drvinfo.driver, "bnx2x"))
+			cdev = init_bnx2x_cnic(dev);
+		if (cdev) {
+			write_lock(&cnic_dev_lock);
+			list_add(&cdev->list, &cnic_dev_list);
+			write_unlock(&cnic_dev_lock);
+		}
+	}
+	return cdev;
+}
+
+static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
+			      u16 vlan_id)
+{
+	int if_type;
+
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+		void *ctx;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						lockdep_is_held(&cnic_lock));
+		if (!ulp_ops || !ulp_ops->indicate_netevent) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+
+		ctx = cp->ulp_handle[if_type];
+
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
+
+		ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+	}
+}
+
+/* netdev event handler */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+							 void *ptr)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct cnic_dev *dev;
+	int new_dev = 0;
+
+	dev = cnic_from_netdev(netdev);
+
+	if (!dev && event == NETDEV_REGISTER) {
+		/* Check for the hot-plug device */
+		dev = is_cnic_dev(netdev);
+		if (dev) {
+			new_dev = 1;
+			cnic_hold(dev);
+		}
+	}
+	if (dev) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (new_dev)
+			cnic_ulp_init(dev);
+		else if (event == NETDEV_UNREGISTER)
+			cnic_ulp_exit(dev);
+
+		if (event == NETDEV_UP) {
+			if (cnic_register_netdev(dev) != 0) {
+				cnic_put(dev);
+				goto done;
+			}
+			if (!cnic_start_hw(dev))
+				cnic_ulp_start(dev);
+		}
+
+		cnic_rcv_netevent(cp, event, 0);
+
+		if (event == NETDEV_GOING_DOWN) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+			cnic_unregister_netdev(dev);
+		} else if (event == NETDEV_UNREGISTER) {
+			write_lock(&cnic_dev_lock);
+			list_del_init(&dev->list);
+			write_unlock(&cnic_dev_lock);
+
+			cnic_put(dev);
+			cnic_free_dev(dev);
+			goto done;
+		}
+		cnic_put(dev);
+	} else {
+		struct net_device *realdev;
+		u16 vid;
+
+		vid = cnic_get_vlan(netdev, &realdev);
+		if (realdev) {
+			dev = cnic_from_netdev(realdev);
+			if (dev) {
+				vid |= VLAN_TAG_PRESENT;
+				cnic_rcv_netevent(dev->cnic_priv, event, vid);
+				cnic_put(dev);
+			}
+		}
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+	.notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+	struct cnic_uio_dev *udev;
+
+	while (!list_empty(&cnic_udev_list)) {
+		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
+				  list);
+		cnic_free_uio(udev);
+	}
+}
+
+static int __init cnic_init(void)
+{
+	int rc = 0;
+
+	pr_info("%s", version);
+
+	rc = register_netdevice_notifier(&cnic_netdev_notifier);
+	if (rc) {
+		cnic_release();
+		return rc;
+	}
+
+	cnic_wq = create_singlethread_workqueue("cnic_wq");
+	if (!cnic_wq) {
+		cnic_release();
+		unregister_netdevice_notifier(&cnic_netdev_notifier);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+	unregister_netdevice_notifier(&cnic_netdev_notifier);
+	cnic_release();
+	destroy_workqueue(cnic_wq);
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
new file mode 100644
index 0000000..4baea81
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -0,0 +1,427 @@
+/* cnic.h: QLogic CNIC core network driver.
+ *
+ * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define HC_INDEX_ISCSI_EQ_CONS			6
+
+#define HC_INDEX_FCOE_EQ_CONS			3
+
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1
+
+#define KWQ_PAGE_CNT	4
+#define KCQ_PAGE_CNT	16
+
+#define KWQ_CID 		24
+#define KCQ_CID 		25
+
+/*
+ *	krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS	0x00000000
+#define L5_KRNLQ_SIZE	0x00000000
+#define L5_KRNLQ_TYPE	0x00000000
+#define KRNLQ_FLAGS_PG_SZ					(0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256					(0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512					(1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K					(2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K					(3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K					(4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K					(5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K					(6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K					(7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K					(8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K					(9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K					(10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K					(11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M					(12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M					(13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ					(1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE	((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE						(0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY					(0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ					(6<<28)
+
+#define L5_KRNLQ_HOST_QIDX		0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX		0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 	0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX	0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 	0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 	0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX		0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 		0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES		0x0000001c
+#define L5_KRNLQ_QIDX_INCR		0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 	0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 	0x00000024
+
+#define BNX2_PG_CTX_MAP			0x1a0034
+#define BNX2_ISCSI_CTX_MAP		0x1a0074
+
+#define MAX_COMPLETED_KCQE	64
+
+#define MAX_CNIC_L5_CONTEXT	256
+
+#define MAX_CM_SK_TBL_SZ	MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ	256
+
+#define CNIC_LOCAL_PORT_MIN	60000
+#define CNIC_LOCAL_PORT_MAX	61024
+#define CNIC_LOCAL_PORT_RANGE	(CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX	((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX	((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==		\
+		(MAX_KCQE_CNT - 1)) ?					\
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)						\
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_START		0x8000
+
+#define DEF_KA_TIMEOUT		10000
+#define DEF_KA_INTERVAL		300000
+#define DEF_KA_MAX_PROBE_COUNT	3
+#define DEF_TOS			0
+#define DEF_TTL			0xfe
+#define DEF_SND_SEQ_SCALE	0
+#define DEF_RCV_BUF		0xffff
+#define DEF_SND_BUF		0xffff
+#define DEF_SEED		0
+#define DEF_MAX_RT_TIME		500
+#define DEF_MAX_DA_COUNT	2
+#define DEF_SWS_TIMER		1000
+#define DEF_MAX_CWND		0xffff
+
+struct cnic_ctx {
+	u32		cid;
+	void		*ctx;
+	dma_addr_t	mapping;
+};
+
+#define BNX2_MAX_CID		0x2000
+
+struct cnic_dma {
+	int		num_pages;
+	void		**pg_arr;
+	dma_addr_t	*pg_map_arr;
+	int		pgtbl_size;
+	u32		*pgtbl;
+	dma_addr_t	pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t	lock;
+	u32		start;
+	u32		max;
+	u32		next;
+	unsigned long	*table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE	128
+
+struct kwqe_16_data {
+	u8	data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma		task_array_info;
+	struct cnic_dma		r2tq_info;
+	struct cnic_dma		hq_info;
+};
+
+struct cnic_context {
+	u32			cid;
+	struct kwqe_16_data	*kwqe_data;
+	dma_addr_t		kwqe_data_mapping;
+	wait_queue_head_t	waitq;
+	int			wait_cond;
+	unsigned long		timestamp;
+	unsigned long		ctx_flags;
+#define	CTX_FL_OFFLD_START	0
+#define	CTX_FL_DELETE_WAIT	1
+#define	CTX_FL_CID_ERROR	2
+	u8			ulp_proto_id;
+	union {
+		struct cnic_iscsi	*iscsi;
+	} proto;
+};
+
+struct kcq_info {
+	struct cnic_dma	dma;
+	struct kcqe	**kcq;
+
+	u16		*hw_prod_idx_ptr;
+	u16		sw_prod_idx;
+	u16		*status_idx_ptr;
+	u32		io_addr;
+
+	u16		(*next_idx)(u16);
+	u16		(*hw_idx)(u16);
+};
+
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
+struct cnic_uio_dev {
+	struct uio_info		cnic_uinfo;
+	u32			uio_dev;
+
+	int			l2_ring_size;
+	void			*l2_ring;
+	dma_addr_t		l2_ring_map;
+
+	int			l2_buf_size;
+	void			*l2_buf;
+	dma_addr_t		l2_buf_map;
+
+	struct cnic_dev		*dev;
+	struct pci_dev		*pdev;
+	struct list_head	list;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT	0
+#define ULP_F_START	1
+#define ULP_F_CALL_PENDING	2
+	struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	unsigned long cnic_local_flags;
+#define	CNIC_LCL_FL_KWQ_INIT		0x0
+#define	CNIC_LCL_FL_L2_WAIT		0x1
+#define	CNIC_LCL_FL_RINGS_INITED	0x2
+#define	CNIC_LCL_FL_STOP_ISCSI		0x4
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	struct cnic_uio_dev *udev;
+
+	int		l2_rx_ring_size;
+	int		l2_single_buf_size;
+
+	u16		*rx_cons_ptr;
+	u16		*tx_cons_ptr;
+	u16		rx_cons;
+	u16		tx_cons;
+
+	struct cnic_dma		kwq_info;
+	struct kwqe		**kwq;
+
+	struct cnic_dma		kwq_16_data_info;
+
+	u16		max_kwq_idx;
+
+	u16		kwq_prod_idx;
+	u32		kwq_io_addr;
+
+	u16		*kwq_con_idx_ptr;
+	u16		kwq_con_idx;
+
+	struct kcq_info	kcq1;
+	struct kcq_info	kcq2;
+
+	union {
+		void				*gen;
+		struct status_block_msix	*bnx2;
+		struct host_hc_status_block_e1x	*bnx2x_e1x;
+		/* index values - which counter to update */
+		#define SM_RX_ID		0
+		#define SM_TX_ID		1
+	} status_blk;
+
+	struct host_sp_status_block	*bnx2x_def_status_blk;
+
+	u32				status_blk_num;
+	u32				bnx2x_igu_sb_id;
+	u32				int_num;
+	u32				last_status_idx;
+	struct tasklet_struct		cnic_irq_task;
+
+	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock	*csk_tbl;
+	struct cnic_id_tbl	csk_port_tbl;
+
+	struct cnic_dma		gbl_buf_info;
+
+	struct cnic_iscsi	*iscsi_tbl;
+	struct cnic_context	*ctx_tbl;
+	struct cnic_id_tbl	cid_tbl;
+	atomic_t		iscsi_conn;
+	u32			iscsi_start_cid;
+
+	u32			fcoe_init_cid;
+	u32			fcoe_start_cid;
+	struct cnic_id_tbl	fcoe_cid_tbl;
+
+	u32			max_cid_space;
+
+	/* per connection parameters */
+	int			num_iscsi_tasks;
+	int			num_ccells;
+	int			task_array_size;
+	int			r2tq_size;
+	int			hq_size;
+	int			num_cqs;
+
+	struct delayed_work	delete_task;
+
+	struct cnic_ctx		*ctx_arr;
+	int			ctx_blks;
+	int			ctx_blk_size;
+	unsigned long		ctx_align;
+	int			cids_per_blk;
+
+	u32			chip_id;
+	int			func;
+
+	u32			shmem_base;
+
+	struct cnic_ops		*cnic_ops;
+	int			(*start_hw)(struct cnic_dev *);
+	void			(*stop_hw)(struct cnic_dev *);
+	void			(*setup_pgtbl)(struct cnic_dev *,
+					       struct cnic_dma *);
+	int			(*alloc_resc)(struct cnic_dev *);
+	void			(*free_resc)(struct cnic_dev *);
+	int			(*start_cm)(struct cnic_dev *);
+	void			(*stop_cm)(struct cnic_dev *);
+	void			(*enable_int)(struct cnic_dev *);
+	void			(*disable_int_sync)(struct cnic_dev *);
+	void			(*ack_int)(struct cnic_dev *);
+	void			(*arm_int)(struct cnic_dev *, u32 index);
+	void			(*close_conn)(struct cnic_sock *, u32 opcode);
+};
+
+struct bnx2x_bd_chain_next {
+	u32	addr_lo;
+	u32	addr_hi;
+	u8	reserved[8];
+};
+
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T 	(1)
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#define CDU_VALID_DATA(_cid, _region, _type)	\
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+
+#define CDU_CRC8(_cid, _region, _type)	\
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)	\
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
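
CDU_VALID_DATA() packs the connection ID, CDU region and connection type into a single value (cid in the bits above 8, region in bits 7:4, type in bits 3:0); CDU_CRC8() runs the kernel's calc_crc8() over that value, and CDU_RSRVD_VALUE_TYPE_A() keeps the low seven CRC bits with bit 7 forced on to form the context validation byte. The packing step is plain shift-and-mask, for example:

#include <assert.h>

/* Same packing as CDU_VALID_DATA(); the calc_crc8() step is omitted here. */
static unsigned int demo_cdu_valid_data(unsigned int cid, unsigned int region,
					unsigned int type)
{
	return (cid << 8) | ((region & 0xf) << 4) | (type & 0xf);
}

int main(void)
{
	/* e.g. cid 0x12, region 2 (CDU_REGION_NUMBER_XCM_AG), type 1 */
	assert(demo_cdu_valid_data(0x12, 2, 1) == 0x1221);
	return 0;
}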
+
+#define BNX2X_CONTEXT_MEM_SIZE		1024
+#define BNX2X_FCOE_CID			16
+
+#define BNX2X_ISCSI_START_CID		18
+#define BNX2X_ISCSI_NUM_CONNECTIONS	128
+#define BNX2X_ISCSI_TASK_CONTEXT_SIZE	128
+#define BNX2X_ISCSI_MAX_PENDING_R2TS	4
+#define BNX2X_ISCSI_R2TQE_SIZE		8
+#define BNX2X_ISCSI_HQ_BD_SIZE		64
+#define BNX2X_ISCSI_GLB_BUF_SIZE	64
+#define BNX2X_ISCSI_PBL_NOT_CACHED	0xff
+#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED	0xff
+
+#define BNX2X_FCOE_NUM_CONNECTIONS	1024
+
+#define BNX2X_FCOE_L5_CID_BASE		MAX_ISCSI_TBL_SZ
+
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
+
+#define BNX2X_RX_DESC_CNT		(BNX2_PAGE_SIZE / \
+					 sizeof(struct eth_rx_bd))
+#define BNX2X_MAX_RX_DESC_CNT		(BNX2X_RX_DESC_CNT - 2)
+#define BNX2X_RCQ_DESC_CNT		(BNX2_PAGE_SIZE / \
+					 sizeof(union eth_rx_cqe))
+#define BNX2X_MAX_RCQ_DESC_CNT		(BNX2X_RCQ_DESC_CNT - 1)
+
+#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) ==		\
+		(BNX2X_MAX_RCQ_DESC_CNT - 1)) ?				\
+		((x) + 2) : ((x) + 1)
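
BNX2X_NEXT_RCQE() advances a receive-completion ring index, but jumps by two when the following slot would be the last entry of a page, since that slot holds the next-page link rather than a real completion (BNX2X_MAX_RCQ_DESC_CNT is one less than the per-page entry count). Restated as a function, with an assumed 128 entries per page for illustration:

#define DEMO_RCQ_DESC_CNT	128	/* assumed: 4 KiB page / 32-byte CQE */
#define DEMO_MAX_RCQ_DESC_CNT	(DEMO_RCQ_DESC_CNT - 1)

/* Skip the last slot of each page; it is reserved for the next-page link. */
static unsigned int demo_next_rcqe(unsigned int x)
{
	if ((x & DEMO_MAX_RCQ_DESC_CNT) == (DEMO_MAX_RCQ_DESC_CNT - 1))
		return x + 2;	/* e.g. 126 -> 128, first slot of next page */
	return x + 1;
}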
+
+#define BNX2X_DEF_SB_ID			HC_SP_SB_ID
+
+#define BNX2X_SHMEM_MF_BLK_OFFSET	0x7e4
+
+#define BNX2X_SHMEM_ADDR(base, field)	(base + \
+					 offsetof(struct shmem_region, field))
+
+#define BNX2X_SHMEM2_ADDR(base, field)	(base + \
+					 offsetof(struct shmem2_region, field))
+
+#define BNX2X_SHMEM2_HAS(base, field)				\
+		((base) &&					\
+		 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) >	\
+		  offsetof(struct shmem2_region, field)))
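
BNX2X_SHMEM2_HAS() is the usual forward-compatibility check for a layout that grows over firmware revisions: a shmem2 field is treated as present only when the size the firmware advertises at the start of the region extends past that field's offset. The same idea in a self-contained form, with a made-up structure standing in for shmem2_region:

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for a versioned shared-memory layout. */
struct demo_shmem2 {
	uint32_t size;		/* firmware reports how much of the layout it fills */
	uint32_t old_field;
	uint32_t new_field;	/* added by a later firmware revision */
};

/* A field exists only if the advertised size reaches beyond its offset. */
static int demo_shmem2_has(uint32_t advertised_size, size_t field_offset)
{
	return advertised_size > field_offset;
}

/* usage: demo_shmem2_has(fw_size, offsetof(struct demo_shmem2, new_field)) */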
+
+#define BNX2X_MF_CFG_ADDR(base, field)				\
+			((base) + offsetof(struct mf_cfg, field))
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 		ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define CNIC_FUNC(cp)			((cp)->func)
+
+#define BNX2X_HW_CID(bp, x)		((BP_PORT(bp) << 23) | \
+					 (BP_VN(bp) << 17) | (x))
+
+#define BNX2X_SW_CID(x)			(x & 0x1ffff)
+
+#define BNX2X_CL_QZONE_ID(bp, cli)					\
+		(BNX2X_CHIP_IS_E2_PLUS(bp) ? cli :			\
+		 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID						\
+	(CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H :			\
+	 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 :	\
+	  MAX_STAT_COUNTER_ID_E1))
+#endif
+
+#define CNIC_SUPPORTS_FCOE(cp)						\
+	(BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
+
+#define CNIC_RAMROD_TMO			(HZ / 4)
+
+#endif
+
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
new file mode 100644
index 0000000..b384997
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -0,0 +1,5462 @@
+
+/* cnic.c: QLogic CNIC core network driver.
+ *
+ * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE       (8)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE			(0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT		(L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE		(L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT		(L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE	(L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD	(L5CM_RAMROD_CMD_ID_BASE + 15)
+
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC		(FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC		(FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC		(FCOE_KCQE_OPCODE_STAT_FUNC)
+#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN		(FCOE_KCQE_OPCODE_OFFLOAD_CONN)
+#define FCOE_RAMROD_CMD_ID_ENABLE_CONN		(FCOE_KCQE_OPCODE_ENABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DISABLE_CONN		(FCOE_KCQE_OPCODE_DISABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DESTROY_CONN		(FCOE_KCQE_OPCODE_DESTROY_CONN)
+#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN	(0x81)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS           (0)
+#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR         (4)
+#define L4_KCQE_COMPLETION_STATUS_PARITY_ERROR	    (0x81)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT           (0x93)
+
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL    (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG      (0x89)
+
+#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
+#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH              (0xa1)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0xF<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
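
Each bit-field in these definitions comes as a MASK/SHIFT pair, so a field is read by masking first and shifting second, and written the other way around. For instance, a sketch of accessing the layer code in the l4_kcq flags byte using the macros defined just above:

static inline unsigned int l4_kcq_get_layer_code(unsigned char flags)
{
	return (flags & L4_KCQ_LAYER_CODE) >> L4_KCQ_LAYER_CODE_SHIFT;
}

static inline unsigned char l4_kcq_set_layer_code(unsigned char flags,
						  unsigned int code)
{
	return (flags & ~L4_KCQ_LAYER_CODE) |
	       ((code << L4_KCQ_LAYER_CODE_SHIFT) & L4_KCQ_LAYER_CODE);
}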
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+/*
+ * bnx2x structures
+ */
+
+/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+	u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+	u8 __aux1_th;
+	u8 __aux1_val;
+	u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_vars2;
+	u8 __aux1_val;
+	u8 __aux1_th;
+#endif
+	u32 rel_seq;
+	u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved62;
+	u8 __reserved61;
+	u8 __reserved60;
+	u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved59;
+	u8 __reserved60;
+	u8 __reserved61;
+	u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved64;
+	u16 cq_u_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_prod;
+	u16 __reserved64;
+#endif
+	u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_vars3;
+	u16 cq_u_pend;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_pend;
+	u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __aux2_th;
+	u16 aux2_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 aux2_val;
+	u16 __aux2_th;
+#endif
+};
+
+/*
+ * The fcoe extra aggregative context section of Tstorm
+ */
+struct tstorm_fcoe_extra_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 __lcq_prod;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 __lcq_cons;
+	u32 __reserved2;
+};
+
+/*
+ * The fcoe aggregative context of Tstorm
+ */
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_fcoe_extra_ag_context_section __extra_section;
+};
+
+
+
+/*
+ * The tcp aggregative context section of Tstorm
+ */
+struct tstorm_tcp_tcp_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 snd_nxt;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 wnd_right_edge_local;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 snd_una;
+	u32 __reserved2;
+};
+
+/*
+ * The iscsi aggregative context of Tstorm
+ */
+struct tstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_tcp_tcp_ag_context_section tcp;
+};
+
+
+
+/*
+ * The fcoe aggregative context of Ustorm
+ */
+struct ustorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 pbf_tx_seq_ack;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_tx_seq_ack;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 expired_task_id;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 cq_prod;
+	u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_cons;
+	u16 cq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The iscsi aggregative context of Ustorm
+ */
+struct ustorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 __cq_local_comp_itt_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_local_comp_itt_val;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 agg_val1;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The fcoe aggregative context section of Xstorm
+ */
+struct xstorm_fcoe_extra_ag_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+	u8 __reserved_da_cnt;
+	u16 __mtu;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __mtu;
+	u8 __reserved_da_cnt;
+	u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+	u32 snd_nxt;
+	u32 __xfrqe_bd_addr_lo;
+	u32 __xfrqe_bd_addr_hi;
+	u32 __xfrqe_data1;
+#if defined(__BIG_ENDIAN)
+	u8 __agg_val8_th;
+	u8 __tx_dest;
+	u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
+	u8 __agg_val8_th;
+#endif
+	u32 __sq_base_addr_lo;
+	u32 __sq_base_addr_hi;
+	u32 __xfrq_base_addr_lo;
+	u32 __xfrq_base_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 __xfrq_cons;
+	u16 __xfrq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __xfrq_prod;
+	u16 __xfrq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars5;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars3;
+	u8 __reserved_force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved_force_pure_ack_cnt;
+	u8 __tcp_agg_vars3;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars5;
+#endif
+	u32 __tcp_agg_vars6;
+#if defined(__BIG_ENDIAN)
+	u16 __xfrqe_mng;
+	u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __tcp_agg_vars7;
+	u16 __xfrqe_mng;
+#endif
+	u32 __xfrqe_data0;
+	u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved3;
+	u8 __reserved2;
+	u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __da_only_cnt;
+	u8 __reserved2;
+	u16 __reserved3;
+#endif
+};
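A minimal driver-side sketch of how fields in these aggregative-context structures are typically accessed, assuming a hypothetical helper name and "sec" pointer: each flag is described by a pre-shifted MASK macro plus a _SHIFT macro, and the member order is mirrored under __BIG_ENDIAN/__LITTLE_ENDIAN so the firmware's view of each 32-bit word maps onto either host byte order.

static inline u8
example_get_sideband_sent_cf(const struct xstorm_fcoe_extra_ag_context_section *sec)
{
	/* the mask is pre-shifted, e.g. (0x3 << 4): mask first, then shift down */
	return (sec->tcp_agg_vars1 &
		XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF) >>
	       XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT;
}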
+
+/*
+ * The fcoe aggregative context of Xstorm
+ */
+struct xstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+	u8 __state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __state;
+	u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+	u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_cons;
+	u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+#endif
+	struct xstorm_fcoe_extra_ag_context_section __extra_section;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __agg_val11;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val11;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 confq_cons;
+	u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 confq_prod;
+	u16 confq_cons;
+#endif
+	u32 agg_vars8;
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 __cache_wqe_db;
+	u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_prod;
+	u16 __cache_wqe_db;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 completion_seq;
+	u32 confq_pbl_base_lo;
+	u32 confq_pbl_base_hi;
+};
+
+
+
+/*
+ * The tcp aggregative context section of Xstorm
+ */
+struct xstorm_tcp_tcp_ag_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+	u8 __da_cnt;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u8 __da_cnt;
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+	u32 snd_nxt;
+	u32 tx_wnd;
+	u32 snd_una;
+	u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+	u8 __agg_val8_th;
+	u8 __tx_dest;
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
+	u8 __agg_val8_th;
+#endif
+	u32 ack_to_far_end;
+	u32 rto_timer;
+	u32 ka_timer;
+	u32 ts_to_echo;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val7_th;
+	u16 __agg_val7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val7;
+	u16 __agg_val7_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars5;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars3;
+	u8 __force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __force_pure_ack_cnt;
+	u8 __tcp_agg_vars3;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars5;
+#endif
+	u32 tcp_agg_vars6;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc6;
+	u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __tcp_agg_vars7;
+	u16 __agg_misc6;
+#endif
+	u32 __agg_val10;
+	u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved3;
+	u8 __reserved2;
+	u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __da_only_cnt;
+	u8 __reserved2;
+	u16 __reserved3;
+#endif
+};
+
+/*
+ * The iscsi aggregative context of Xstorm
+ */
+struct xstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_cons;
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __gen_data;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#endif
+	u32 agg_vars8;
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_prod;
+	u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_prod;
+	u16 r2tq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 hq_cons_tcp_seq;
+	u32 exp_stat_sn;
+	u32 rst_seq_num;
+};
+
+
+/*
+ * The L5cm aggregative context of Xstorm
+ */
+struct xstorm_l5cm_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 agg_val4_th;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4_th;
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __gen_data;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+	u32 agg_vars8;
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 agg_misc0;
+	u16 agg_val4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4;
+	u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 completion_seq;
+	u32 agg_misc4;
+	u32 rst_seq_num;
+};
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+	__le16 aborted_task_id;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+
+/*
+ * Fixed-size structure so it can be placed inside a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+	u8 r_ctl;
+	u8 rsrv[3];
+	__le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+	__le32 buf_addr_hi;
+	__le32 buf_addr_lo;
+	__le16 buf_len;
+	__le16 rsrv0;
+	__le16 flags;
+	__le16 rsrv1;
+};
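A minimal sketch of filling one such buffer descriptor, assuming hypothetical locals "bd", "dma_addr" and "len"; the 64-bit DMA address is split into little-endian high/low words with the usual kernel helpers:

	bd->buf_addr_hi = cpu_to_le32(upper_32_bits(dma_addr));
	bd->buf_addr_lo = cpu_to_le32(lower_32_bits(dma_addr));
	bd->buf_len = cpu_to_le16(len);	/* buffer length in bytes */
	bd->flags = 0;			/* no per-BD flags set in this sketch */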
+
+
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+	struct regpair cur_buf_addr;
+	__le16 cur_buf_rem;
+	__le16 second_buf_rem;
+	struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+	__le16 cleaned_task_id;
+	__le16 rolled_tx_seq_cnt;
+	__le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * FCP RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+	u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * FCP RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+	struct regpair reserved0;
+	__le32 fcp_resid;
+	u8 scsi_status_code;
+	struct fcoe_fcp_rsp_flags fcp_flags;
+	__le16 retry_delay_timer;
+	__le32 fcp_rsp_len;
+	__le32 fcp_sns_len;
+};
+
+/*
+ * Fixed-size structure so it can be placed inside a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+	struct fcoe_fcp_rsp_payload payload;
+	struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+	u8 s_id[3];
+	u8 cs_ctl;
+	u8 d_id[3];
+	u8 r_ctl;
+	__le16 seq_cnt;
+	u8 df_ctl;
+	u8 seq_id;
+	u8 f_ctl[3];
+	u8 type;
+	__le32 parameters;
+	__le16 rx_id;
+	__le16 ox_id;
+};
+
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 mp_payload_len;
+	__le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+	struct fcoe_fcp_rsp_union fcp_rsp;
+	struct fcoe_abts_rsp_union abts_rsp;
+	struct fcoe_mp_rsp_union mp_rsp;
+	__le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+	__le32 rsrv0[6];
+	struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+	__le32 rsrv0[6];
+	struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * FCoE FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+	__le32 data_offset;
+	__le16 seq_cnt;
+	__le16 rsrv0;
+};
+
+/*
+ * FCoE external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+	__le32 rsrv0[6];
+	struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+	struct regpair cur_sge_addr;
+	__le16 cur_sge_off;
+	u8 cur_sge_idx;
+	u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+	struct fcoe_mul_sges_ctx mul_sgl;
+	struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+	__le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * FCP xfr rdy payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+	__le32 burst_len;
+	__le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+	__le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+	__le32 fcoe_conn_id;
+	__le32 completion_status;
+	__le32 fcoe_conn_context_id;
+	union fcoe_kcqe_params params;
+	__le16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
+
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+	u8 op_code;
+	u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+	__le16 num_tasks;
+	struct fcoe_kwqe_header hdr;
+	__le32 task_list_pbl_addr_lo;
+	__le32 task_list_pbl_addr_hi;
+	__le32 dummy_buffer_addr_lo;
+	__le32 dummy_buffer_addr_hi;
+	__le16 sq_num_wqes;
+	__le16 rq_num_wqes;
+	__le16 rq_buffer_log_size;
+	__le16 cq_num_wqes;
+	__le16 mtu;
+	u8 num_sessions_log;
+	u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+	u8 hsi_major_version;
+	u8 hsi_minor_version;
+	struct fcoe_kwqe_header hdr;
+	__le32 hash_tbl_pbl_addr_lo;
+	__le32 hash_tbl_pbl_addr_hi;
+	__le32 t2_hash_tbl_addr_lo;
+	__le32 t2_hash_tbl_addr_hi;
+	__le32 t2_ptr_hash_tbl_addr_lo;
+	__le32 t2_ptr_hash_tbl_addr_hi;
+	__le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 error_bit_map_lo;
+	__le32 error_bit_map_hi;
+	u8 perf_config;
+	u8 reserved21[3];
+	__le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+	__le16 fcoe_conn_id;
+	struct fcoe_kwqe_header hdr;
+	__le32 sq_addr_lo;
+	__le32 sq_addr_hi;
+	__le32 rq_pbl_addr_lo;
+	__le32 rq_pbl_addr_hi;
+	__le32 rq_first_pbe_addr_lo;
+	__le32 rq_first_pbe_addr_hi;
+	__le16 rq_prod;
+	__le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+	__le16 tx_max_fc_pay_len;
+	struct fcoe_kwqe_header hdr;
+	__le32 cq_addr_lo;
+	__le32 cq_addr_hi;
+	__le32 xferq_addr_lo;
+	__le32 xferq_addr_hi;
+	__le32 conn_db_addr_lo;
+	__le32 conn_db_addr_hi;
+	__le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+	__le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+	struct fcoe_kwqe_header hdr;
+	u8 s_id[3];
+	u8 tx_max_conc_seqs_c3;
+	u8 d_id[3];
+	u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+	__le32 reserved;
+	__le32 confq_first_pbe_addr_lo;
+	__le32 confq_first_pbe_addr_hi;
+	__le16 tx_total_conc_seqs;
+	__le16 rx_max_fc_pay_len;
+	__le16 rx_total_conc_seqs;
+	u8 rx_max_conc_seqs_c3;
+	u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+	u8 e_d_tov_timer_val;
+	u8 reserved2;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u8 dst_mac_addr_hi[2];
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	__le32 lcq_addr_lo;
+	__le32 lcq_addr_hi;
+	__le32 confq_pbl_base_addr_lo;
+	__le32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable/disable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	u8 dst_mac_addr_hi[2];
+	__le16 reserved1;
+	u8 s_id[3];
+	u8 vlan_flag;
+	u8 d_id[3];
+	u8 reserved3;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 stat_params_addr_lo;
+	__le32 stat_params_addr_hi;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+	struct fcoe_kwqe_init1 init1;
+	struct fcoe_kwqe_init2 init2;
+	struct fcoe_kwqe_init3 init3;
+	struct fcoe_kwqe_conn_offload1 conn_offload1;
+	struct fcoe_kwqe_conn_offload2 conn_offload2;
+	struct fcoe_kwqe_conn_offload3 conn_offload3;
+	struct fcoe_kwqe_conn_offload4 conn_offload4;
+	struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+	struct fcoe_kwqe_conn_destroy conn_destroy;
+	struct fcoe_kwqe_destroy destroy;
+	struct fcoe_kwqe_stat statistics;
+};
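A minimal sketch of composing the common KWQE header, assuming hypothetical variables "hdr", "op" and "layer_code" (the concrete opcode and layer-code values are defined elsewhere in the driver): the flags byte packs a 4-bit reserved field, a 3-bit layer code and one reserved bit.

	hdr->op_code = op;
	hdr->flags = (layer_code << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT) &
		     FCOE_KWQE_HEADER_LAYER_CODE;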
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+	struct fcoe_cached_sge_ctx cached_sge;
+	struct fcoe_ext_mul_sges_ctx sgl;
+	__le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0[3];
+};
+
+
+/*
+ * FCoE stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+	u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * FCoE rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+	u8 seq_id;
+	struct fcoe_s_stat_ctx s_stat;
+	__le16 seq_cnt;
+	__le32 low_exp_ro;
+	__le32 high_exp_ro;
+};
+
+
+/*
+ * FCoE rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+	struct fcoe_read_flow_info read_info;
+	union fcoe_comp_flow_info comp_info;
+	__le32 opaque[8];
+};
+
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+	__le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
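A minimal sketch of building one SQ element, assuming hypothetical "sqe", "task_id" and "toggle" variables: the single 16-bit word carries a 15-bit task id plus a toggle bit that typically flips each time the ring wraps.

	sqe->wqe = cpu_to_le16(((task_id << FCOE_SQE_TASK_ID_SHIFT) &
				FCOE_SQE_TASK_ID) |
			       (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT));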
+
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+	struct fcoe_fc_frame tx_frame;
+	struct fcoe_fcp_cmd_payload fcp_cmd;
+	struct fcoe_ext_cleanup_info cleanup;
+	struct fcoe_ext_abts_info abts;
+	struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+	__le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+	u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+	u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS_SHIFT 7
+	__le16 rsrv3;
+	__le32 verify_tx_seq;
+};
+
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+	union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+	__le32 data_2_trns;
+	__le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+	__le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+	__le16 rx_id;
+	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+	struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+	struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+	struct fcoe_rx_seq_ctx rx_seq_ctx;
+	union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+	struct fcoe_tce_tx_only txwr_only;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+
+
+
+
+
+
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+	__le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * Cached SGEs $$KEEP_ENDIANNESS$$
+ */
+struct common_fcoe_sgl {
+	struct fcoe_bd_ctx sge[3];
+};
+
+
+/*
+ * FCoE SQ/XFRQ element
+ */
+struct fcoe_cached_wqe {
+	struct fcoe_sqe sqe;
+	struct fcoe_xfrqe xfrqe;
+};
+
+
+/*
+ * FCoE connection enable/disable params passed by driver to FW in FCoE enable
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+	struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_offload_ramrod_params {
+	struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+	struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+	struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+	struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+
+struct ustorm_fcoe_mng_ctx {
+#if defined(__BIG_ENDIAN)
+	u8 mid_seq_proc_flag;
+	u8 tce_in_cam_flag;
+	u8 tce_on_ior_flag;
+	u8 en_cached_tce_flag;
+#elif defined(__LITTLE_ENDIAN)
+	u8 en_cached_tce_flag;
+	u8 tce_on_ior_flag;
+	u8 tce_in_cam_flag;
+	u8 mid_seq_proc_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 tce_cam_addr;
+	u8 cached_conn_flag;
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 cached_conn_flag;
+	u8 tce_cam_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 dma_tce_ram_addr;
+	u16 tce_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tce_ram_addr;
+	u16 dma_tce_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 wr_done_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 wr_done_seq;
+	u16 ox_id;
+#endif
+	struct regpair task_addr;
+};
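+
+/*
+ * The paired __BIG_ENDIAN/__LITTLE_ENDIAN layouts used from here on appear
+ * to mirror the field order within each 32-bit word so that, after the
+ * host's native byte ordering is applied, the memory image handed to the
+ * chip is the same on either kind of host; structures marked
+ * $$KEEP_ENDIANNESS$$ use explicit __le fields instead.
+ */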
+
+/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and
+ * used in FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_conn_id;
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+	u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_byte_en;
+	u8 func_id;
+	u8 port_id;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 port_id;
+	u8 func_id;
+	u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rx_total_conc_seqs;
+	u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_fc_pay_len;
+	u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 task_pbe_idx_off;
+	u8 task_in_page_log_size;
+	u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_conc_seqs;
+	u8 task_in_page_log_size;
+	u8 task_pbe_idx_off;
+#endif
+};
+
+/*
+ * FCoE 16-bit index structure
+ */
+struct fcoe_idx16_fields {
+	u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bit index union
+ */
+union fcoe_idx16_field_union {
+	struct fcoe_idx16_fields fields;
+	u16 val;
+};
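+
+/*
+ * Illustrative sketch, not taken from the original header: the union above
+ * lets the 15-bit index and its MSB flag be read either as a raw 16-bit
+ * value or field by field (raw_prod, index and msb are hypothetical locals):
+ *
+ *	union fcoe_idx16_field_union idx;
+ *
+ *	idx.val = raw_prod;
+ *	index = (idx.fields.fields & FCOE_IDX16_FIELDS_IDX) >>
+ *		FCOE_IDX16_FIELDS_IDX_SHIFT;
+ *	msb = (idx.fields.fields & FCOE_IDX16_FIELDS_MSB) >>
+ *	      FCOE_IDX16_FIELDS_MSB_SHIFT;
+ */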
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place_mng {
+#if defined(__BIG_ENDIAN)
+	u16 sge_off;
+	u8 num_sges;
+	u8 sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sge_idx;
+	u8 num_sges;
+	u16 sge_off;
+#endif
+};
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place {
+	struct ustorm_fcoe_data_place_mng cached_mng;
+	struct fcoe_bd_ctx cached_sge[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+union fcoe_u_tce_tx_wr_rx_rd_union {
+	struct fcoe_abts_info abts;
+	struct fcoe_cleanup_info cleanup;
+	struct fcoe_fw_tx_seq_ctx tx_seq_ctx;
+	u32 opaque[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+struct fcoe_u_tce_tx_wr_rx_rd {
+	union fcoe_u_tce_tx_wr_rx_rd_union union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+struct ustorm_fcoe_tce {
+	struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr;
+};
+
+struct ustorm_fcoe_cache_ctx {
+	u32 rsrv0;
+	struct ustorm_fcoe_data_place data_place;
+	struct ustorm_fcoe_tce tce;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+	struct ustorm_fcoe_mng_ctx mng_ctx;
+	struct ustorm_fcoe_params fcoe_params;
+	struct regpair cq_base_addr;
+	struct regpair rq_pbl_base;
+	struct regpair rq_cur_page_addr;
+	struct regpair confq_pbl_base_addr;
+	struct regpair conn_db_base;
+	struct regpair xfrq_base_addr;
+	struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+	union fcoe_idx16_field_union rq_cons;
+	union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	union fcoe_idx16_field_union rq_prod;
+	union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xfrq_prod;
+	u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_cons;
+	u16 xfrq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 lcq_cons;
+	u16 hc_cram_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hc_cram_address;
+	u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 confq_prod;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_agg_int;
+	u8 rsrv2;
+	u8 available_rqes;
+	u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sp_q_flush_cnt;
+	u8 available_rqes;
+	u8 rsrv2;
+	u8 hc_csdm_agg_int;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 num_pend_tasks;
+	u16 pbf_ack_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_ack_ram_addr;
+	u16 num_pend_tasks;
+#endif
+	struct ustorm_fcoe_cache_ctx cache_ctx;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+	struct regpair reserved0;
+	struct regpair reserved1;
+};
+
+/*
+ * Ethernet context section
+ */
+struct xstorm_fcoe_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
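+
+/*
+ * Illustrative sketch, not part of the original header: the VLAN tag carried
+ * in the params word above decomposes with the usual MASK/SHIFT pattern,
+ * e.g. (eth is a hypothetical pointer to this section):
+ *
+ *	vlan_id = (eth->params & XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID) >>
+ *		  XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT;
+ *	priority = (eth->params & XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY) >>
+ *		   XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT;
+ */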
+
+/*
+ * Flags used in FCoE context section - 1 byte
+ */
+struct xstorm_fcoe_context_flags {
+	u8 flags;
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7
+};
+
+struct xstorm_fcoe_tce {
+	struct fcoe_tce_tx_only txwr;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+};
+
+/*
+ * FCP_DATA parameters required for transmission
+ */
+struct xstorm_fcoe_fcp_data {
+	u32 io_rem;
+#if defined(__BIG_ENDIAN)
+	u16 cached_sge_off;
+	u8 cached_num_sges;
+	u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cached_sge_idx;
+	u8 cached_num_sges;
+	u16 cached_sge_off;
+#endif
+	u32 buf_addr_hi_0;
+	u32 buf_addr_lo_0;
+#if defined(__BIG_ENDIAN)
+	u16 num_of_pending_tasks;
+	u16 buf_len_0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_0;
+	u16 num_of_pending_tasks;
+#endif
+	u32 buf_addr_hi_1;
+	u32 buf_addr_lo_1;
+#if defined(__BIG_ENDIAN)
+	u16 task_pbe_idx_off;
+	u16 buf_len_1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_1;
+	u16 task_pbe_idx_off;
+#endif
+	u32 buf_addr_hi_2;
+	u32 buf_addr_lo_2;
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 buf_len_2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_2;
+	u16 ox_id;
+#endif
+};
+
+/*
+ * vlan configuration
+ */
+struct xstorm_fcoe_vlan_conf {
+	u8 vlan_conf;
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0)
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3)
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3
+#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4)
+#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4
+};
+
+/*
+ * FCoE 16-bit vlan structure
+ */
+struct fcoe_vlan_fields {
+	u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+/*
+ * FCoE 16-bit vlan union
+ */
+union fcoe_vlan_field_union {
+	struct fcoe_vlan_fields fields;
+	u16 val;
+};
+
+/*
+ * FCoE 16-bit vlan/vif union
+ */
+union fcoe_vlan_vif_field_union {
+	union fcoe_vlan_field_union vlan;
+	u16 vif;
+};
+
+/*
+ * FCoE context section
+ */
+struct xstorm_fcoe_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 cs_ctl;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rctl;
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 rctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tx_max_fc_pay_len;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+	u32 lcq_prod;
+#if defined(__BIG_ENDIAN)
+	u8 port_id;
+	u8 func_id;
+	u8 seq_id;
+	struct xstorm_fcoe_context_flags tx_flags;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_context_flags tx_flags;
+	u8 seq_id;
+	u8 func_id;
+	u8 port_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mtu;
+	u8 func_mode;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 func_mode;
+	u16 mtu;
+#endif
+	struct regpair confq_curr_page_addr;
+	struct fcoe_cached_wqe cached_wqe[8];
+	struct regpair lcq_base_addr;
+	struct xstorm_fcoe_tce tce;
+	struct xstorm_fcoe_fcp_data fcp_data;
+#if defined(__BIG_ENDIAN)
+	u8 tx_max_conc_seqs_c3;
+	u8 vlan_flag;
+	u8 dcb_val;
+	u8 data_pb_cmd_size;
+#elif defined(__LITTLE_ENDIAN)
+	u8 data_pb_cmd_size;
+	u8 dcb_val;
+	u8 vlan_flag;
+	u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_tx_stat_params_ram_addr;
+	u16 fcoe_tx_fc_seq_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 fcoe_tx_fc_seq_ram_addr;
+	u16 fcoe_tx_stat_params_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 fcp_cmd_line_credit;
+	u8 eth_hdr_size;
+	u16 pbf_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr;
+	u8 eth_hdr_size;
+	u8 fcp_cmd_line_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	union fcoe_vlan_vif_field_union multi_func_val;
+	u8 page_log_size;
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+	u8 page_log_size;
+	union fcoe_vlan_vif_field_union multi_func_val;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcp_cmd_frame_size;
+	u16 pbf_addr_ff;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr_ff;
+	u16 fcp_cmd_frame_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 vlan_num;
+	u8 cos;
+	u8 cache_xfrq_cons;
+	u8 cache_sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cache_sq_cons;
+	u8 cache_xfrq_cons;
+	u8 cos;
+	u8 vlan_num;
+#endif
+	u32 verify_tx_seq;
+};
+
+/*
+ * Xstorm FCoE Storm Context
+ */
+struct xstorm_fcoe_st_context {
+	struct xstorm_fcoe_eth_context_section eth;
+	struct xstorm_fcoe_context_section fcoe;
+};
+
+/*
+ * FCoE connection context
+ */
+struct fcoe_context {
+	struct ustorm_fcoe_st_context ustorm_st_context;
+	struct tstorm_fcoe_st_context tstorm_st_context;
+	struct xstorm_fcoe_ag_context xstorm_ag_context;
+	struct tstorm_fcoe_ag_context tstorm_ag_context;
+	struct ustorm_fcoe_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_fcoe_st_context xstorm_st_context;
+};
+
+/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_init_ramrod_params {
+	struct fcoe_kwqe_init1 init_kwqe1;
+	struct fcoe_kwqe_init2 init_kwqe2;
+	struct fcoe_kwqe_init3 init_kwqe3;
+	struct regpair eq_pbl_base;
+	__le32 eq_pbl_size;
+	__le32 reserved2;
+	__le16 eq_prod;
+	__le16 sb_num;
+	u8 sb_id;
+	u8 reserved0;
+	__le16 reserved1;
+};
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_stat_ramrod_params {
+	struct fcoe_kwqe_stat stat_kwqe;
+};
+
+/*
+ * CQ DB CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+	u16 cntr;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cntr;
+#endif
+};
+
+/*
+ * CQ DB pending completion ITT array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence to notify array, updated by driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+	u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe;
+	struct regpair task_pbl_base;
+	struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+	u16 hq_bd_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hq_bd_itt;
+#endif
+	u32 hq_bd_data_segment_len;
+	u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv;
+	u8 cq_proc_en_bit_map;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hq_bd_opcode;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 cq_proc_en_bit_map;
+	u8 rsrv;
+#endif
+	u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+	struct regpair rsrv1;
+};
+
+
+/*
+ * PDU header of an iSCSI SCSI command (read/write)
+ */
+struct iscsi_cmd_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 expected_data_transfer_length;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 scsi_command_block[4];
+};
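+
+/*
+ * Illustrative sketch, not taken from the original header: data_fields packs
+ * the iSCSI DataSegmentLength and TotalAHSLength of the BHS into one 32-bit
+ * word, so a driver would typically build it along these lines
+ * (data_seg_len and ahs_len are hypothetical locals):
+ *
+ *	hdr->data_fields =
+ *		((data_seg_len <<
+ *		  ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT) &
+ *		 ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH) |
+ *		((ahs_len <<
+ *		  ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT) &
+ *		 ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH);
+ */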
+
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+	struct regpair reserved[8];
+};
+
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_rq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_r2tq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_cq_db {
+#if defined(__BIG_ENDIAN)
+	u16 cq_sn;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cq_sn;
+#endif
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct rings_db {
+	struct ustorm_iscsi_rq_db rq;
+	struct ustorm_iscsi_r2tq_db r2tq;
+	struct ustorm_iscsi_cq_db cq[8];
+#if defined(__BIG_ENDIAN)
+	u16 rq_prod;
+	u16 r2tq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 r2tq_prod;
+	u16 rq_prod;
+#endif
+	struct regpair cq_pbl_base;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_placement_db {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+	u32 local_sge_0_address_hi;
+	u32 local_sge_0_address_lo;
+#if defined(__BIG_ENDIAN)
+	u16 curr_sge_offset;
+	u16 local_sge_0_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_0_size;
+	u16 curr_sge_offset;
+#endif
+	u32 local_sge_1_address_hi;
+	u32 local_sge_1_address_lo;
+#if defined(__BIG_ENDIAN)
+	u8 exp_padding_2b;
+	u8 nal_len_3b;
+	u16 local_sge_1_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_1_size;
+	u8 nal_len_3b;
+	u8 exp_padding_2b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 local_sge_index_2b;
+	u16 reserved7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved7;
+	u8 local_sge_index_2b;
+	u8 sgl_size;
+#endif
+	u32 rem_pdu;
+	u32 place_db_bitfield_1;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
+	u32 place_db_bitfield_2;
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
+	u32 nal;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24
+};
+
+/*
+ * Ustorm iSCSI Storm Context
+ */
+struct ustorm_iscsi_st_context {
+	u32 exp_stat_sn;
+	u32 exp_data_sn;
+	struct rings_db ring;
+	struct regpair task_pbl_base;
+	struct regpair tce_phy_addr;
+	struct ustorm_iscsi_placement_db place_db;
+	u32 reserved8;
+	u32 rem_rcv_len;
+#if defined(__BIG_ENDIAN)
+	u16 hdr_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hdr_itt;
+#endif
+	u32 nal_bytes;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_second_byte_union;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 task_pdu_cache_index;
+	u8 task_pbe_cache_index;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbe_cache_index;
+	u8 task_pdu_cache_index;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 hdr_second_byte_union;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 acDecrement;
+#elif defined(__LITTLE_ENDIAN)
+	u8 acDecrement;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+	u32 task_stat;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_opcode;
+	u8 num_cqs;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 num_cqs;
+	u8 hdr_opcode;
+#endif
+	u32 negotiated_rx;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
+	u32 negotiated_rx_and_flags;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
+};
+
+/*
+ * TCP context region, shared in TOE, RDMA and ISCSI
+ */
+struct tstorm_tcp_st_context_section {
+	u32 flags1;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
+	u32 flags2;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u8 tcp_sm_state;
+	u8 rto_exp;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rto_exp;
+	u8 tcp_sm_state;
+	u16 mss;
+#endif
+	u32 rcv_nxt;
+	u32 timestamp_recent;
+	u32 timestamp_recent_time;
+	u32 cwnd;
+	u32 ss_thresh;
+	u32 cwnd_accum;
+	u32 prev_seg_seq;
+	u32 expected_rel_seq;
+	u32 recover;
+#if defined(__BIG_ENDIAN)
+	u8 retransmit_count;
+	u8 ka_max_probe_count;
+	u8 persist_probe_count;
+	u8 ka_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_probe_count;
+	u8 persist_probe_count;
+	u8 ka_max_probe_count;
+	u8 retransmit_count;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 statistics_counter_id;
+	u8 ooo_support_mode;
+	u8 snd_wnd_scale;
+	u8 dup_ack_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 dup_ack_count;
+	u8 snd_wnd_scale;
+	u8 ooo_support_mode;
+	u8 statistics_counter_id;
+#endif
+	u32 retransmit_start_time;
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 isle_start_seq;
+	u32 isle_end_seq;
+#if defined(__BIG_ENDIAN)
+	u16 second_isle_address;
+	u16 recent_seg_wnd;
+#elif defined(__LITTLE_ENDIAN)
+	u16 recent_seg_wnd;
+	u16 second_isle_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 max_isles_ever_happened;
+	u8 isles_number;
+	u16 last_isle_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 last_isle_address;
+	u8 isles_number;
+	u8 max_isles_ever_happened;
+#endif
+	u32 max_rt_time;
+#if defined(__BIG_ENDIAN)
+	u16 lsb_mac_address;
+	u16 vlan_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_id;
+	u16 lsb_mac_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 msb_mac_address;
+	u16 mid_mac_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mid_mac_address;
+	u16 msb_mac_address;
+#endif
+	u32 rightmost_received_seq;
+};
+
+/*
+ * Termination variables
+ */
+struct iscsi_term_vars {
+	u8 BitMap;
+#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
+#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define ISCSI_TERM_VARS_RSRV (0x1<<7)
+#define ISCSI_TERM_VARS_RSRV_SHIFT 7
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct tstorm_iscsi_st_context_section {
+	u32 nalPayload;
+	u32 b2nh;
+#if defined(__BIG_ENDIAN)
+	u16 rq_cons;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u8 hdr_bytes_2_fetch;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hdr_bytes_2_fetch;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u16 rq_cons;
+#endif
+	struct regpair rq_db_phy_addr;
+#if defined(__BIG_ENDIAN)
+	struct iscsi_term_vars term_vars;
+	u8 rsrv1;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u8 rsrv1;
+	struct iscsi_term_vars term_vars;
+#endif
+	u32 process_nxt;
+};
+
+/*
+ * The iSCSI non-aggregative context of Tstorm
+ */
+struct tstorm_iscsi_st_context {
+	struct tstorm_tcp_st_context_section tcp;
+	struct tstorm_iscsi_st_context_section iscsi;
+};
+
+/*
+ * Ethernet context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 vlan_params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
+
+/*
+ * IPv4 context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_ip_v4_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_id;
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+	u16 __pbf_hdr_cmd_rsvd_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+	u8 tos;
+	u16 __pbf_hdr_cmd_rsvd_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_length;
+	u8 tos;
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+#endif
+	u32 ip_local_addr;
+#if defined(__BIG_ENDIAN)
+	u8 ttl;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u16 __pbf_hdr_cmd_rsvd_csum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_csum;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u8 ttl;
+#endif
+	u32 __pbf_hdr_cmd_rsvd_1;
+	u32 ip_remote_addr;
+};
+
+/*
+ * context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_padded_ip_v4_context_section {
+	struct xstorm_ip_v4_context_section ip_v4;
+	u32 reserved1[4];
+};
+
+/*
+ * IPv6 context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_ip_v6_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u8 hop_limit;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hop_limit;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+#endif
+	u32 priority_flow_label;
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
+	u32 ip_local_addr_lo_hi;
+	u32 ip_local_addr_lo_lo;
+	u32 ip_local_addr_hi_hi;
+	u32 ip_local_addr_hi_lo;
+	u32 ip_remote_addr_lo_hi;
+	u32 ip_remote_addr_lo_lo;
+	u32 ip_remote_addr_hi_hi;
+	u32 ip_remote_addr_hi_lo;
+};
+
+union xstorm_ip_context_section_types {
+	struct xstorm_padded_ip_v4_context_section padded_ip_v4;
+	struct xstorm_ip_v6_context_section ip_v6;
+};
+
+/*
+ * TCP context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_tcp_context_section {
+	u32 snd_max;
+#if defined(__BIG_ENDIAN)
+	u16 remote_port;
+	u16 local_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_port;
+	u16 remote_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 original_nagle_1b;
+	u8 ts_enabled;
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+	u8 ts_enabled;
+	u8 original_nagle_1b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pseudo_csum;
+	u16 window_scaling_factor;
+#elif defined(__LITTLE_ENDIAN)
+	u16 window_scaling_factor;
+	u16 pseudo_csum;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u8 statistics_counter_id;
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+	u8 statistics_counter_id;
+	u16 reserved2;
+#endif
+	u32 ts_time_diff;
+	u32 __next_timer_expir;
+};
+
+/*
+ * Common context section, shared in TOE, RDMA and ISCSI
+ */
+struct xstorm_common_context_section {
+	struct xstorm_eth_context_section ethernet;
+	union xstorm_ip_context_section_types ip_union;
+	struct xstorm_tcp_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u8 __dcb_val;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 reserved;
+	u8 ip_version_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ip_version_1b;
+	u8 reserved;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 __dcb_val;
+#endif
+};
+
+/*
+ * Flags used in ISCSI context section
+ */
+struct xstorm_iscsi_context_flags {
+	u8 flags;
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
+};
+
+struct iscsi_task_context_entry_x {
+	u32 data_out_buffer_offset;
+	u32 itt;
+	u32 data_sn;
+};
+
+struct iscsi_task_context_entry_xuc_x_write_only {
+	u32 tx_r2t_sn;
+};
+
+struct iscsi_task_context_entry_xuc_xu_write_both {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
+#endif
+};
+
+/*
+ * iSCSI context section
+ */
+struct xstorm_iscsi_context_section {
+	u32 first_burst_length;
+	u32 max_send_pdu_length;
+	struct regpair sq_pbl_base;
+	struct regpair sq_curr_pbe;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe_base;
+	struct regpair r2tq_pbl_base;
+	struct regpair r2tq_curr_pbe_base;
+	struct regpair task_pbl_base;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	struct xstorm_iscsi_context_flags flags;
+	u8 task_pbl_cache_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbl_cache_idx;
+	struct xstorm_iscsi_context_flags flags;
+	u16 data_out_count;
+#endif
+	u32 seq_more_2_send;
+	u32 pdu_more_2_send;
+	struct iscsi_task_context_entry_x temp_tce_x;
+	struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
+	struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
+	struct regpair lun;
+	u32 exp_data_transfer_len_ttt;
+	u32 pdu_data_2_rxmit;
+	u32 rxmit_bytes_2_dr;
+#if defined(__BIG_ENDIAN)
+	u16 rxmit_sge_offset;
+	u16 hq_rxmit_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_rxmit_cons;
+	u16 rxmit_sge_offset;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_cons;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u8 rxmit_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rxmit_sge_idx;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u16 r2tq_cons;
+#endif
+	u32 hq_rxmit_tcp_seq;
+};
+
+/*
+ * Xstorm iSCSI Storm Context
+ */
+struct xstorm_iscsi_st_context {
+	struct xstorm_common_context_section common;
+	struct xstorm_iscsi_context_section iscsi;
+};
+
+/*
+ * iSCSI connection context
+ */
+struct iscsi_context {
+	struct ustorm_iscsi_st_context ustorm_st_context;
+	struct tstorm_iscsi_st_context tstorm_st_context;
+	struct xstorm_iscsi_ag_context xstorm_ag_context;
+	struct tstorm_iscsi_ag_context tstorm_ag_context;
+	struct cstorm_iscsi_ag_context cstorm_ag_context;
+	struct ustorm_iscsi_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct regpair upb_context;
+	struct xstorm_iscsi_st_context xstorm_st_context;
+	struct regpair xpb_context;
+	struct cstorm_iscsi_st_context cstorm_st_context;
+};
+
+
+/*
+ * PDU header of an iSCSI DATA-OUT
+ */
+struct iscsi_data_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 rsrv2;
+	u32 exp_stat_sn;
+	u32 rsrv3;
+	u32 data_sn;
+	u32 buffer_offset;
+	u32 rsrv4;
+};
+
+
+/*
+ * PDU header of an iSCSI login request
+ */
+struct iscsi_login_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv2[4];
+};
+
+/*
+ * PDU header of an iSCSI logout request
+ */
+struct iscsi_logout_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 rsrv2[2];
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI TMF request
+ */
+struct iscsi_tmf_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 referenced_task_tag;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 ref_cmd_sn;
+	u32 exp_data_sn;
+	u32 rsrv2[2];
+};
+
+/*
+ * PDU header of an iSCSI Text request
+ */
+struct iscsi_text_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI Nop-Out
+ */
+struct iscsi_nop_out_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * iSCSI PDU headers in little-endian form.
+ */
+union iscsi_pdu_headers_little_endian {
+	u32 fullHeaderSize[12];
+	struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr;
+	struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr;
+	struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr;
+	struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr;
+	struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr;
+	struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr;
+	struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr;
+};
+
+struct iscsi_hq_bd {
+	union iscsi_pdu_headers_little_endian pdu_header;
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 lcl_cmp_flg;
+#elif defined(__LITTLE_ENDIAN)
+	u16 lcl_cmp_flg;
+	u16 reserved1;
+#endif
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
+#endif
+};
+
+
+/*
+ * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$
+ */
+struct iscsi_l2_ooo_data {
+	__le32 iscsi_cid;
+	u8 drop_isle;
+	u8 drop_size;
+	u8 ooo_opcode;
+	u8 ooo_isle;
+	u8 reserved[8];
+};
+
+
+
+
+
+
+struct iscsi_task_context_entry_xuc_c_write_only {
+	u32 total_data_acked;
+};
+
+struct iscsi_task_context_r2t_table_entry {
+	u32 ttt;
+	u32 desired_data_len;
+};
+
+struct iscsi_task_context_entry_xuc_u_write_only {
+	u32 exp_r2t_sn;
+	struct iscsi_task_context_r2t_table_entry r2t_table[4];
+#if defined(__BIG_ENDIAN)
+	u16 data_in_count;
+	u8 cq_id;
+	u8 valid_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 valid_1b;
+	u8 cq_id;
+	u16 data_in_count;
+#endif
+};
+
+struct iscsi_task_context_entry_xuc {
+	struct iscsi_task_context_entry_xuc_c_write_only write_c;
+	u32 exp_data_transfer_len;
+	struct iscsi_task_context_entry_xuc_x_write_only write_x;
+	u32 lun_lo;
+	struct iscsi_task_context_entry_xuc_xu_write_both write_xu;
+	u32 lun_hi;
+	struct iscsi_task_context_entry_xuc_u_write_only write_u;
+};
+
+struct iscsi_task_context_entry_u {
+	u32 exp_r2t_buff_offset;
+	u32 rem_rcv_len;
+	u32 exp_data_sn;
+};
+
+struct iscsi_task_context_entry {
+	struct iscsi_task_context_entry_x tce_x;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u16 data_out_count;
+#endif
+	struct iscsi_task_context_entry_xuc tce_xuc;
+	struct iscsi_task_context_entry_u tce_u;
+	u32 rsrv1[7];
+};
+
+
+
+
+
+
+
+
+struct iscsi_task_context_entry_xuc_x_init_only {
+	struct regpair lun;
+	u32 exp_data_transfer_len;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * IPv6 address structure
+ */
+struct ip_v6_addr {
+	u32 ip_addr_lo_lo;
+	u32 ip_addr_lo_hi;
+	u32 ip_addr_hi_lo;
+	u32 ip_addr_hi_hi;
+};
+
+
+
+/*
+ * l5cm - connection identification params
+ */
+struct l5cm_conn_addr_params {
+	u32 pmtu;
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+#endif
+	struct ip_v6_addr local_ip_addr;
+	struct ip_v6_addr remote_ip_addr;
+	u32 ipv6_flow_label_20b;
+	u32 reserved1;
+#if defined(__BIG_ENDIAN)
+	u16 remote_tcp_port;
+	u16 local_tcp_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_tcp_port;
+	u16 remote_tcp_port;
+#endif
+};
+
+/*
+ * l5cm-xstorm connection buffer
+ */
+struct l5cm_xstorm_conn_buffer {
+#if defined(__BIG_ENDIAN)
+	u16 rsrv1;
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u16 rsrv1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u16 pseudo_header_checksum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pseudo_header_checksum;
+	u16 mss;
+#endif
+	u32 rcv_buf;
+	u32 rsrv2;
+	struct regpair context_addr;
+};
+
+/*
+ * l5cm-tstorm connection buffer
+ */
+struct l5cm_tstorm_conn_buffer {
+	u32 rsrv1[2];
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u8 ka_max_probe_count;
+	u8 ka_enable;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_enable;
+	u8 ka_max_probe_count;
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 max_rt_time;
+};
+
+/*
+ * l5cm connection buffer for active side
+ */
+struct l5cm_active_conn_buffer {
+	struct l5cm_conn_addr_params conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
+};
+
+
+
+/*
+ * The l5cm opaque buffer passed in the add-new-connection ramrod (passive side)
+ */
+struct l5cm_hash_input_string {
+	u32 __opaque1;
+#if defined(__BIG_ENDIAN)
+	u16 __opaque3;
+	u16 __opaque2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __opaque2;
+	u16 __opaque3;
+#endif
+	struct ip_v6_addr __opaque4;
+	struct ip_v6_addr __opaque5;
+	u32 __opaque6;
+	u32 __opaque7[5];
+};
+
+
+/*
+ * syn cookie component
+ */
+struct l5cm_syn_cookie_comp {
+	u32 __opaque;
+};
+
+/*
+ * data related to listeners of a TCP port
+ */
+struct l5cm_port_listener_data {
+	u8 params;
+#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0)
+#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1)
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5)
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6)
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7)
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7
+};
+
+/*
+ * Opaque structure passed from U to X when final ack arrives
+ */
+struct l5cm_opaque_buf {
+	u32 __opaque1;
+	u32 __opaque2;
+	u32 __opaque3;
+	u32 __opaque4;
+	struct l5cm_syn_cookie_comp __opaque5;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv2;
+	u8 rsrv;
+	struct l5cm_port_listener_data __opaque6;
+#elif defined(__LITTLE_ENDIAN)
+	struct l5cm_port_listener_data __opaque6;
+	u8 rsrv;
+	u16 rsrv2;
+#endif
+};
+
+
+/*
+ * l5cm slow path element
+ */
+struct l5cm_packet_size {
+	u32 size;
+	u32 rsrv;
+};
+
+
+/*
+ * The final-ack union structure in the PCS entry after the final ack has arrived
+ */
+struct l5cm_pcse_ack {
+	struct l5cm_xstorm_conn_buffer tx_socket_params;
+	struct l5cm_opaque_buf opaque_buf;
+	struct l5cm_tstorm_conn_buffer rx_socket_params;
+};
+
+
+/*
+ * The syn union structure in the PCS entry after the syn has arrived
+ */
+struct l5cm_pcse_syn {
+	struct l5cm_opaque_buf opaque_buf;
+	u32 rsrv[12];
+};
+
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_attributes {
+#if defined(__BIG_ENDIAN)
+	u16 pcs_id;
+	u8 status;
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+	u8 status;
+	u16 pcs_id;
+#endif
+};
+
+
+union l5cm_seg_params {
+	struct l5cm_pcse_syn syn_seg_params;
+	struct l5cm_pcse_ack ack_seg_params;
+};
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_hdr {
+	struct l5cm_hash_input_string hash_input_string;
+	struct l5cm_conn_addr_params conn_addr_buf;
+	u32 cid;
+	u32 hash_result;
+	union l5cm_seg_params seg_params;
+	struct l5cm_pcs_attributes att;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv;
+	u16 rx_seg_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_seg_size;
+	u16 rsrv;
+#endif
+};
+
+/*
+ * pcs entry for passive connections
+ */
+struct l5cm_pcs_entry {
+	struct l5cm_pcs_hdr hdr;
+	u8 rx_segment[1516];
+};
+
+
+
+
+/*
+ * l5cm connection parameters
+ */
+union l5cm_reduce_param_union {
+	u32 opaque1;
+	u32 opaque2;
+};
+
+/*
+ * l5cm connection parameters
+ */
+struct l5cm_reduce_conn {
+	union l5cm_reduce_param_union opaque1;
+	u32 opaque2;
+};
+
+/*
+ * l5cm slow path element
+ */
+union l5cm_specific_data {
+	u8 protocol_data[8];
+	struct regpair phy_address;
+	struct l5cm_packet_size packet_size;
+	struct l5cm_reduce_conn reduced_conn;
+};
+
+/*
+ * l5 slow path element
+ */
+struct l5cm_spe {
+	struct spe_hdr hdr;
+	union l5cm_specific_data data;
+};
+
+
+
+
+/*
+ * Termination variables
+ */
+struct l5cm_term_vars {
+	u8 BitMap;
+#define L5CM_TERM_VARS_TCP_STATE (0xF<<0)
+#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define L5CM_TERM_VARS_RSRV (0x1<<7)
+#define L5CM_TERM_VARS_RSRV_SHIFT 7
+};
+
+
+
+
+/*
+ * Tstorm Tcp flags
+ */
+struct tstorm_l5cm_tcp_flags {
+	u16 flags;
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
+};
+
+
+/*
+ * Xstorm Tcp flags
+ */
+struct xstorm_l5cm_tcp_flags {
+	u8 flags;
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
+#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
+#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
+};
+
+
+
+/*
+ * Out-of-order events
+ */
+enum tcp_ooo_event {
+	TCP_EVENT_ADD_PEN = 0,
+	TCP_EVENT_ADD_NEW_ISLE = 1,
+	TCP_EVENT_ADD_ISLE_RIGHT = 2,
+	TCP_EVENT_ADD_ISLE_LEFT = 3,
+	TCP_EVENT_JOIN = 4,
+	TCP_EVENT_NOP = 5,
+	MAX_TCP_OOO_EVENT
+};
+
+
+/*
+ * OOO support modes
+ */
+enum tcp_tstorm_ooo {
+	TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0,
+	TCP_TSTORM_OOO_SEND_PURE_ACK = 1,
+	TCP_TSTORM_OOO_SUPPORTED = 2,
+	MAX_TCP_TSTORM_OOO
+};
+
+
+
+
+
+
+
+
+
+#endif /* __5710_HSI_CNIC_LE__ */
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
new file mode 100644
index 0000000..789e5c7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -0,0 +1,388 @@
+/* cnic_if.h: QLogic cnic core network driver.
+ *
+ * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014-2015 QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION	"2.5.22"
+#define CNIC_MODULE_RELDATE	"July 20, 2015"
+
+#define CNIC_ULP_RDMA		0
+#define CNIC_ULP_ISCSI		1
+#define CNIC_ULP_FCOE		2
+#define CNIC_ULP_L4		3
+#define MAX_CNIC_ULP_TYPE_EXT	3
+#define MAX_CNIC_ULP_TYPE	4
+
+/* Use CPU native page size up to 16K for cnic ring sizes.  */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS	14
+#else
+#define CNIC_PAGE_BITS	PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
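+/* Illustrative only (not part of the original header): on a kernel built
+ * with 4K pages, CNIC_PAGE_SIZE is 4096 and CNIC_PAGE_ALIGN(0x1234) yields
+ * 0x2000; on a 64K-page kernel, CNIC_PAGE_BITS is clamped to 14, so
+ * CNIC_PAGE_SIZE is 16384.
+ */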
+
+struct kwqe {
+	u32 kwqe_op_flag;
+
+#define KWQE_QID_SHIFT		8
+#define KWQE_OPCODE_MASK	0x00ff0000
+#define KWQE_OPCODE_SHIFT	16
+#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+#define KWQE_LAYER_MASK			0x70000000
+#define KWQE_LAYER_SHIFT		28
+#define KWQE_FLAGS_LAYER_MASK_L2	(2<<28)
+#define KWQE_FLAGS_LAYER_MASK_L3	(3<<28)
+#define KWQE_FLAGS_LAYER_MASK_L4	(4<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)
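+/* Illustrative composition (not part of the original header): a kwqe_op_flag
+ * word typically carries an opcode in bits 23:16 combined with a layer code
+ * in bits 30:28, e.g.
+ *	kwqe_op_flag = (opcode << KWQE_OPCODE_SHIFT) |
+ *		       KWQE_FLAGS_LAYER_MASK_L5_ISCSI;
+ */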
+
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+	u32 kwqe_info4;
+	u32 kwqe_info5;
+	u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+};
+
+struct kcqe {
+	u32 kcqe_info0;
+	u32 kcqe_info1;
+	u32 kcqe_info2;
+	u32 kcqe_info3;
+	u32 kcqe_info4;
+	u32 kcqe_info5;
+	u32 kcqe_info6;
+	u32 kcqe_op_flag;
+		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
+		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
+		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)
+		#define KCQE_FLAGS_NEXT 		(1<<31)
+		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
+		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
+		#define KCQE_OPCODE(op)			\
+		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
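+/* For example, KCQE_OPCODE(kcqe->kcqe_op_flag) extracts the 8-bit completion
+ * opcode from bits 23:16 of kcqe_op_flag.
+ */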
+
+#define MAX_CNIC_CTL_DATA	64
+#define MAX_DRV_CTL_DATA	64
+
+#define CNIC_CTL_STOP_CMD		1
+#define CNIC_CTL_START_CMD		2
+#define CNIC_CTL_COMPLETION_CMD		3
+#define CNIC_CTL_STOP_ISCSI_CMD		4
+#define CNIC_CTL_FCOE_STATS_GET_CMD	5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD	6
+
+#define DRV_CTL_IO_WR_CMD		0x101
+#define DRV_CTL_IO_RD_CMD		0x102
+#define DRV_CTL_CTX_WR_CMD		0x103
+#define DRV_CTL_CTXTBL_WR_CMD		0x104
+#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD	0x105
+#define DRV_CTL_START_L2_CMD		0x106
+#define DRV_CTL_STOP_L2_CMD		0x107
+#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD	0x10c
+#define DRV_CTL_ISCSI_STOPPED_CMD	0x10d
+#define DRV_CTL_ULP_REGISTER_CMD	0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD	0x10f
+
+struct cnic_ctl_completion {
+	u32	cid;
+	u8	opcode;
+	u8	error;
+};
+
+struct cnic_ctl_info {
+	int	cmd;
+	union {
+		struct cnic_ctl_completion comp;
+		char bytes[MAX_CNIC_CTL_DATA];
+	} data;
+};
+
+struct drv_ctl_spq_credit {
+	u32	credit_count;
+};
+
+struct drv_ctl_io {
+	u32		cid_addr;
+	u32		offset;
+	u32		data;
+	dma_addr_t	dma_addr;
+};
+
+struct drv_ctl_l2_ring {
+	u32		client_id;
+	u32		cid;
+};
+
+struct drv_ctl_register_data {
+	int ulp_type;
+	struct fcoe_capabilities fcoe_features;
+};
+
+struct drv_ctl_info {
+	int	cmd;
+	int     drv_state;
+#define DRV_NOP		0
+#define DRV_ACTIVE	1
+#define DRV_INACTIVE	2
+#define DRV_UNLOADED	3
+	union {
+		struct drv_ctl_spq_credit credit;
+		struct drv_ctl_io io;
+		struct drv_ctl_l2_ring ring;
+		int ulp_type;
+		struct drv_ctl_register_data register_data;
+		char bytes[MAX_DRV_CTL_DATA];
+	} data;
+};
+
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+	u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u32 count;
+};
+
+struct cnic_ops {
+	struct module	*cnic_owner;
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+	int		(*cnic_handler)(void *, void *);
+	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC	8
+
+struct cnic_irq {
+	unsigned int	vector;
+	void		*status_blk;
+	u32		status_blk_num;
+	u32		status_blk_num2;
+	u32		irq_flags;
+#define CNIC_IRQ_FL_MSIX		0x00000001
+};
+
+struct cnic_eth_dev {
+	struct module	*drv_owner;
+	u32		drv_state;
+#define CNIC_DRV_STATE_REGD		0x00000001
+#define CNIC_DRV_STATE_USING_MSIX	0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO	0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI		0x00000008
+#define CNIC_DRV_STATE_NO_FCOE		0x00000010
+#define CNIC_DRV_STATE_HANDLES_IRQ	0x00000020
+	u32		chip_id;
+	u32		max_kwqe_pending;
+	struct pci_dev	*pdev;
+	void __iomem	*io_base;
+	void __iomem	*io_base2;
+	const void	*iro_arr;
+
+	u32		ctx_tbl_offset;
+	u32		ctx_tbl_len;
+	int		ctx_blk_size;
+	u32		starting_cid;
+	u32		max_iscsi_conn;
+	u32		max_fcoe_conn;
+	u32		max_rdma_conn;
+	u32		fcoe_init_cid;
+	u32		max_fcoe_exchanges;
+	u32		fcoe_wwn_port_name_hi;
+	u32		fcoe_wwn_port_name_lo;
+	u32		fcoe_wwn_node_name_hi;
+	u32		fcoe_wwn_node_name_lo;
+
+	u16		iscsi_l2_client_id;
+	u16		iscsi_l2_cid;
+	u8		iscsi_mac[ETH_ALEN];
+
+	int		num_irq;
+	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
+	int		(*drv_register_cnic)(struct net_device *,
+					     struct cnic_ops *, void *);
+	int		(*drv_unregister_cnic)(struct net_device *);
+	int		(*drv_submit_kwqes_32)(struct net_device *,
+					       struct kwqe *[], u32);
+	int		(*drv_submit_kwqes_16)(struct net_device *,
+					       struct kwqe_16 *[], u32);
+	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	int		(*drv_get_fc_npiv_tbl)(struct net_device *,
+					       struct cnic_fc_npiv_tbl *);
+	unsigned long	reserved1[2];
+	union drv_info_to_mcp	*addr_drv_info_to_mcp;
+};
+
+struct cnic_sockaddr {
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} local;
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} remote;
+};
+
+struct cnic_sock {
+	struct cnic_dev *dev;
+	void	*context;
+	u32	src_ip[4];
+	u32	dst_ip[4];
+	u16	src_port;
+	u16	dst_port;
+	u16	vlan_id;
+	unsigned char old_ha[ETH_ALEN];
+	unsigned char ha[ETH_ALEN];
+	u32	mtu;
+	u32	cid;
+	u32	l5_cid;
+	u32	pg_cid;
+	int	ulp_type;
+
+	u32	ka_timeout;
+	u32	ka_interval;
+	u8	ka_max_probe_count;
+	u8	tos;
+	u8	ttl;
+	u8	snd_seq_scale;
+	u32	rcv_buf;
+	u32	snd_buf;
+	u32	seed;
+
+	unsigned long	tcp_flags;
+#define SK_TCP_NO_DELAY_ACK	0x1
+#define SK_TCP_KEEP_ALIVE	0x2
+#define SK_TCP_NAGLE		0x4
+#define SK_TCP_TIMESTAMP	0x8
+#define SK_TCP_SACK		0x10
+#define SK_TCP_SEG_SCALING	0x20
+	unsigned long	flags;
+#define SK_F_INUSE		0
+#define SK_F_OFFLD_COMPLETE	1
+#define SK_F_OFFLD_SCHED	2
+#define SK_F_PG_OFFLD_COMPLETE	3
+#define SK_F_CONNECT_START	4
+#define SK_F_IPV6		5
+#define SK_F_CLOSING		7
+#define SK_F_HW_ERR		8
+
+	atomic_t ref_count;
+	u32 state;
+	struct kwqe kwqe1;
+	struct kwqe kwqe2;
+	struct kwqe kwqe3;
+};
+
+struct cnic_dev {
+	struct net_device	*netdev;
+	struct pci_dev		*pcidev;
+	void __iomem		*regview;
+	struct list_head	list;
+
+	int (*register_device)(struct cnic_dev *dev, int ulp_type,
+			       void *ulp_ctx);
+	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num_wqes);
+	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+				u32 num_wqes);
+
+	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+			 void *);
+	int (*cm_destroy)(struct cnic_sock *);
+	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+	int (*cm_abort)(struct cnic_sock *);
+	int (*cm_close)(struct cnic_sock *);
+	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+				 char *data, u16 data_size);
+	int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
+	unsigned long	flags;
+#define CNIC_F_CNIC_UP		1
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t	ref_count;
+	u8		mac_addr[ETH_ALEN];
+
+	int		max_iscsi_conn;
+	int		max_fcoe_conn;
+	int		max_rdma_conn;
+
+	int		max_fcoe_exchanges;
+
+	union drv_info_to_mcp	*stats_addr;
+	struct fcoe_capabilities	*fcoe_cap;
+
+	void		*cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
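+/* Illustrative usage sketch (the offset value is hypothetical):
+ *	u32 val = CNIC_RD(dev, 0x1000);
+ *	CNIC_WR(dev, 0x1000, val | 0x1);
+ * i.e. a 32-bit MMIO read-modify-write relative to dev->regview.
+ */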
+
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
+				  char *data, u16 data_size);
+	int (*cnic_get_stats)(void *ulp_ctx);
+	struct module *owner;
+	atomic_t ref_count;
+};
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+int cnic_unregister_driver(int ulp_type);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 0000000..9b6885e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 0000000..f971d92
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,3628 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)				"bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+#include <linux/platform_data/bcmgenet.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT	4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY	0
+
+#define GENET_Q16_RX_BD_CNT	\
+	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
+#define GENET_Q16_TX_BD_CNT	\
+	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
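+/* Illustrative sizing only (actual values come from priv->hw_params): with
+ * TOTAL_DESC = 256 and 4 TX priority queues of 32 BDs each,
+ * GENET_Q16_TX_BD_CNT works out to 256 - 4 * 32 = 128 descriptors left for
+ * the default ring 16.
+ */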
+
+#define RX_BUF_LENGTH		2048
+#define SKB_ALIGNMENT		32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
+				TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
+				TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+					     void __iomem *d, u32 value)
+{
+	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+					    void __iomem *d)
+{
+	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+				    void __iomem *d,
+				    dma_addr_t addr)
+{
+	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+	/* Register writes to the GISB bus can take a couple hundred
+	 * nanoseconds and are done for each packet; save these expensive
+	 * writes unless the platform is explicitly configured for 64-bit/LPAE.
+	 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	if (priv->hw_params->flags & GENET_HAS_40BITS)
+		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+			       void __iomem *d, dma_addr_t addr, u32 val)
+{
+	dmadesc_set_length_status(priv, d, val);
+	dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+					  void __iomem *d)
+{
+	dma_addr_t addr;
+
+	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+	/* Register accesses to the GISB bus can take a couple hundred
+	 * nanoseconds and are done for each packet; skip this expensive
+	 * read unless the platform is explicitly configured for 64-bit/LPAE.
+	 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	if (priv->hw_params->flags & GENET_HAS_40BITS)
+		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+	return addr;
+}
+
+#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+				NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+	if (GENET_IS_V1(priv))
+		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+	else
+		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+	if (GENET_IS_V1(priv))
+		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+	else
+		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These macros are defined to deal with the register map change
+ * between GENET 1.1 and GENET2. Only those currently being used
+ * by the driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+	if (GENET_IS_V1(priv))
+		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+	else
+		return __raw_readl(priv->base +
+				priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+	if (GENET_IS_V1(priv))
+		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+	else
+		__raw_writel(val, priv->base +
+				priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+	if (GENET_IS_V1(priv))
+		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+	else
+		return __raw_readl(priv->base +
+				priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+	if (GENET_IS_V1(priv))
+		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+	else
+		__raw_writel(val, priv->base +
+				priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+	DMA_RING_CFG = 0,
+	DMA_CTRL,
+	DMA_STATUS,
+	DMA_SCB_BURST_SIZE,
+	DMA_ARB_CTRL,
+	DMA_PRIORITY_0,
+	DMA_PRIORITY_1,
+	DMA_PRIORITY_2,
+	DMA_INDEX2RING_0,
+	DMA_INDEX2RING_1,
+	DMA_INDEX2RING_2,
+	DMA_INDEX2RING_3,
+	DMA_INDEX2RING_4,
+	DMA_INDEX2RING_5,
+	DMA_INDEX2RING_6,
+	DMA_INDEX2RING_7,
+	DMA_RING0_TIMEOUT,
+	DMA_RING1_TIMEOUT,
+	DMA_RING2_TIMEOUT,
+	DMA_RING3_TIMEOUT,
+	DMA_RING4_TIMEOUT,
+	DMA_RING5_TIMEOUT,
+	DMA_RING6_TIMEOUT,
+	DMA_RING7_TIMEOUT,
+	DMA_RING8_TIMEOUT,
+	DMA_RING9_TIMEOUT,
+	DMA_RING10_TIMEOUT,
+	DMA_RING11_TIMEOUT,
+	DMA_RING12_TIMEOUT,
+	DMA_RING13_TIMEOUT,
+	DMA_RING14_TIMEOUT,
+	DMA_RING15_TIMEOUT,
+	DMA_RING16_TIMEOUT,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+	[DMA_RING_CFG]		= 0x00,
+	[DMA_CTRL]		= 0x04,
+	[DMA_STATUS]		= 0x08,
+	[DMA_SCB_BURST_SIZE]	= 0x0C,
+	[DMA_ARB_CTRL]		= 0x2C,
+	[DMA_PRIORITY_0]	= 0x30,
+	[DMA_PRIORITY_1]	= 0x34,
+	[DMA_PRIORITY_2]	= 0x38,
+	[DMA_RING0_TIMEOUT]	= 0x2C,
+	[DMA_RING1_TIMEOUT]	= 0x30,
+	[DMA_RING2_TIMEOUT]	= 0x34,
+	[DMA_RING3_TIMEOUT]	= 0x38,
+	[DMA_RING4_TIMEOUT]	= 0x3c,
+	[DMA_RING5_TIMEOUT]	= 0x40,
+	[DMA_RING6_TIMEOUT]	= 0x44,
+	[DMA_RING7_TIMEOUT]	= 0x48,
+	[DMA_RING8_TIMEOUT]	= 0x4c,
+	[DMA_RING9_TIMEOUT]	= 0x50,
+	[DMA_RING10_TIMEOUT]	= 0x54,
+	[DMA_RING11_TIMEOUT]	= 0x58,
+	[DMA_RING12_TIMEOUT]	= 0x5c,
+	[DMA_RING13_TIMEOUT]	= 0x60,
+	[DMA_RING14_TIMEOUT]	= 0x64,
+	[DMA_RING15_TIMEOUT]	= 0x68,
+	[DMA_RING16_TIMEOUT]	= 0x6C,
+	[DMA_INDEX2RING_0]	= 0x70,
+	[DMA_INDEX2RING_1]	= 0x74,
+	[DMA_INDEX2RING_2]	= 0x78,
+	[DMA_INDEX2RING_3]	= 0x7C,
+	[DMA_INDEX2RING_4]	= 0x80,
+	[DMA_INDEX2RING_5]	= 0x84,
+	[DMA_INDEX2RING_6]	= 0x88,
+	[DMA_INDEX2RING_7]	= 0x8C,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+	[DMA_RING_CFG]		= 0x00,
+	[DMA_CTRL]		= 0x04,
+	[DMA_STATUS]		= 0x08,
+	[DMA_SCB_BURST_SIZE]	= 0x0C,
+	[DMA_ARB_CTRL]		= 0x30,
+	[DMA_PRIORITY_0]	= 0x34,
+	[DMA_PRIORITY_1]	= 0x38,
+	[DMA_PRIORITY_2]	= 0x3C,
+	[DMA_RING0_TIMEOUT]	= 0x2C,
+	[DMA_RING1_TIMEOUT]	= 0x30,
+	[DMA_RING2_TIMEOUT]	= 0x34,
+	[DMA_RING3_TIMEOUT]	= 0x38,
+	[DMA_RING4_TIMEOUT]	= 0x3c,
+	[DMA_RING5_TIMEOUT]	= 0x40,
+	[DMA_RING6_TIMEOUT]	= 0x44,
+	[DMA_RING7_TIMEOUT]	= 0x48,
+	[DMA_RING8_TIMEOUT]	= 0x4c,
+	[DMA_RING9_TIMEOUT]	= 0x50,
+	[DMA_RING10_TIMEOUT]	= 0x54,
+	[DMA_RING11_TIMEOUT]	= 0x58,
+	[DMA_RING12_TIMEOUT]	= 0x5c,
+	[DMA_RING13_TIMEOUT]	= 0x60,
+	[DMA_RING14_TIMEOUT]	= 0x64,
+	[DMA_RING15_TIMEOUT]	= 0x68,
+	[DMA_RING16_TIMEOUT]	= 0x6C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+	[DMA_CTRL]		= 0x00,
+	[DMA_STATUS]		= 0x04,
+	[DMA_SCB_BURST_SIZE]	= 0x0C,
+	[DMA_ARB_CTRL]		= 0x30,
+	[DMA_PRIORITY_0]	= 0x34,
+	[DMA_PRIORITY_1]	= 0x38,
+	[DMA_PRIORITY_2]	= 0x3C,
+	[DMA_RING0_TIMEOUT]	= 0x2C,
+	[DMA_RING1_TIMEOUT]	= 0x30,
+	[DMA_RING2_TIMEOUT]	= 0x34,
+	[DMA_RING3_TIMEOUT]	= 0x38,
+	[DMA_RING4_TIMEOUT]	= 0x3c,
+	[DMA_RING5_TIMEOUT]	= 0x40,
+	[DMA_RING6_TIMEOUT]	= 0x44,
+	[DMA_RING7_TIMEOUT]	= 0x48,
+	[DMA_RING8_TIMEOUT]	= 0x4c,
+	[DMA_RING9_TIMEOUT]	= 0x50,
+	[DMA_RING10_TIMEOUT]	= 0x54,
+	[DMA_RING11_TIMEOUT]	= 0x58,
+	[DMA_RING12_TIMEOUT]	= 0x5c,
+	[DMA_RING13_TIMEOUT]	= 0x60,
+	[DMA_RING14_TIMEOUT]	= 0x64,
+	[DMA_RING15_TIMEOUT]	= 0x68,
+	[DMA_RING16_TIMEOUT]	= 0x6C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
+
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+	return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+				      enum dma_reg r)
+{
+	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+					u32 val, enum dma_reg r)
+{
+	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+				      enum dma_reg r)
+{
+	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+					u32 val, enum dma_reg r)
+{
+	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/R the registers
+ * whose meaning differs depending on the direction
+ */
+enum dma_ring_reg {
+	TDMA_READ_PTR = 0,
+	RDMA_WRITE_PTR = TDMA_READ_PTR,
+	TDMA_READ_PTR_HI,
+	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+	TDMA_CONS_INDEX,
+	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+	TDMA_PROD_INDEX,
+	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+	DMA_RING_BUF_SIZE,
+	DMA_START_ADDR,
+	DMA_START_ADDR_HI,
+	DMA_END_ADDR,
+	DMA_END_ADDR_HI,
+	DMA_MBUF_DONE_THRESH,
+	TDMA_FLOW_PERIOD,
+	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+	TDMA_WRITE_PTR,
+	RDMA_READ_PTR = TDMA_WRITE_PTR,
+	TDMA_WRITE_PTR_HI,
+	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing;
+ * for obvious reasons the LO and HI word parts
+ * are contiguous, but this offsets the other
+ * registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+	[TDMA_READ_PTR]			= 0x00,
+	[TDMA_READ_PTR_HI]		= 0x04,
+	[TDMA_CONS_INDEX]		= 0x08,
+	[TDMA_PROD_INDEX]		= 0x0C,
+	[DMA_RING_BUF_SIZE]		= 0x10,
+	[DMA_START_ADDR]		= 0x14,
+	[DMA_START_ADDR_HI]		= 0x18,
+	[DMA_END_ADDR]			= 0x1C,
+	[DMA_END_ADDR_HI]		= 0x20,
+	[DMA_MBUF_DONE_THRESH]		= 0x24,
+	[TDMA_FLOW_PERIOD]		= 0x28,
+	[TDMA_WRITE_PTR]		= 0x2C,
+	[TDMA_WRITE_PTR_HI]		= 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+	[TDMA_READ_PTR]			= 0x00,
+	[TDMA_CONS_INDEX]		= 0x04,
+	[TDMA_PROD_INDEX]		= 0x08,
+	[DMA_RING_BUF_SIZE]		= 0x0C,
+	[DMA_START_ADDR]		= 0x10,
+	[DMA_END_ADDR]			= 0x14,
+	[DMA_MBUF_DONE_THRESH]		= 0x18,
+	[TDMA_FLOW_PERIOD]		= 0x1C,
+	[TDMA_WRITE_PTR]		= 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+					   unsigned int ring,
+					   enum dma_ring_reg r)
+{
+	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+			(DMA_RING_SIZE * ring) +
+			genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
+{
+	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+			(DMA_RING_SIZE * ring) +
+			genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+					   unsigned int ring,
+					   enum dma_ring_reg r)
+{
+	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+			(DMA_RING_SIZE * ring) +
+			genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
+{
+	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+			(DMA_RING_SIZE * ring) +
+			genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+				netdev_features_t wanted)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 rbuf_chk_ctrl;
+	bool rx_csum_en;
+
+	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+	/* enable rx checksumming */
+	if (rx_csum_en)
+		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+	else
+		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+	priv->desc_rxchk_en = rx_csum_en;
+
+	/* If UniMAC forwards CRC, we need to skip over it to get
+	 * a valid CHK bit to be set in the per-packet status word
+	 */
+	if (rx_csum_en && priv->crc_fwd_en)
+		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
+	else
+		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
+
+	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+	return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+				netdev_features_t wanted)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	bool desc_64b_en;
+	u32 tbuf_ctrl, rbuf_ctrl;
+
+	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+	/* enable the 64-byte descriptor in both directions (RBUF and TBUF) */
+	if (desc_64b_en) {
+		tbuf_ctrl |= RBUF_64B_EN;
+		rbuf_ctrl |= RBUF_64B_EN;
+	} else {
+		tbuf_ctrl &= ~RBUF_64B_EN;
+		rbuf_ctrl &= ~RBUF_64B_EN;
+	}
+	priv->desc_64b_en = desc_64b_en;
+
+	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+	return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+				 netdev_features_t features)
+{
+	netdev_features_t changed = features ^ dev->features;
+	netdev_features_t wanted = dev->wanted_features;
+	int ret = 0;
+
+	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+		ret = bcmgenet_set_tx_csum(dev, wanted);
+	if (changed & (NETIF_F_RXCSUM))
+		ret = bcmgenet_set_rx_csum(dev, wanted);
+
+	return ret;
+}
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	priv->msg_enable = level;
+}
+
+static int bcmgenet_get_coalesce(struct net_device *dev,
+				 struct ethtool_coalesce *ec)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	ec->tx_max_coalesced_frames =
+		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
+					 DMA_MBUF_DONE_THRESH);
+	ec->rx_max_coalesced_frames =
+		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
+					 DMA_MBUF_DONE_THRESH);
+	ec->rx_coalesce_usecs =
+		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+
+	return 0;
+}
+
+static int bcmgenet_set_coalesce(struct net_device *dev,
+				 struct ethtool_coalesce *ec)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	u32 reg;
+
+	/* The base system clock is 125 MHz; the DMA timeout tick is this
+	 * reference clock divided by 1024, which yields roughly 8.192 us.
+	 * The maximum value has to fit in DMA_TIMEOUT_MASK (16 bits).
+	 */
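+	/* Worked example (illustrative): rx_coalesce_usecs = 50 is programmed
+	 * below as DIV_ROUND_UP(50 * 1000, 8192) = 7 timeout ticks, i.e.
+	 * roughly 57.3 us of actual delay.
+	 */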
+	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+	    ec->tx_max_coalesced_frames == 0 ||
+	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
+		return -EINVAL;
+
+	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
+		return -EINVAL;
+
+	/* GENET TDMA hardware does not support a configurable timeout, but will
+	 * always generate an interrupt either after MBDONE packets have been
+	 * transmitted, or when the ring is empty.
+	 */
+	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
+	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
+		return -EOPNOTSUPP;
+
+	/* Program all TX queues with the same values, as there is no
+	 * ethtool knob to do coalescing on a per-queue basis
+	 */
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		bcmgenet_tdma_ring_writel(priv, i,
+					  ec->tx_max_coalesced_frames,
+					  DMA_MBUF_DONE_THRESH);
+	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
+				  ec->tx_max_coalesced_frames,
+				  DMA_MBUF_DONE_THRESH);
+
+	for (i = 0; i < priv->hw_params->rx_queues; i++) {
+		bcmgenet_rdma_ring_writel(priv, i,
+					  ec->rx_max_coalesced_frames,
+					  DMA_MBUF_DONE_THRESH);
+
+		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
+		reg &= ~DMA_TIMEOUT_MASK;
+		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
+	}
+
+	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+				  ec->rx_max_coalesced_frames,
+				  DMA_MBUF_DONE_THRESH);
+
+	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
+	reg &= ~DMA_TIMEOUT_MASK;
+	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
+
+	return 0;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+	BCMGENET_STAT_NETDEV = -1,
+	BCMGENET_STAT_MIB_RX,
+	BCMGENET_STAT_MIB_TX,
+	BCMGENET_STAT_RUNT,
+	BCMGENET_STAT_MISC,
+	BCMGENET_STAT_SOFT,
+};
+
+struct bcmgenet_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_sizeof;
+	int stat_offset;
+	enum bcmgenet_stat_type type;
+	/* reg offset from UMAC base for misc counters */
+	u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+	.stat_string = __stringify(m), \
+	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+	.stat_offset = offsetof(struct net_device_stats, m), \
+	.type = BCMGENET_STAT_NETDEV, \
+}
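+/* Illustrative expansion: STAT_NETDEV(rx_packets) yields an entry with
+ * .stat_string = "rx_packets", sized and offset against
+ * struct net_device_stats, and .type = BCMGENET_STAT_NETDEV.
+ */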
+
+#define STAT_GENET_MIB(str, m, _type) { \
+	.stat_string = str, \
+	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+	.stat_offset = offsetof(struct bcmgenet_priv, m), \
+	.type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+	.stat_string = str, \
+	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+	.stat_offset = offsetof(struct bcmgenet_priv, m), \
+	.type = BCMGENET_STAT_MISC, \
+	.reg_offset = offset, \
+}
+
+
+/* There is a 0xC gap between the end of the RX stats and the beginning of
+ * the TX stats, and then between the end of the TX stats and the beginning
+ * of the RX RUNT stats.
+ */
+#define BCMGENET_STAT_OFFSET	0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+	/* general stats */
+	STAT_NETDEV(rx_packets),
+	STAT_NETDEV(tx_packets),
+	STAT_NETDEV(rx_bytes),
+	STAT_NETDEV(tx_bytes),
+	STAT_NETDEV(rx_errors),
+	STAT_NETDEV(tx_errors),
+	STAT_NETDEV(rx_dropped),
+	STAT_NETDEV(tx_dropped),
+	STAT_NETDEV(multicast),
+	/* UniMAC RSV counters */
+	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+	/* UniMAC TSV counters */
+	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+	/* UniMAC RUNT counters */
+	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+	/* Misc UniMAC counters */
+	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+			UMAC_RBUF_OVFL_CNT),
+	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+};
+
+#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+				 struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+	strlcpy(info->version, "v2.0", sizeof(info->version));
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCMGENET_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+				 u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcmgenet_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+	int i, j = 0;
+
+	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+		const struct bcmgenet_stats *s;
+		u8 offset = 0;
+		u32 val = 0;
+		char *p;
+
+		s = &bcmgenet_gstrings_stats[i];
+		switch (s->type) {
+		case BCMGENET_STAT_NETDEV:
+		case BCMGENET_STAT_SOFT:
+			continue;
+		case BCMGENET_STAT_MIB_RX:
+		case BCMGENET_STAT_MIB_TX:
+		case BCMGENET_STAT_RUNT:
+			if (s->type != BCMGENET_STAT_MIB_RX)
+				offset = BCMGENET_STAT_OFFSET;
+			val = bcmgenet_umac_readl(priv,
+						  UMAC_MIB_START + j + offset);
+			break;
+		case BCMGENET_STAT_MISC:
+			val = bcmgenet_umac_readl(priv, s->reg_offset);
+			/* clear if overflowed */
+			if (val == ~0)
+				bcmgenet_umac_writel(priv, 0, s->reg_offset);
+			break;
+		}
+
+		j += s->stat_sizeof;
+		p = (char *)priv + s->stat_offset;
+		*(u32 *)p = val;
+	}
+}
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+				       struct ethtool_stats *stats,
+				       u64 *data)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (netif_running(dev))
+		bcmgenet_update_mib_counters(priv);
+
+	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+		const struct bcmgenet_stats *s;
+		char *p;
+
+		s = &bcmgenet_gstrings_stats[i];
+		if (s->type == BCMGENET_STAT_NETDEV)
+			p = (char *)&dev->stats;
+		else
+			p = (char *)priv;
+		p += s->stat_offset;
+		data[i] = *(u32 *)p;
+	}
+}
+
+static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+	u32 reg;
+
+	if (enable && !priv->clk_eee_enabled) {
+		clk_prepare_enable(priv->clk_eee);
+		priv->clk_eee_enabled = true;
+	}
+
+	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
+	if (enable)
+		reg |= EEE_EN;
+	else
+		reg &= ~EEE_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
+
+	/* Enable EEE and switch to a 27 MHz clock automatically */
+	reg = __raw_readl(priv->base + off);
+	if (enable)
+		reg |= TBUF_EEE_EN | TBUF_PM_EN;
+	else
+		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+	__raw_writel(reg, priv->base + off);
+
+	/* Do the same thing for RBUF */
+	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
+	if (enable)
+		reg |= RBUF_EEE_EN | RBUF_PM_EN;
+	else
+		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
+	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
+
+	if (!enable && priv->clk_eee_enabled) {
+		clk_disable_unprepare(priv->clk_eee);
+		priv->clk_eee_enabled = false;
+	}
+
+	priv->eee.eee_enabled = enable;
+	priv->eee.eee_active = enable;
+}
+
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct ethtool_eee *p = &priv->eee;
+
+	if (GENET_IS_V1(priv))
+		return -EOPNOTSUPP;
+
+	e->eee_enabled = p->eee_enabled;
+	e->eee_active = p->eee_active;
+	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+
+	return phy_ethtool_get_eee(priv->phydev, e);
+}
+
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct ethtool_eee *p = &priv->eee;
+	int ret = 0;
+
+	if (GENET_IS_V1(priv))
+		return -EOPNOTSUPP;
+
+	p->eee_enabled = e->eee_enabled;
+
+	if (!p->eee_enabled) {
+		bcmgenet_eee_enable_set(dev, false);
+	} else {
+		ret = phy_init_eee(priv->phydev, 0);
+		if (ret) {
+			netif_err(priv, hw, dev, "EEE initialization failed\n");
+			return ret;
+		}
+
+		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+		bcmgenet_eee_enable_set(dev, true);
+	}
+
+	return phy_ethtool_set_eee(priv->phydev, e);
+}
+
+static int bcmgenet_nway_reset(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	return genphy_restart_aneg(priv->phydev);
+}
+
+/* standard ethtool support functions. */
+static const struct ethtool_ops bcmgenet_ethtool_ops = {
+	.get_strings		= bcmgenet_get_strings,
+	.get_sset_count		= bcmgenet_get_sset_count,
+	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
+	.get_settings		= bcmgenet_get_settings,
+	.set_settings		= bcmgenet_set_settings,
+	.get_drvinfo		= bcmgenet_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_msglevel		= bcmgenet_get_msglevel,
+	.set_msglevel		= bcmgenet_set_msglevel,
+	.get_wol		= bcmgenet_get_wol,
+	.set_wol		= bcmgenet_set_wol,
+	.get_eee		= bcmgenet_get_eee,
+	.set_eee		= bcmgenet_set_eee,
+	.nway_reset		= bcmgenet_nway_reset,
+	.get_coalesce		= bcmgenet_get_coalesce,
+	.set_coalesce		= bcmgenet_set_coalesce,
+};
+
+/* Power down the unimac, based on mode. */
+static int bcmgenet_power_down(struct bcmgenet_priv *priv,
+				enum bcmgenet_power_mode mode)
+{
+	int ret = 0;
+	u32 reg;
+
+	switch (mode) {
+	case GENET_POWER_CABLE_SENSE:
+		phy_detach(priv->phydev);
+		break;
+
+	case GENET_POWER_WOL_MAGIC:
+		ret = bcmgenet_wol_power_down_cfg(priv, mode);
+		break;
+
+	case GENET_POWER_PASSIVE:
+		/* Power down LED */
+		if (priv->hw_params->flags & GENET_HAS_EXT) {
+			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+			reg |= (EXT_PWR_DOWN_PHY |
+				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+
+			bcmgenet_phy_power_set(priv->dev, false);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+			      enum bcmgenet_power_mode mode)
+{
+	u32 reg;
+
+	if (!(priv->hw_params->flags & GENET_HAS_EXT))
+		return;
+
+	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+	switch (mode) {
+	case GENET_POWER_PASSIVE:
+		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+				EXT_PWR_DOWN_BIAS);
+		/* fallthrough */
+	case GENET_POWER_CABLE_SENSE:
+		/* enable APD */
+		reg |= EXT_PWR_DN_EN_LD;
+		break;
+	case GENET_POWER_WOL_MAGIC:
+		bcmgenet_wol_power_up_cfg(priv, mode);
+		return;
+	default:
+		break;
+	}
+
+	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+	if (mode == GENET_POWER_PASSIVE) {
+		bcmgenet_phy_power_set(priv->dev, true);
+		bcmgenet_mii_reset(priv->dev);
+	}
+}
+
+/* ioctl handler for special commands that are not present in ethtool. */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int val = 0;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!priv->phydev)
+			val = -ENODEV;
+		else
+			val = phy_mii_ioctl(priv->phydev, rq, cmd);
+		break;
+
+	default:
+		val = -EINVAL;
+		break;
+	}
+
+	return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+					 struct bcmgenet_tx_ring *ring)
+{
+	struct enet_cb *tx_cb_ptr;
+
+	tx_cb_ptr = ring->cbs;
+	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+	/* Advancing local write pointer */
+	if (ring->write_ptr == ring->end_ptr)
+		ring->write_ptr = ring->cb_ptr;
+	else
+		ring->write_ptr++;
+
+	return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+	dev_kfree_skb_any(cb->skb);
+	cb->skb = NULL;
+	dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv,
+				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+				 INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
+{
+	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
+				 INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
+{
+	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
+				 INTRL2_CPU_MASK_SET);
+}
+
+/* Unlocked version of the reclaim routine */
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+					  struct bcmgenet_tx_ring *ring)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	struct enet_cb *tx_cb_ptr;
+	struct netdev_queue *txq;
+	unsigned int pkts_compl = 0;
+	unsigned int c_index;
+	unsigned int txbds_ready;
+	unsigned int txbds_processed = 0;
+
+	/* Compute how many buffers have been transmitted since the last call */
+	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+	c_index &= DMA_C_INDEX_MASK;
+
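+	/* The hardware consumer index wraps at DMA_C_INDEX_MASK + 1; handle
+	 * the wrapped case when computing how many descriptors completed.
+	 */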
+	if (likely(c_index >= ring->c_index))
+		txbds_ready = c_index - ring->c_index;
+	else
+		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+
+	netif_dbg(priv, tx_done, dev,
+		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
+
+	/* Reclaim transmitted buffers */
+	while (txbds_processed < txbds_ready) {
+		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
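+		/* A control block with an skb attached is the head of a frame
+		 * (mapped with dma_map_single()); mapped blocks without an skb
+		 * hold fragments (mapped with skb_frag_dma_map()).
+		 */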
+		if (tx_cb_ptr->skb) {
+			pkts_compl++;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+			dma_unmap_single(kdev,
+					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+					 dma_unmap_len(tx_cb_ptr, dma_len),
+					 DMA_TO_DEVICE);
+			bcmgenet_free_cb(tx_cb_ptr);
+		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+			dev->stats.tx_bytes +=
+				dma_unmap_len(tx_cb_ptr, dma_len);
+			dma_unmap_page(kdev,
+				       dma_unmap_addr(tx_cb_ptr, dma_addr),
+				       dma_unmap_len(tx_cb_ptr, dma_len),
+				       DMA_TO_DEVICE);
+			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+		}
+
+		txbds_processed++;
+		if (likely(ring->clean_ptr < ring->end_ptr))
+			ring->clean_ptr++;
+		else
+			ring->clean_ptr = ring->cb_ptr;
+	}
+
+	ring->free_bds += txbds_processed;
+	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		txq = netdev_get_tx_queue(dev, ring->queue);
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
+
+	return pkts_compl;
+}
+
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+				struct bcmgenet_tx_ring *ring)
+{
+	unsigned int released;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	released = __bcmgenet_tx_reclaim(dev, ring);
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_tx_ring *ring =
+		container_of(napi, struct bcmgenet_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		ring->int_enable(ring);
+
+		return 0;
+	}
+
+	return budget;
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (netif_is_multiqueue(dev)) {
+		for (i = 0; i < priv->hw_params->tx_queues; i++)
+			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+	}
+
+	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either the head of a fragmented packet or a
+ * standalone SKB); the caller must hold the ring's lock.
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+				struct sk_buff *skb,
+				u16 dma_desc_flags,
+				struct bcmgenet_tx_ring *ring)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	struct enet_cb *tx_cb_ptr;
+	unsigned int skb_len;
+	dma_addr_t mapping;
+	u32 length_status;
+	int ret;
+
+	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+	if (unlikely(!tx_cb_ptr))
+		BUG();
+
+	tx_cb_ptr->skb = skb;
+
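+	/* Map at least ETH_ZLEN bytes of the head so short frames meet the
+	 * minimum Ethernet frame length.
+	 */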
+	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(kdev, mapping);
+	if (ret) {
+		priv->mib.tx_dma_failed++;
+		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
+	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+			DMA_TX_APPEND_CRC;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		length_status |= DMA_TX_DO_CSUM;
+
+	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+	return 0;
+}
+
+/* Transmit a SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+			      skb_frag_t *frag,
+			      u16 dma_desc_flags,
+			      struct bcmgenet_tx_ring *ring)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	struct enet_cb *tx_cb_ptr;
+	dma_addr_t mapping;
+	int ret;
+
+	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+	if (unlikely(!tx_cb_ptr))
+		BUG();
+	tx_cb_ptr->skb = NULL;
+
+	mapping = skb_frag_dma_map(kdev, frag, 0,
+				   skb_frag_size(frag), DMA_TO_DEVICE);
+	ret = dma_mapping_error(kdev, mapping);
+	if (ret) {
+		priv->mib.tx_dma_failed++;
+		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+			  __func__);
+		return ret;
+	}
+
+	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+	return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
+					    struct sk_buff *skb)
+{
+	struct status_64 *status = NULL;
+	struct sk_buff *new_skb;
+	u16 offset;
+	u8 ip_proto;
+	u16 ip_ver;
+	u32 tx_csum_info;
+
+	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+		/* If 64 byte status block enabled, must make sure skb has
+		 * enough headroom for us to insert 64B status block.
+		 */
+		new_skb = skb_realloc_headroom(skb, sizeof(*status));
+		dev_kfree_skb(skb);
+		if (!new_skb) {
+			dev->stats.tx_dropped++;
+			return NULL;
+		}
+		skb = new_skb;
+	}
+
+	skb_push(skb, sizeof(*status));
+	status = (struct status_64 *)skb->data;
+
+	if (skb->ip_summed  == CHECKSUM_PARTIAL) {
+		ip_ver = htons(skb->protocol);
+		switch (ip_ver) {
+		case ETH_P_IP:
+			ip_proto = ip_hdr(skb)->protocol;
+			break;
+		case ETH_P_IPV6:
+			ip_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			return skb;
+		}
+
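+		/* Tell the hardware where checksumming starts and where to
+		 * store the result, both relative to the packet data that
+		 * follows the status block pushed above.
+		 */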
+		offset = skb_checksum_start_offset(skb) - sizeof(*status);
+		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+				(offset + skb->csum_offset);
+
+		/* Set the length valid bit for TCP and UDP, and additionally
+		 * the special UDP flag for UDP over IPv4; for any other
+		 * protocol clear tx_csum_info.
+		 */
+		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+			tx_csum_info |= STATUS_TX_CSUM_LV;
+			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+		} else {
+			tx_csum_info = 0;
+		}
+
+		status->tx_csum_info = tx_csum_info;
+	}
+
+	return skb;
+}
+
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct bcmgenet_tx_ring *ring = NULL;
+	struct netdev_queue *txq;
+	unsigned long flags = 0;
+	int nr_frags, index;
+	u16 dma_desc_flags;
+	int ret;
+	int i;
+
+	index = skb_get_queue_mapping(skb);
+	/* Mapping strategy:
+	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
+	 * queue_mapping = 1, goes to ring 0 (highest priority queue).
+	 * queue_mapping = 2, goes to ring 1.
+	 * queue_mapping = 3, goes to ring 2.
+	 * queue_mapping = 4, goes to ring 3.
+	 */
+	if (index == 0)
+		index = DESC_INDEX;
+	else
+		index -= 1;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	ring = &priv->tx_rings[index];
+	txq = netdev_get_tx_queue(dev, ring->queue);
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (ring->free_bds <= nr_frags + 1) {
+		netif_tx_stop_queue(txq);
+		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+			   __func__, index, ring->queue);
+		ret = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	if (skb_padto(skb, ETH_ZLEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	/* set the SKB transmit checksum */
+	if (priv->desc_64b_en) {
+		skb = bcmgenet_put_tx_csum(dev, skb);
+		if (!skb) {
+			ret = NETDEV_TX_OK;
+			goto out;
+		}
+	}
+
+	dma_desc_flags = DMA_SOP;
+	if (nr_frags == 0)
+		dma_desc_flags |= DMA_EOP;
+
+	/* Transmit single SKB or head of fragment list */
+	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+	if (ret) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	/* xmit fragment */
+	for (i = 0; i < nr_frags; i++) {
+		ret = bcmgenet_xmit_frag(dev,
+					 &skb_shinfo(skb)->frags[i],
+					 (i == nr_frags - 1) ? DMA_EOP : 0,
+					 ring);
+		if (ret) {
+			ret = NETDEV_TX_OK;
+			goto out;
+		}
+	}
+
+	skb_tx_timestamp(skb);
+
+	/* Decrement total BD count and advance our write pointer */
+	ring->free_bds -= nr_frags + 1;
+	ring->prod_index += nr_frags + 1;
+	ring->prod_index &= DMA_P_INDEX_MASK;
+
+	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
+		netif_tx_stop_queue(txq);
+
+	if (!skb->xmit_more || netif_xmit_stopped(txq))
+		/* Packets are ready, update producer index */
+		bcmgenet_tdma_ring_writel(priv, ring->index,
+					  ring->prod_index, TDMA_PROD_INDEX);
+out:
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return ret;
+}
+
+static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+					  struct enet_cb *cb)
+{
+	struct device *kdev = &priv->pdev->dev;
+	struct sk_buff *skb;
+	struct sk_buff *rx_skb;
+	dma_addr_t mapping;
+
+	/* Allocate a new Rx skb */
+	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
+	if (!skb) {
+		priv->mib.alloc_rx_buff_failed++;
+		netif_err(priv, rx_err, priv->dev,
+			  "%s: Rx skb allocation failed\n", __func__);
+		return NULL;
+	}
+
+	/* DMA-map the new Rx skb */
+	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
+				 DMA_FROM_DEVICE);
+	if (dma_mapping_error(kdev, mapping)) {
+		priv->mib.rx_dma_failed++;
+		dev_kfree_skb_any(skb);
+		netif_err(priv, rx_err, priv->dev,
+			  "%s: Rx skb DMA mapping failed\n", __func__);
+		return NULL;
+	}
+
+	/* Grab the current Rx skb from the ring and DMA-unmap it */
+	rx_skb = cb->skb;
+	if (likely(rx_skb))
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 priv->rx_buf_len, DMA_FROM_DEVICE);
+
+	/* Put the new Rx skb on the ring */
+	cb->skb = skb;
+	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dmadesc_set_addr(priv, cb->bd_addr, mapping);
+
+	/* Return the current Rx skb to caller */
+	return rx_skb;
+}
+
+/* bcmgenet_desc_rx - descriptor-based Rx processing.
+ * This can be called from a bottom half or from the NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
+				     unsigned int budget)
+{
+	struct bcmgenet_priv *priv = ring->priv;
+	struct net_device *dev = priv->dev;
+	struct enet_cb *cb;
+	struct sk_buff *skb;
+	u32 dma_length_status;
+	unsigned long dma_flag;
+	int len;
+	unsigned int rxpktprocessed = 0, rxpkttoprocess;
+	unsigned int p_index;
+	unsigned int discards;
+	unsigned int chksum_ok = 0;
+
+	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
+
+	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
+		   DMA_P_INDEX_DISCARD_CNT_MASK;
+	if (discards > ring->old_discards) {
+		discards = discards - ring->old_discards;
+		dev->stats.rx_missed_errors += discards;
+		dev->stats.rx_errors += discards;
+		ring->old_discards += discards;
+
+		/* Clear HW register when we reach 75% of maximum 0xFFFF */
+		if (ring->old_discards >= 0xC000) {
+			ring->old_discards = 0;
+			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
+						  RDMA_PROD_INDEX);
+		}
+	}
+
+	p_index &= DMA_P_INDEX_MASK;
+
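+	/* The producer index also wraps at DMA_C_INDEX_MASK + 1; handle the
+	 * wrapped case when computing how many packets are pending.
+	 */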
+	if (likely(p_index >= ring->c_index))
+		rxpkttoprocess = p_index - ring->c_index;
+	else
+		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
+				 p_index;
+
+	netif_dbg(priv, rx_status, dev,
+		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+	while ((rxpktprocessed < rxpkttoprocess) &&
+	       (rxpktprocessed < budget)) {
+		cb = &priv->rx_cbs[ring->read_ptr];
+		skb = bcmgenet_rx_refill(priv, cb);
+
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			goto next;
+		}
+
+		if (!priv->desc_64b_en) {
+			dma_length_status =
+				dmadesc_get_length_status(priv, cb->bd_addr);
+		} else {
+			struct status_64 *status;
+
+			status = (struct status_64 *)skb->data;
+			dma_length_status = status->length_status;
+		}
+
+		/* DMA flags and length are still valid no matter how
+		 * we got the Receive Status Vector (64B RSB or register)
+		 */
+		dma_flag = dma_length_status & 0xffff;
+		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+		netif_dbg(priv, rx_status, dev,
+			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+			  __func__, p_index, ring->c_index,
+			  ring->read_ptr, dma_length_status);
+
+		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+			netif_err(priv, rx_status, dev,
+				  "dropping fragmented packet!\n");
+			dev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		}
+
+		/* report errors */
+		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+						DMA_RX_OV |
+						DMA_RX_NO |
+						DMA_RX_LG |
+						DMA_RX_RXER))) {
+			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+				  (unsigned int)dma_flag);
+			if (dma_flag & DMA_RX_CRC_ERROR)
+				dev->stats.rx_crc_errors++;
+			if (dma_flag & DMA_RX_OV)
+				dev->stats.rx_over_errors++;
+			if (dma_flag & DMA_RX_NO)
+				dev->stats.rx_frame_errors++;
+			if (dma_flag & DMA_RX_LG)
+				dev->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			dev_kfree_skb_any(skb);
+			goto next;
+		} /* error packet */
+
+		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+			     priv->desc_rxchk_en;
+
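+		/* The reported length includes the 64B status block when it
+		 * is enabled; strip it before passing the packet up.
+		 */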
+		skb_put(skb, len);
+		if (priv->desc_64b_en) {
+			skb_pull(skb, 64);
+			len -= 64;
+		}
+
+		if (likely(chksum_ok))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		/* remove the 2 bytes the hardware added for IP alignment */
+		skb_pull(skb, 2);
+		len -= 2;
+
+		if (priv->crc_fwd_en) {
+			skb_trim(skb, len - ETH_FCS_LEN);
+			len -= ETH_FCS_LEN;
+		}
+
+		/* Finish setting up the received SKB and send it to the kernel */
+		skb->protocol = eth_type_trans(skb, priv->dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		if (dma_flag & DMA_RX_MULT)
+			dev->stats.multicast++;
+
+		/* Notify kernel */
+		napi_gro_receive(&ring->napi, skb);
+		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+next:
+		rxpktprocessed++;
+		if (likely(ring->read_ptr < ring->end_ptr))
+			ring->read_ptr++;
+		else
+			ring->read_ptr = ring->cb_ptr;
+
+		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
+		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
+	}
+
+	return rxpktprocessed;
+}
+
+/* Rx NAPI polling method */
+static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_rx_ring *ring = container_of(napi,
+			struct bcmgenet_rx_ring, napi);
+	unsigned int work_done;
+
+	work_done = bcmgenet_desc_rx(ring, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		ring->int_enable(ring);
+	}
+
+	return work_done;
+}
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
+				     struct bcmgenet_rx_ring *ring)
+{
+	struct enet_cb *cb;
+	struct sk_buff *skb;
+	int i;
+
+	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+
+	/* loop over each buffer that needs an skb assigned */
+	for (i = 0; i < ring->size; i++) {
+		cb = ring->cbs + i;
+		skb = bcmgenet_rx_refill(priv, cb);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		if (!cb->skb)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+	struct device *kdev = &priv->pdev->dev;
+	struct enet_cb *cb;
+	int i;
+
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = &priv->rx_cbs[i];
+
+		if (dma_unmap_addr(cb, dma_addr)) {
+			dma_unmap_single(kdev,
+					 dma_unmap_addr(cb, dma_addr),
+					 priv->rx_buf_len, DMA_FROM_DEVICE);
+			dma_unmap_addr_set(cb, dma_addr, 0);
+		}
+
+		if (cb->skb)
+			bcmgenet_free_cb(cb);
+	}
+}
+
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+{
+	u32 reg;
+
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	if (enable)
+		reg |= mask;
+	else
+		reg &= ~mask;
+	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+	/* UniMAC stops on a packet boundary, wait for a full-size packet
+	 * to be processed
+	 */
+	if (enable == 0)
+		usleep_range(1000, 2000);
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+	struct device *kdev = &priv->pdev->dev;
+	unsigned int timeout = 0;
+	u32 reg;
+
+	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+	bcmgenet_rbuf_ctrl_set(priv, 0);
+	udelay(10);
+
+	/* disable MAC while updating its registers */
+	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+	/* issue soft reset, wait for it to complete */
+	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+	while (timeout++ < 1000) {
+		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+		if (!(reg & CMD_SW_RESET))
+			return 0;
+
+		udelay(1);
+	}
+
+	/* The loop above returns as soon as CMD_SW_RESET clears, so reaching
+	 * this point means the MAC never came out of reset.
+	 */
+	dev_err(kdev, "timeout waiting for MAC to come out of reset\n");
+	return -ETIMEDOUT;
+}
+
+static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+{
+	/* Mask all interrupts */
+	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+}
+
+static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+{
+	u32 int0_enable = 0;
+
+	/* Monitor cable plug/unplug events for the internal PHY, external PHY
+	 * and MoCA PHY
+	 */
+	if (priv->internal_phy) {
+		int0_enable |= UMAC_IRQ_LINK_EVENT;
+	} else if (priv->ext_phy) {
+		int0_enable |= UMAC_IRQ_LINK_EVENT;
+	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+			int0_enable |= UMAC_IRQ_LINK_EVENT;
+	}
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+	struct device *kdev = &priv->pdev->dev;
+	int ret;
+	u32 reg;
+	u32 int0_enable = 0;
+	u32 int1_enable = 0;
+	int i;
+
+	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+	ret = reset_umac(priv);
+	if (ret)
+		return ret;
+
+	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+	/* clear tx/rx counter */
+	bcmgenet_umac_writel(priv,
+			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
+			     UMAC_MIB_CTRL);
+	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+	/* init rx registers, enable ip header optimization */
+	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+	reg |= RBUF_ALIGN_2B;
+	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+	bcmgenet_intr_disable(priv);
+
+	/* Enable Rx default queue 16 interrupts */
+	int0_enable |= UMAC_IRQ_RXDMA_DONE;
+
+	/* Enable Tx default queue 16 interrupts */
+	int0_enable |= UMAC_IRQ_TXDMA_DONE;
+
+	/* Configure backpressure vectors for MoCA */
+	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+		reg = bcmgenet_bp_mc_get(priv);
+		reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+		/* bp_mask: back pressure mask */
+		if (netif_is_multiqueue(priv->dev))
+			reg |= priv->hw_params->bp_in_mask;
+		else
+			reg &= ~priv->hw_params->bp_in_mask;
+		bcmgenet_bp_mc_set(priv, reg);
+	}
+
+	/* Enable MDIO interrupts on GENET v3+ */
+	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+
+	/* Enable Rx priority queue interrupts */
+	for (i = 0; i < priv->hw_params->rx_queues; ++i)
+		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
+
+	/* Enable Tx priority queue interrupts */
+	for (i = 0; i < priv->hw_params->tx_queues; ++i)
+		int1_enable |= (1 << i);
+
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+	/* The Rx/Tx engines themselves are enabled later, from
+	 * bcmgenet_netif_start()
+	 */
+	dev_dbg(kdev, "done init umac\n");
+
+	return 0;
+}
+
+/* Initialize a Tx ring along with corresponding hardware registers */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+				  unsigned int index, unsigned int size,
+				  unsigned int start_ptr, unsigned int end_ptr)
+{
+	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+	u32 words_per_bd = WORDS_PER_BD(priv);
+	u32 flow_period_val = 0;
+
+	spin_lock_init(&ring->lock);
+	ring->priv = priv;
+	ring->index = index;
+	if (index == DESC_INDEX) {
+		ring->queue = 0;
+		ring->int_enable = bcmgenet_tx_ring16_int_enable;
+		ring->int_disable = bcmgenet_tx_ring16_int_disable;
+	} else {
+		ring->queue = index + 1;
+		ring->int_enable = bcmgenet_tx_ring_int_enable;
+		ring->int_disable = bcmgenet_tx_ring_int_disable;
+	}
+	ring->cbs = priv->tx_cbs + start_ptr;
+	ring->size = size;
+	ring->clean_ptr = start_ptr;
+	ring->c_index = 0;
+	ring->free_bds = size;
+	ring->write_ptr = start_ptr;
+	ring->cb_ptr = start_ptr;
+	ring->end_ptr = end_ptr - 1;
+	ring->prod_index = 0;
+
+	/* Set flow period for ring != 16 */
+	if (index != DESC_INDEX)
+		flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+	/* Disable rate control for now */
+	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+				  TDMA_FLOW_PERIOD);
+	bcmgenet_tdma_ring_writel(priv, index,
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
+
+	/* Set start and end address, read and write pointers */
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  DMA_START_ADDR);
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  TDMA_READ_PTR);
+	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  TDMA_WRITE_PTR);
+	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+				  DMA_END_ADDR);
+}
+
+/* Initialize an RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+				 unsigned int index, unsigned int size,
+				 unsigned int start_ptr, unsigned int end_ptr)
+{
+	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
+	u32 words_per_bd = WORDS_PER_BD(priv);
+	int ret;
+
+	ring->priv = priv;
+	ring->index = index;
+	if (index == DESC_INDEX) {
+		ring->int_enable = bcmgenet_rx_ring16_int_enable;
+		ring->int_disable = bcmgenet_rx_ring16_int_disable;
+	} else {
+		ring->int_enable = bcmgenet_rx_ring_int_enable;
+		ring->int_disable = bcmgenet_rx_ring_int_disable;
+	}
+	ring->cbs = priv->rx_cbs + start_ptr;
+	ring->size = size;
+	ring->c_index = 0;
+	ring->read_ptr = start_ptr;
+	ring->cb_ptr = start_ptr;
+	ring->end_ptr = end_ptr - 1;
+
+	ret = bcmgenet_alloc_rx_buffers(priv, ring);
+	if (ret)
+		return ret;
+
+	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+	bcmgenet_rdma_ring_writel(priv, index,
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
+	bcmgenet_rdma_ring_writel(priv, index,
+				  (DMA_FC_THRESH_LO <<
+				   DMA_XOFF_THRESHOLD_SHIFT) |
+				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+
+	/* Set start and end address, read and write pointers */
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  DMA_START_ADDR);
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  RDMA_READ_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
+				  RDMA_WRITE_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+				  DMA_END_ADDR);
+
+	return ret;
+}
+
+static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+}
+
+static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		napi_enable(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		napi_disable(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_tx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+		ring = &priv->tx_rings[i];
+		netif_napi_del(&ring->napi);
+	}
+
+	ring = &priv->tx_rings[DESC_INDEX];
+	netif_napi_del(&ring->napi);
+}
+
+/* Initialize Tx queues
+ *
+ * Queues 0-3 are priority-based, each one has 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * Queue 16 is the default Tx queue with
+ * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ *
+ * The transmit control block pool is then partitioned as follows:
+ * - Tx queue 0 uses tx_cbs[0..31]
+ * - Tx queue 1 uses tx_cbs[32..63]
+ * - Tx queue 2 uses tx_cbs[64..95]
+ * - Tx queue 3 uses tx_cbs[96..127]
+ * - Tx queue 16 uses tx_cbs[128..255]
+ */
+static void bcmgenet_init_tx_queues(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 i, dma_enable;
+	u32 dma_ctrl, ring_cfg;
+	u32 dma_priority[3] = {0, 0, 0};
+
+	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	dma_enable = dma_ctrl & DMA_EN;
+	dma_ctrl &= ~DMA_EN;
+	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+	dma_ctrl = 0;
+	ring_cfg = 0;
+
+	/* Enable strict priority arbiter mode */
+	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
+	/* Initialize Tx priority queues */
+	for (i = 0; i < priv->hw_params->tx_queues; i++) {
+		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
+				      i * priv->hw_params->tx_bds_per_q,
+				      (i + 1) * priv->hw_params->tx_bds_per_q);
+		ring_cfg |= (1 << i);
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
+			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+	}
+
+	/* Initialize Tx default queue 16 */
+	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
+			      priv->hw_params->tx_queues *
+			      priv->hw_params->tx_bds_per_q,
+			      TOTAL_DESC);
+	ring_cfg |= (1 << DESC_INDEX);
+	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
+		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
+		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
+
+	/* Set Tx queue priorities */
+	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
+	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
+	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
+
+	/* Initialize Tx NAPI */
+	bcmgenet_init_tx_napi(priv);
+
+	/* Enable Tx queues */
+	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+
+	/* Enable Tx DMA */
+	if (dma_enable)
+		dma_ctrl |= DMA_EN;
+	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+}
+
+static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
+}
+
+static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_enable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		napi_disable(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	napi_disable(&ring->napi);
+}
+
+static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
+{
+	unsigned int i;
+	struct bcmgenet_rx_ring *ring;
+
+	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+		ring = &priv->rx_rings[i];
+		netif_napi_del(&ring->napi);
+	}
+
+	ring = &priv->rx_rings[DESC_INDEX];
+	netif_napi_del(&ring->napi);
+}
+
+/* Initialize Rx queues
+ *
+ * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
+ * used to direct traffic to these queues.
+ *
+ * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ */
+static int bcmgenet_init_rx_queues(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 i;
+	u32 dma_enable;
+	u32 dma_ctrl;
+	u32 ring_cfg;
+	int ret;
+
+	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	dma_enable = dma_ctrl & DMA_EN;
+	dma_ctrl &= ~DMA_EN;
+	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+	dma_ctrl = 0;
+	ring_cfg = 0;
+
+	/* Initialize Rx priority queues */
+	for (i = 0; i < priv->hw_params->rx_queues; i++) {
+		ret = bcmgenet_init_rx_ring(priv, i,
+					    priv->hw_params->rx_bds_per_q,
+					    i * priv->hw_params->rx_bds_per_q,
+					    (i + 1) *
+					    priv->hw_params->rx_bds_per_q);
+		if (ret)
+			return ret;
+
+		ring_cfg |= (1 << i);
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	}
+
+	/* Initialize Rx default queue 16 */
+	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
+				    priv->hw_params->rx_queues *
+				    priv->hw_params->rx_bds_per_q,
+				    TOTAL_DESC);
+	if (ret)
+		return ret;
+
+	ring_cfg |= (1 << DESC_INDEX);
+	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+
+	/* Initialize Rx NAPI */
+	bcmgenet_init_rx_napi(priv);
+
+	/* Enable rings */
+	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+
+	/* Enable the ring buffers and re-enable RDMA if it was enabled */
+	if (dma_enable)
+		dma_ctrl |= DMA_EN;
+	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+	return 0;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+	int ret = 0;
+	int timeout = 0;
+	u32 reg;
+	u32 dma_ctrl;
+	int i;
+
+	/* Disable TDMA so that no more frames are added to the TX DMA */
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+	/* Check TDMA status register to confirm TDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout > DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	/* Wait 10ms for packets to drain from both the Tx and Rx DMA */
+	usleep_range(10000, 20000);
+
+	/* Disable RDMA */
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~DMA_EN;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	timeout = 0;
+	/* Check RDMA status register to confirm RDMA is disabled */
+	while (timeout++ < DMA_TIMEOUT_VAL) {
+		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+		if (reg & DMA_DISABLED)
+			break;
+
+		udelay(1);
+	}
+
+	if (timeout > DMA_TIMEOUT_VAL) {
+		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+		ret = -ETIMEDOUT;
+	}
+
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->rx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+	return ret;
+}
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+	int i;
+
+	bcmgenet_fini_rx_napi(priv);
+	bcmgenet_fini_tx_napi(priv);
+
+	/* disable DMA */
+	bcmgenet_dma_teardown(priv);
+
+	for (i = 0; i < priv->num_tx_bds; i++) {
+		if (priv->tx_cbs[i].skb != NULL) {
+			dev_kfree_skb(priv->tx_cbs[i].skb);
+			priv->tx_cbs[i].skb = NULL;
+		}
+	}
+
+	bcmgenet_free_rx_buffers(priv);
+	kfree(priv->rx_cbs);
+	kfree(priv->tx_cbs);
+}
+
+/* bcmgenet_init_dma: initialize the Rx/Tx DMA rings and software state */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+	int ret;
+	unsigned int i;
+	struct enet_cb *cb;
+
+	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+
+	/* Initialize common Rx ring structures */
+	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+	priv->num_rx_bds = TOTAL_DESC;
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
+	if (!priv->rx_cbs)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+	}
+
+	/* Initialize common TX ring structures */
+	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+	priv->num_tx_bds = TOTAL_DESC;
+	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
+	if (!priv->tx_cbs) {
+		kfree(priv->rx_cbs);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < priv->num_tx_bds; i++) {
+		cb = priv->tx_cbs + i;
+		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
+	}
+
+	/* Init rDma */
+	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize Rx queues */
+	ret = bcmgenet_init_rx_queues(priv->dev);
+	if (ret) {
+		netdev_err(priv->dev, "failed to initialize Rx queues\n");
+		bcmgenet_free_rx_buffers(priv);
+		kfree(priv->rx_cbs);
+		kfree(priv->tx_cbs);
+		return ret;
+	}
+
+	/* Init tDma */
+	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize Tx queues */
+	bcmgenet_init_tx_queues(priv->dev);
+
+	return 0;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+	struct bcmgenet_priv *priv = container_of(
+			work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
+		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+		netif_dbg(priv, wol, priv->dev,
+			  "magic packet detected, waking up\n");
+		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+	}
+
+	/* Link UP/DOWN event */
+	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+	    (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
+		phy_mac_interrupt(priv->phydev,
+				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
+		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
+	}
+}
+
+/* bcmgenet_isr1: handle Rx and Tx priority queues */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
+	unsigned int index;
+
+	/* Save irq status for bottom-half processing. */
+	priv->irq1_stat =
+		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+
+	/* clear interrupts */
+	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+	netif_dbg(priv, intr, priv->dev,
+		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
+	/* Check Rx priority queue interrupts */
+	for (index = 0; index < priv->hw_params->rx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+			continue;
+
+		rx_ring = &priv->rx_rings[index];
+
+		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+			rx_ring->int_disable(rx_ring);
+			__napi_schedule(&rx_ring->napi);
+		}
+	}
+
+	/* Check Tx priority queue interrupts */
+	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(index)))
+			continue;
+
+		tx_ring = &priv->tx_rings[index];
+
+		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+			tx_ring->int_disable(tx_ring);
+			__napi_schedule(&tx_ring->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_rx_ring *rx_ring;
+	struct bcmgenet_tx_ring *tx_ring;
+
+	/* Save irq status for bottom-half processing. */
+	priv->irq0_stat =
+		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
+	/* clear interrupts */
+	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+	netif_dbg(priv, intr, priv->dev,
+		  "IRQ=0x%x\n", priv->irq0_stat);
+
+	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+		rx_ring = &priv->rx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+			rx_ring->int_disable(rx_ring);
+			__napi_schedule(&rx_ring->napi);
+		}
+	}
+
+	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+		tx_ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+			tx_ring->int_disable(tx_ring);
+			__napi_schedule(&tx_ring->napi);
+		}
+	}
+
+	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+				UMAC_IRQ_PHY_DET_F |
+				UMAC_IRQ_LINK_EVENT |
+				UMAC_IRQ_HFB_SM |
+				UMAC_IRQ_HFB_MM |
+				UMAC_IRQ_MPD_R)) {
+		/* all other interrupts of interest are handled in the bottom half */
+		schedule_work(&priv->bcmgenet_irq_work);
+	}
+
+	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+		wake_up(&priv->wq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
+{
+	struct bcmgenet_priv *priv = dev_id;
+
+	pm_wakeup_event(&priv->pdev->dev, 0);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Invoke the main RX/TX interrupt handler */
+	disable_irq(priv->irq0);
+	bcmgenet_isr0(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	/* And the interrupt handler for RX/TX priority queues */
+	disable_irq(priv->irq1);
+	bcmgenet_isr1(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+	u32 reg;
+
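+	/* Pulse bit 1 of the RBUF flush control word to take the UniMAC
+	 * through a reset cycle.
+	 */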
+	reg = bcmgenet_rbuf_ctrl_get(priv);
+	reg |= BIT(1);
+	bcmgenet_rbuf_ctrl_set(priv, reg);
+	udelay(10);
+
+	reg &= ~BIT(1);
+	bcmgenet_rbuf_ctrl_set(priv, reg);
+	udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+				 unsigned char *addr)
+{
+	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+			(addr[2] << 8) | addr[3], UMAC_MAC0);
+	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+	u32 reg;
+	u32 dma_ctrl;
+
+	/* disable DMA */
+	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
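+	/* Briefly assert UMAC_TX_FLUSH to flush any frames still sitting in
+	 * the UniMAC transmit path.
+	 */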
+	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+	udelay(10);
+	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+	return dma_ctrl;
+}
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+	u32 reg;
+
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg |= dma_ctrl;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg |= dma_ctrl;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+					   u32 f_index)
+{
+	u32 offset;
+	u32 reg;
+
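+	/* Filter enable bits are split across two 32-bit words: filters 0-31
+	 * use the word at HFB_FLT_ENABLE_V3PLUS + 4, filters 32 and above the
+	 * word at HFB_FLT_ENABLE_V3PLUS.
+	 */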
+	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+	u32 offset;
+	u32 reg;
+
+	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	reg |= (1 << (f_index % 32));
+	bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+						     u32 f_index, u32 rx_queue)
+{
+	u32 offset;
+	u32 reg;
+
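+	/* Each DMA_INDEX2RING register holds the 4-bit Rx queue mapping for
+	 * eight consecutive filters.
+	 */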
+	offset = f_index / 8;
+	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+	reg &= ~(0xF << (4 * (f_index % 8)));
+	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+					   u32 f_index, u32 f_length)
+{
+	u32 offset;
+	u32 reg;
+
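+	/* Filter lengths are packed four per 32-bit word, one byte each,
+	 * indexed from the last filter downwards.
+	 */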
+	offset = HFB_FLT_LEN_V3PLUS +
+		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+		 sizeof(u32);
+	reg = bcmgenet_hfb_reg_readl(priv, offset);
+	reg &= ~(0xFF << (8 * (f_index % 4)));
+	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+	bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+	u32 f_index;
+
+	for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+			return f_index;
+
+	return -ENOMEM;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit  19    - nibble 0 match enable
+ * bit  18    - nibble 1 match enable
+ * bit  17    - nibble 2 match enable
+ * bit  16    - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8  - nibble 1 data
+ * bits 7:4   - nibble 2 data
+ * bits 3:0   - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+			    u32 f_length, u32 rx_queue)
+{
+	int f_index;
+	u32 i;
+
+	f_index = bcmgenet_hfb_find_unused_filter(priv);
+	if (f_index < 0)
+		return -ENOMEM;
+
+	if (f_length > priv->hw_params->hfb_filter_size)
+		return -EINVAL;
+
+	for (i = 0; i < f_length; i++)
+		bcmgenet_hfb_writel(priv, f_data[i],
+			(f_index * priv->hw_params->hfb_filter_size + i) *
+			sizeof(u32));
+
+	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+	bcmgenet_hfb_enable_filter(priv, f_index);
+	bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
+
+	return 0;
+}
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+	u32 i;
+
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+		bcmgenet_rdma_writel(priv, 0x0, i);
+
+	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+		bcmgenet_hfb_reg_writel(priv, 0x0,
+					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+			priv->hw_params->hfb_filter_size; i++)
+		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+		return;
+
+	bcmgenet_hfb_clear(priv);
+}
+
+static void bcmgenet_netif_start(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Start the network engine */
+	bcmgenet_enable_rx_napi(priv);
+	bcmgenet_enable_tx_napi(priv);
+
+	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+
+	netif_tx_start_all_queues(dev);
+
+	/* Monitor link interrupts now */
+	bcmgenet_link_intr_enable(priv);
+
+	phy_start(priv->phydev);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	unsigned long dma_ctrl;
+	u32 reg;
+	int ret;
+
+	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+	/* Turn on the clock */
+	clk_prepare_enable(priv->clk);
+
+	/* If this is an internal GPHY, power it back on now, before UniMAC is
+	 * brought out of reset as absolutely no UniMAC activity is allowed
+	 */
+	if (priv->internal_phy)
+		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+	/* take MAC out of reset */
+	bcmgenet_umac_reset(priv);
+
+	ret = init_umac(priv);
+	if (ret)
+		goto err_clk_disable;
+
+	/* disable ethernet MAC while updating its registers */
+	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+	/* Make sure we reflect the value of CRC_CMD_FWD */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+	bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+	if (priv->internal_phy) {
+		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+		reg |= EXT_ENERGY_DET_MASK;
+		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+	}
+
+	/* Disable RX/TX DMA and flush TX queues */
+	dma_ctrl = bcmgenet_dma_disable(priv);
+
+	/* Reinitialize TDMA and RDMA and SW housekeeping */
+	ret = bcmgenet_init_dma(priv);
+	if (ret) {
+		netdev_err(dev, "failed to initialize DMA\n");
+		goto err_clk_disable;
+	}
+
+	/* Always enable ring 16 - descriptor ring */
+	bcmgenet_enable_dma(priv, dma_ctrl);
+
+	/* HFB init */
+	bcmgenet_hfb_init(priv);
+
+	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+			  dev->name, priv);
+	if (ret < 0) {
+		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+		goto err_fini_dma;
+	}
+
+	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+			  dev->name, priv);
+	if (ret < 0) {
+		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+		goto err_irq0;
+	}
+
+	ret = bcmgenet_mii_probe(dev);
+	if (ret) {
+		netdev_err(dev, "failed to connect to PHY\n");
+		goto err_irq1;
+	}
+
+	bcmgenet_netif_start(dev);
+
+	return 0;
+
+err_irq1:
+	free_irq(priv->irq1, priv);
+err_irq0:
+	free_irq(priv->irq0, priv);
+err_fini_dma:
+	bcmgenet_fini_dma(priv);
+err_clk_disable:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static void bcmgenet_netif_stop(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	netif_tx_stop_all_queues(dev);
+	phy_stop(priv->phydev);
+	bcmgenet_intr_disable(priv);
+	bcmgenet_disable_rx_napi(priv);
+	bcmgenet_disable_tx_napi(priv);
+
+	/* Wait for pending work items to complete. Since interrupts are
+	 * disabled no new work will be scheduled.
+	 */
+	cancel_work_sync(&priv->bcmgenet_irq_work);
+
+	priv->old_link = -1;
+	priv->old_speed = -1;
+	priv->old_duplex = -1;
+	priv->old_pause = -1;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int ret;
+
+	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+	bcmgenet_netif_stop(dev);
+
+	/* Really kill the PHY state machine and disconnect from it */
+	phy_disconnect(priv->phydev);
+
+	/* Disable MAC receive */
+	umac_enable_set(priv, CMD_RX_EN, false);
+
+	ret = bcmgenet_dma_teardown(priv);
+	if (ret)
+		return ret;
+
+	/* Disable MAC transmit. TX DMA must be disabled before this. */
+	umac_enable_set(priv, CMD_TX_EN, false);
+
+	/* tx reclaim */
+	bcmgenet_tx_reclaim_all(dev);
+	bcmgenet_fini_dma(priv);
+
+	free_irq(priv->irq0, priv);
+	free_irq(priv->irq1, priv);
+
+	if (priv->internal_phy)
+		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+	struct bcmgenet_priv *priv = ring->priv;
+	u32 p_index, c_index, intsts, intmsk;
+	struct netdev_queue *txq;
+	unsigned int free_bds;
+	unsigned long flags;
+	bool txq_stopped;
+
+	if (!netif_msg_tx_err(priv))
+		return;
+
+	txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (ring->index == DESC_INDEX) {
+		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+	} else {
+		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+		intmsk = 1 << ring->index;
+	}
+	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+	txq_stopped = netif_tx_queue_stopped(txq);
+	free_bds = ring->free_bds;
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+		  "TX queue status: %s, interrupts: %s\n"
+		  "(sw)free_bds: %d (sw)size: %d\n"
+		  "(sw)p_index: %d (hw)p_index: %d\n"
+		  "(sw)c_index: %d (hw)c_index: %d\n"
+		  "(sw)clean_p: %d (sw)write_p: %d\n"
+		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+		  ring->index, ring->queue,
+		  txq_stopped ? "stopped" : "active",
+		  intsts & intmsk ? "enabled" : "disabled",
+		  free_bds, ring->size,
+		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
+		  ring->c_index, c_index & DMA_C_INDEX_MASK,
+		  ring->clean_ptr, ring->write_ptr,
+		  ring->cb_ptr, ring->end_ptr);
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 int0_enable = 0;
+	u32 int1_enable = 0;
+	unsigned int q;
+
+	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+	for (q = 0; q < priv->hw_params->tx_queues; q++)
+		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+	bcmgenet_tx_reclaim_all(dev);
+
+	for (q = 0; q < priv->hw_params->tx_queues; q++)
+		int1_enable |= (1 << q);
+
+	int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+	/* Re-enable TX interrupts if disabled */
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+	dev->trans_start = jiffies;
+
+	dev->stats.tx_errors++;
+
+	netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT	16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+					 unsigned char *addr,
+					 int *i,
+					 int *mc)
+{
+	u32 reg;
+
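+	/* Each MDF entry consumes two UMAC_MDF_ADDR words: the first holds
+	 * the two most significant address bytes, the second the remaining
+	 * four. The entry is enabled by setting its bit in UMAC_MDF_CTRL.
+	 */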
+	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+			     UMAC_MDF_ADDR + (*i * 4));
+	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+			     addr[4] << 8 | addr[5],
+			     UMAC_MDF_ADDR + ((*i + 1) * 4));
+	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+	reg |= (1 << (MAX_MC_COUNT - *mc));
+	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+	*i += 2;
+	(*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct netdev_hw_addr *ha;
+	int i, mc;
+	u32 reg;
+
+	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+	/* Promiscuous mode */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	if (dev->flags & IFF_PROMISC) {
+		reg |= CMD_PROMISC;
+		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+		return;
+	} else {
+		reg &= ~CMD_PROMISC;
+		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+	}
+
+	/* UniMac doesn't support ALLMULTI */
+	if (dev->flags & IFF_ALLMULTI) {
+		netdev_warn(dev, "ALLMULTI is not supported\n");
+		return;
+	}
+
+	/* update MDF filter */
+	i = 0;
+	mc = 0;
+	/* Broadcast */
+	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+	/* our own MAC address */
+	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+	/* Unicast list */
+	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+		return;
+
+	if (!netdev_uc_empty(dev))
+		netdev_for_each_uc_addr(ha, dev)
+			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+	/* Multicast */
+	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+		return;
+
+	netdev_for_each_mc_addr(ha, dev)
+		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	/* Setting the MAC address at the hardware level is not possible
+	 * without disabling the UniMAC RX/TX enable bits.
+	 */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+	return 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+	.ndo_open		= bcmgenet_open,
+	.ndo_stop		= bcmgenet_close,
+	.ndo_start_xmit		= bcmgenet_xmit,
+	.ndo_tx_timeout		= bcmgenet_timeout,
+	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
+	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
+	.ndo_do_ioctl		= bcmgenet_ioctl,
+	.ndo_set_features	= bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcmgenet_poll_controller,
+#endif
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+	[GENET_V1] = {
+		.tx_queues = 0,
+		.tx_bds_per_q = 0,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 16,
+		.bp_in_mask = 0xffff,
+		.hfb_filter_cnt = 16,
+		.qtag_mask = 0x1F,
+		.hfb_offset = 0x1000,
+		.rdma_offset = 0x2000,
+		.tdma_offset = 0x3000,
+		.words_per_bd = 2,
+	},
+	[GENET_V2] = {
+		.tx_queues = 4,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 16,
+		.bp_in_mask = 0xffff,
+		.hfb_filter_cnt = 16,
+		.qtag_mask = 0x1F,
+		.tbuf_offset = 0x0600,
+		.hfb_offset = 0x1000,
+		.hfb_reg_offset = 0x2000,
+		.rdma_offset = 0x3000,
+		.tdma_offset = 0x4000,
+		.words_per_bd = 2,
+		.flags = GENET_HAS_EXT,
+	},
+	[GENET_V3] = {
+		.tx_queues = 4,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 17,
+		.bp_in_mask = 0x1ffff,
+		.hfb_filter_cnt = 48,
+		.hfb_filter_size = 128,
+		.qtag_mask = 0x3F,
+		.tbuf_offset = 0x0600,
+		.hfb_offset = 0x8000,
+		.hfb_reg_offset = 0xfc00,
+		.rdma_offset = 0x10000,
+		.tdma_offset = 0x11000,
+		.words_per_bd = 2,
+		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+			 GENET_HAS_MOCA_LINK_DET,
+	},
+	[GENET_V4] = {
+		.tx_queues = 4,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 17,
+		.bp_in_mask = 0x1ffff,
+		.hfb_filter_cnt = 48,
+		.hfb_filter_size = 128,
+		.qtag_mask = 0x3F,
+		.tbuf_offset = 0x0600,
+		.hfb_offset = 0x8000,
+		.hfb_reg_offset = 0xfc00,
+		.rdma_offset = 0x2000,
+		.tdma_offset = 0x4000,
+		.words_per_bd = 3,
+		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+	},
+};
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+	struct bcmgenet_hw_params *params;
+	u32 reg;
+	u8 major;
+	u16 gphy_rev;
+
+	if (GENET_IS_V4(priv)) {
+		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+		genet_dma_ring_regs = genet_dma_ring_regs_v4;
+		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+		priv->version = GENET_V4;
+	} else if (GENET_IS_V3(priv)) {
+		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+		genet_dma_ring_regs = genet_dma_ring_regs_v123;
+		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+		priv->version = GENET_V3;
+	} else if (GENET_IS_V2(priv)) {
+		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+		genet_dma_ring_regs = genet_dma_ring_regs_v123;
+		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+		priv->version = GENET_V2;
+	} else if (GENET_IS_V1(priv)) {
+		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+		genet_dma_ring_regs = genet_dma_ring_regs_v123;
+		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+		priv->version = GENET_V1;
+	}
+
+	/* enum genet_version starts at 1 */
+	priv->hw_params = &bcmgenet_hw_params[priv->version];
+	params = priv->hw_params;
+
+	/* Read GENET HW version */
+	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
+	major = (reg >> 24 & 0x0f);
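+	/* Normalize the raw major number: 5 is treated as V4 and 0 as V1
+	 * before comparing it against the configured version.
+	 */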
+	if (major == 5)
+		major = 4;
+	else if (major == 0)
+		major = 1;
+	if (major != priv->version) {
+		dev_err(&priv->pdev->dev,
+			"GENET version mismatch, got: %d, configured for: %d\n",
+			major, priv->version);
+	}
+
+	/* Print the GENET core version */
+	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+		 major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+	/* Store the integrated PHY revision for the MDIO probing function
+	 * to pass this information to the PHY driver. The PHY driver expects
+	 * to find the PHY major revision in bits 15:8 while the GENET register
+	 * stores that information in bits 7:0, account for that.
+	 *
+	 * On newer chips, starting with PHY revision G0, a new scheme is
+	 * deployed similar to the Starfighter 2 switch with GPHY major
+	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
+	 * is reserved, as is the special value 0x01ff, so we use a small
+	 * heuristic to detect the new GPHY revision scheme and re-arrange
+	 * things so the GPHY driver is happy.
+	 */
+	gphy_rev = reg & 0xffff;
+
+	/* This is the good old scheme, just GPHY major, no minor nor patch */
+	if ((gphy_rev & 0xf0) != 0)
+		priv->gphy_rev = gphy_rev << 8;
+
+	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
+	else if ((gphy_rev & 0xff00) != 0)
+		priv->gphy_rev = gphy_rev;
+
+	/* This is reserved so should require special treatment */
+	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+		return;
+	}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	if (!(params->flags & GENET_HAS_40BITS))
+		pr_warn("GENET does not support 40-bit PA\n");
+#endif
+
+	pr_debug("Configuration for version: %d\n"
+		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
+		"BP << en: %2d, BP msk: 0x%05x\n"
+		"HFB count: %2d, QTAG msk: 0x%05x\n"
+		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+		"RDMA: 0x%05x, TDMA: 0x%05x\n"
+		"Words/BD: %d\n",
+		priv->version,
+		params->tx_queues, params->tx_bds_per_q,
+		params->rx_queues, params->rx_bds_per_q,
+		params->bp_in_en_shift, params->bp_in_mask,
+		params->hfb_filter_cnt, params->qtag_mask,
+		params->tbuf_offset, params->hfb_offset,
+		params->hfb_reg_offset,
+		params->rdma_offset, params->tdma_offset,
+		params->words_per_bd);
+}
+
+static const struct of_device_id bcmgenet_match[] = {
+	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bcmgenet_match);
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
+	struct device_node *dn = pdev->dev.of_node;
+	const struct of_device_id *of_id = NULL;
+	struct bcmgenet_priv *priv;
+	struct net_device *dev;
+	const void *macaddr;
+	struct resource *r;
+	int err = -EIO;
+
+	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+				 GENET_MAX_MQ_CNT + 1);
+	if (!dev) {
+		dev_err(&pdev->dev, "can't allocate net device\n");
+		return -ENOMEM;
+	}
+
+	if (dn) {
+		of_id = of_match_node(bcmgenet_match, dn);
+		if (!of_id)
+			return -EINVAL;
+	}
+
+	priv = netdev_priv(dev);
+	priv->irq0 = platform_get_irq(pdev, 0);
+	priv->irq1 = platform_get_irq(pdev, 1);
+	priv->wol_irq = platform_get_irq(pdev, 2);
+	if (!priv->irq0 || !priv->irq1) {
+		dev_err(&pdev->dev, "can't find IRQs\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	if (dn) {
+		macaddr = of_get_mac_address(dn);
+		if (!macaddr) {
+			dev_err(&pdev->dev, "can't find MAC address\n");
+			err = -EINVAL;
+			goto err;
+		}
+	} else {
+		macaddr = pd->mac_address;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(priv->base)) {
+		err = PTR_ERR(priv->base);
+		goto err;
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	dev_set_drvdata(&pdev->dev, dev);
+	ether_addr_copy(dev->dev_addr, macaddr);
+	dev->watchdog_timeo = 2 * HZ;
+	dev->ethtool_ops = &bcmgenet_ethtool_ops;
+	dev->netdev_ops = &bcmgenet_netdev_ops;
+
+	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+	/* Set hardware features */
+	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+	/* Request the WOL interrupt and advertise suspend if available */
+	priv->wol_irq_disabled = true;
+	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
+			       dev->name, priv);
+	if (!err)
+		device_set_wakeup_capable(&pdev->dev, 1);
+
+	/* Set the needed headroom to account for any possible
+	 * features being enabled or disabled at runtime
+	 */
+	dev->needed_headroom += 64;
+
+	netdev_boot_setup_check(dev);
+
+	priv->dev = dev;
+	priv->pdev = pdev;
+	if (of_id)
+		priv->version = (enum bcmgenet_version)of_id->data;
+	else
+		priv->version = pd->genet_version;
+
+	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+	if (IS_ERR(priv->clk)) {
+		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+		priv->clk = NULL;
+	}
+
+	clk_prepare_enable(priv->clk);
+
+	bcmgenet_set_hw_params(priv);
+
+	/* Mii wait queue */
+	init_waitqueue_head(&priv->wq);
+	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+	priv->rx_buf_len = RX_BUF_LENGTH;
+	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+	if (IS_ERR(priv->clk_wol)) {
+		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+		priv->clk_wol = NULL;
+	}
+
+	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+	if (IS_ERR(priv->clk_eee)) {
+		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+		priv->clk_eee = NULL;
+	}
+
+	err = reset_umac(priv);
+	if (err)
+		goto err_clk_disable;
+
+	err = bcmgenet_mii_init(dev);
+	if (err)
+		goto err_clk_disable;
+
+	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
+	 * just the ring 16 descriptor-based TX queue)
+	 */
+	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+	/* libphy will determine the link state */
+	netif_carrier_off(dev);
+
+	/* Turn off the main clock, WOL clock is handled separately */
+	clk_disable_unprepare(priv->clk);
+
+	err = register_netdev(dev);
+	if (err)
+		goto err;
+
+	return err;
+
+err_clk_disable:
+	clk_disable_unprepare(priv->clk);
+err:
+	free_netdev(dev);
+	return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	unregister_netdev(priv->dev);
+	bcmgenet_mii_exit(priv->dev);
+	free_netdev(priv->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcmgenet_suspend(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int ret;
+
+	if (!netif_running(dev))
+		return 0;
+
+	bcmgenet_netif_stop(dev);
+
+	if (!device_may_wakeup(d))
+		phy_suspend(priv->phydev);
+
+	netif_device_detach(dev);
+
+	/* Disable MAC receive */
+	umac_enable_set(priv, CMD_RX_EN, false);
+
+	ret = bcmgenet_dma_teardown(priv);
+	if (ret)
+		return ret;
+
+	/* Disable MAC transmit. TX DMA must be disabled before this */
+	umac_enable_set(priv, CMD_TX_EN, false);
+
+	/* tx reclaim */
+	bcmgenet_tx_reclaim_all(dev);
+	bcmgenet_fini_dma(priv);
+
+	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
+	if (device_may_wakeup(d) && priv->wolopts) {
+		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+		clk_prepare_enable(priv->clk_wol);
+	} else if (priv->internal_phy) {
+		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+	}
+
+	/* Turn off the clocks */
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+	struct net_device *dev = dev_get_drvdata(d);
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	unsigned long dma_ctrl;
+	int ret;
+	u32 reg;
+
+	if (!netif_running(dev))
+		return 0;
+
+	/* Turn on the clock */
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* If this is an internal GPHY, power it back on now, before UniMAC is
+	 * brought out of reset as absolutely no UniMAC activity is allowed
+	 */
+	if (priv->internal_phy)
+		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+	bcmgenet_umac_reset(priv);
+
+	ret = init_umac(priv);
+	if (ret)
+		goto out_clk_disable;
+
+	/* From WOL-enabled suspend, switch to regular clock */
+	if (priv->wolopts)
+		clk_disable_unprepare(priv->clk_wol);
+
+	phy_init_hw(priv->phydev);
+	/* Speed settings must be restored */
+	bcmgenet_mii_config(priv->dev);
+
+	/* disable ethernet MAC while updating its registers */
+	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+	bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+	if (priv->internal_phy) {
+		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+		reg |= EXT_ENERGY_DET_MASK;
+		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+	}
+
+	if (priv->wolopts)
+		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
+	/* Disable RX/TX DMA and flush TX queues */
+	dma_ctrl = bcmgenet_dma_disable(priv);
+
+	/* Reinitialize TDMA and RDMA and SW housekeeping */
+	ret = bcmgenet_init_dma(priv);
+	if (ret) {
+		netdev_err(dev, "failed to initialize DMA\n");
+		goto out_clk_disable;
+	}
+
+	/* Always enable ring 16 - the descriptor-based ring */
+	bcmgenet_enable_dma(priv, dma_ctrl);
+
+	netif_device_attach(dev);
+
+	if (!device_may_wakeup(d))
+		phy_resume(priv->phydev);
+
+	if (priv->eee.eee_enabled)
+		bcmgenet_eee_enable_set(dev, true);
+
+	bcmgenet_netif_start(dev);
+
+	return 0;
+
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+
+static struct platform_driver bcmgenet_driver = {
+	.probe	= bcmgenet_probe,
+	.remove	= bcmgenet_remove,
+	.driver	= {
+		.name	= "bcmgenet",
+		.of_match_table = bcmgenet_match,
+		.pm	= &bcmgenet_pm_ops,
+	},
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 0000000..9673675
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC				256
+
+/* which ring is descriptor based */
+#define DESC_INDEX				16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
+ * With ENET_PAD(8) this rounds up to 1536, a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN	6
+#define ENET_PAD		8
+#define ENET_MAX_MTU_SIZE	(ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+				 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH    0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB			0xFF
+#define DMA_FC_THRESH_HI		(TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO		5
+
+/* 64B receive/transmit status block */
+struct status_64 {
+	u32	length_status;		/* length and peripheral status */
+	u32	ext_status;		/* Extended status*/
+	u32	rx_csum;		/* partial rx checksum */
+	u32	unused1[9];		/* unused */
+	u32	tx_csum_info;		/* Tx checksum info. */
+	u32	unused2[3];		/* unused */
+};
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK		0x1FFFFF
+#define STATUS_RX_CSUM_MASK		0xFFFF
+#define STATUS_RX_CSUM_OK		0x10000
+#define STATUS_RX_CSUM_FR		0x20000
+#define STATUS_RX_PROTO_TCP		0
+#define STATUS_RX_PROTO_UDP		1
+#define STATUS_RX_PROTO_ICMP		2
+#define STATUS_RX_PROTO_OTHER		3
+#define STATUS_RX_PROTO_MASK		3
+#define STATUS_RX_PROTO_SHIFT		18
+#define STATUS_FILTER_INDEX_MASK	0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK	0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT	16
+#define STATUS_TX_CSUM_PROTO_UDP	0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK	0x7FFF
+#define STATUS_TX_CSUM_LV		0x80000000
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS	0x00	/* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO	0x04	/* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI	0x08	/* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
+	u32	cnt_127;	/* RO Rx/Tx 127 bytes packet */
+	u32	cnt_255;	/* RO Rx/Tx 65-255 bytes packet */
+	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
+	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
+	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
+	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
+	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet*/
+	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet*/
+	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+	struct  bcmgenet_pkt_counters pkt_cnt;
+	u32	pkt;		/* RO (0x428) Received pkt count*/
+	u32	bytes;		/* RO Received byte count */
+	u32	mca;		/* RO # of Received multicast pkt */
+	u32	bca;		/* RO # of Receive broadcast pkt */
+	u32	fcs;		/* RO # of Received FCS error  */
+	u32	cf;		/* RO # of Received control frame pkt*/
+	u32	pf;		/* RO # of Received pause frame pkt */
+	u32	uo;		/* RO # of unknown op code pkt */
+	u32	aln;		/* RO # of alignment error count */
+	u32	flr;		/* RO # of frame length out of range count */
+	u32	cde;		/* RO # of code error pkt */
+	u32	fcr;		/* RO # of carrier sense error pkt */
+	u32	ovr;		/* RO # of oversize pkt*/
+	u32	jbr;		/* RO # of jabber count */
+	u32	mtue;		/* RO # of MTU error pkt*/
+	u32	pok;		/* RO # of Received good pkt */
+	u32	uc;		/* RO # of unicast pkt */
+	u32	ppp;		/* RO # of PPP pkt */
+	u32	rcrc;		/* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+	struct bcmgenet_pkt_counters pkt_cnt;
+	u32	pkts;		/* RO (0x4a8) Transmitted pkt */
+	u32	mca;		/* RO # of xmited multicast pkt */
+	u32	bca;		/* RO # of xmited broadcast pkt */
+	u32	pf;		/* RO # of xmited pause frame count */
+	u32	cf;		/* RO # of xmited control frame count */
+	u32	fcs;		/* RO # of xmited FCS error count */
+	u32	ovr;		/* RO # of xmited oversize pkt */
+	u32	drf;		/* RO # of xmited deferral pkt */
+	u32	edf;		/* RO # of xmited Excessive deferral pkt*/
+	u32	scl;		/* RO # of xmited single collision pkt */
+	u32	mcl;		/* RO # of xmited multiple collision pkt*/
+	u32	lcl;		/* RO # of xmited late collision pkt */
+	u32	ecl;		/* RO # of xmited excessive collision pkt*/
+	u32	frg;		/* RO # of xmited fragments pkt*/
+	u32	ncl;		/* RO # of xmited total collision count */
+	u32	jbr;		/* RO # of xmited jabber count*/
+	u32	bytes;		/* RO # of xmited byte count */
+	u32	pok;		/* RO # of xmited good pkt */
+	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+	struct bcmgenet_rx_counters rx;
+	struct bcmgenet_tx_counters tx;
+	u32	rx_runt_cnt;
+	u32	rx_runt_fcs;
+	u32	rx_runt_fcs_align;
+	u32	rx_runt_bytes;
+	u32	rbuf_ovflow_cnt;
+	u32	rbuf_err_cnt;
+	u32	mdf_err_cnt;
+	u32	alloc_rx_buff_failed;
+	u32	rx_dma_failed;
+	u32	tx_dma_failed;
+};
+
+#define UMAC_HD_BKP_CTRL		0x004
+#define	 HD_FC_EN			(1 << 0)
+#define  HD_FC_BKOFF_OK			(1 << 1)
+#define  IPG_CONFIG_RX_SHIFT		2
+#define  IPG_CONFIG_RX_MASK		0x1F
+
+#define UMAC_CMD			0x008
+#define  CMD_TX_EN			(1 << 0)
+#define  CMD_RX_EN			(1 << 1)
+#define  UMAC_SPEED_10			0
+#define  UMAC_SPEED_100			1
+#define  UMAC_SPEED_1000		2
+#define  UMAC_SPEED_2500		3
+#define  CMD_SPEED_SHIFT		2
+#define  CMD_SPEED_MASK			3
+#define  CMD_PROMISC			(1 << 4)
+#define  CMD_PAD_EN			(1 << 5)
+#define  CMD_CRC_FWD			(1 << 6)
+#define  CMD_PAUSE_FWD			(1 << 7)
+#define  CMD_RX_PAUSE_IGNORE		(1 << 8)
+#define  CMD_TX_ADDR_INS		(1 << 9)
+#define  CMD_HD_EN			(1 << 10)
+#define  CMD_SW_RESET			(1 << 13)
+#define  CMD_LCL_LOOP_EN		(1 << 15)
+#define  CMD_AUTO_CONFIG		(1 << 22)
+#define  CMD_CNTL_FRM_EN		(1 << 23)
+#define  CMD_NO_LEN_CHK			(1 << 24)
+#define  CMD_RMT_LOOP_EN		(1 << 25)
+#define  CMD_PRBL_EN			(1 << 27)
+#define  CMD_TX_PAUSE_IGNORE		(1 << 28)
+#define  CMD_TX_RX_EN			(1 << 29)
+#define  CMD_RUNT_FILTER_DIS		(1 << 30)
+
+#define UMAC_MAC0			0x00C
+#define UMAC_MAC1			0x010
+#define UMAC_MAX_FRAME_LEN		0x014
+
+#define UMAC_EEE_CTRL			0x064
+#define  EN_LPI_RX_PAUSE		(1 << 0)
+#define  EN_LPI_TX_PFC			(1 << 1)
+#define  EN_LPI_TX_PAUSE		(1 << 2)
+#define  EEE_EN				(1 << 3)
+#define  RX_FIFO_CHECK			(1 << 4)
+#define  EEE_TX_CLK_DIS			(1 << 5)
+#define  DIS_EEE_10M			(1 << 6)
+#define  LP_IDLE_PREDICTION_MODE	(1 << 7)
+
+#define UMAC_EEE_LPI_TIMER		0x068
+#define UMAC_EEE_WAKE_TIMER		0x06C
+#define UMAC_EEE_REF_COUNT		0x070
+#define  EEE_REFERENCE_COUNT_MASK	0xffff
+
+#define UMAC_TX_FLUSH			0x334
+
+#define UMAC_MIB_START			0x400
+
+#define UMAC_MDIO_CMD			0x614
+#define  MDIO_START_BUSY		(1 << 29)
+#define  MDIO_READ_FAIL			(1 << 28)
+#define  MDIO_RD			(2 << 26)
+#define  MDIO_WR			(1 << 26)
+#define  MDIO_PMD_SHIFT			21
+#define  MDIO_PMD_MASK			0x1F
+#define  MDIO_REG_SHIFT			16
+#define  MDIO_REG_MASK			0x1F
+
+#define UMAC_RBUF_OVFL_CNT		0x61C
+
+#define UMAC_MPD_CTRL			0x620
+#define  MPD_EN				(1 << 0)
+#define  MPD_PW_EN			(1 << 27)
+#define  MPD_MSEQ_LEN_SHIFT		16
+#define  MPD_MSEQ_LEN_MASK		0xFF
+
+#define UMAC_MPD_PW_MS			0x624
+#define UMAC_MPD_PW_LS			0x628
+#define UMAC_RBUF_ERR_CNT		0x634
+#define UMAC_MDF_ERR_CNT		0x638
+#define UMAC_MDF_CTRL			0x650
+#define UMAC_MDF_ADDR			0x654
+#define UMAC_MIB_CTRL			0x580
+#define  MIB_RESET_RX			(1 << 0)
+#define  MIB_RESET_RUNT			(1 << 1)
+#define  MIB_RESET_TX			(1 << 2)
+
+#define RBUF_CTRL			0x00
+#define  RBUF_64B_EN			(1 << 0)
+#define  RBUF_ALIGN_2B			(1 << 1)
+#define  RBUF_BAD_DIS			(1 << 2)
+
+#define RBUF_STATUS			0x0C
+#define  RBUF_STATUS_WOL		(1 << 0)
+#define  RBUF_STATUS_MPD_INTR_ACTIVE	(1 << 1)
+#define  RBUF_STATUS_ACPI_INTR_ACTIVE	(1 << 2)
+
+#define RBUF_CHK_CTRL			0x14
+#define  RBUF_RXCHK_EN			(1 << 0)
+#define  RBUF_SKIP_FCS			(1 << 4)
+
+#define RBUF_ENERGY_CTRL		0x9c
+#define  RBUF_EEE_EN			(1 << 0)
+#define  RBUF_PM_EN			(1 << 1)
+
+#define RBUF_TBUF_SIZE_CTRL		0xb4
+
+#define RBUF_HFB_CTRL_V1		0x38
+#define  RBUF_HFB_FILTER_EN_SHIFT	16
+#define  RBUF_HFB_FILTER_EN_MASK	0xffff0000
+#define  RBUF_HFB_EN			(1 << 0)
+#define  RBUF_HFB_256B			(1 << 1)
+#define  RBUF_ACPI_EN			(1 << 2)
+
+#define RBUF_HFB_LEN_V1			0x3C
+#define  RBUF_FLTR_LEN_MASK		0xFF
+#define  RBUF_FLTR_LEN_SHIFT		8
+
+#define TBUF_CTRL			0x00
+#define TBUF_BP_MC			0x0C
+#define TBUF_ENERGY_CTRL		0x14
+#define  TBUF_EEE_EN			(1 << 0)
+#define  TBUF_PM_EN			(1 << 1)
+
+#define TBUF_CTRL_V1			0x80
+#define TBUF_BP_MC_V1			0xA0
+
+#define HFB_CTRL			0x00
+#define HFB_FLT_ENABLE_V3PLUS		0x04
+#define HFB_FLT_LEN_V2			0x04
+#define HFB_FLT_LEN_V3PLUS		0x1C
+
+/* uniMac intrl2 registers */
+#define INTRL2_CPU_STAT			0x00
+#define INTRL2_CPU_SET			0x04
+#define INTRL2_CPU_CLEAR		0x08
+#define INTRL2_CPU_MASK_STATUS		0x0C
+#define INTRL2_CPU_MASK_SET		0x10
+#define INTRL2_CPU_MASK_CLEAR		0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB			(1 << 0)
+#define UMAC_IRQ_EPHY			(1 << 1)
+#define UMAC_IRQ_PHY_DET_R		(1 << 2)
+#define UMAC_IRQ_PHY_DET_F		(1 << 3)
+#define UMAC_IRQ_LINK_UP		(1 << 4)
+#define UMAC_IRQ_LINK_DOWN		(1 << 5)
+#define UMAC_IRQ_LINK_EVENT		(UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN)
+#define UMAC_IRQ_UMAC			(1 << 6)
+#define UMAC_IRQ_UMAC_TSV		(1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN		(1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW		(1 << 9)
+#define UMAC_IRQ_HFB_SM			(1 << 10)
+#define UMAC_IRQ_HFB_MM			(1 << 11)
+#define UMAC_IRQ_MPD_R			(1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE		(1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE		(1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE		(1 << 15)
+#define UMAC_IRQ_RXDMA_DONE		UMAC_IRQ_RXDMA_MBDONE
+#define UMAC_IRQ_TXDMA_MBDONE		(1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE		(1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE		(1 << 18)
+#define UMAC_IRQ_TXDMA_DONE		UMAC_IRQ_TXDMA_MBDONE
+
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE		(1 << 23)
+#define UMAC_IRQ_MDIO_ERROR		(1 << 24)
+
+/* INTRL2 instance 1 definitions */
+#define UMAC_IRQ1_TX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_MASK		0xFFFF
+#define UMAC_IRQ1_RX_INTR_SHIFT		16
+
+/* Register block offsets */
+#define GENET_SYS_OFF			0x0000
+#define GENET_GR_BRIDGE_OFF		0x0040
+#define GENET_EXT_OFF			0x0080
+#define GENET_INTRL2_0_OFF		0x0200
+#define GENET_INTRL2_1_OFF		0x0240
+#define GENET_RBUF_OFF			0x0300
+#define GENET_UMAC_OFF			0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL			0x00
+#define SYS_PORT_CTRL			0x04
+#define  PORT_MODE_INT_EPHY		0
+#define  PORT_MODE_INT_GPHY		1
+#define  PORT_MODE_EXT_EPHY		2
+#define  PORT_MODE_EXT_GPHY		3
+#define  PORT_MODE_EXT_RVMII_25		(4 | BIT(4))
+#define  PORT_MODE_EXT_RVMII_50		4
+#define  LED_ACT_SOURCE_MAC		(1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL		0x08
+#define SYS_TBUF_FLUSH_CTRL		0x0C
+#define RBUF_FLUSH_CTRL_V1		0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT		0x00
+#define  EXT_PWR_DOWN_BIAS		(1 << 0)
+#define  EXT_PWR_DOWN_DLL		(1 << 1)
+#define  EXT_PWR_DOWN_PHY		(1 << 2)
+#define  EXT_PWR_DN_EN_LD		(1 << 3)
+#define  EXT_ENERGY_DET			(1 << 4)
+#define  EXT_IDDQ_FROM_PHY		(1 << 5)
+#define  EXT_PHY_RESET			(1 << 8)
+#define  EXT_ENERGY_DET_MASK		(1 << 12)
+
+#define EXT_RGMII_OOB_CTRL		0x0C
+#define  RGMII_LINK			(1 << 4)
+#define  OOB_DISABLE			(1 << 5)
+#define  RGMII_MODE_EN			(1 << 6)
+#define  ID_MODE_DIS			(1 << 16)
+
+#define EXT_GPHY_CTRL			0x1C
+#define  EXT_CFG_IDDQ_BIAS		(1 << 0)
+#define  EXT_CFG_PWR_DOWN		(1 << 1)
+#define  EXT_CK25_DIS			(1 << 4)
+#define  EXT_GPHY_RESET			(1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE			(0x40)
+#define DMA_RINGS_SIZE			(DMA_RING_SIZE * (DESC_INDEX + 1))
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK		0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK	0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT	16
+#define DMA_BUFFER_DONE_CNT_MASK	0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT	16
+#define DMA_P_INDEX_MASK		0xFFFF
+#define DMA_C_INDEX_MASK		0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK		0xFFFF
+#define DMA_RING_SIZE_SHIFT		16
+#define DMA_RING_BUFFER_SIZE_MASK	0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK		0x01FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK		0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK		0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT	16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK		0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK		0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT		16
+
+
+/* DMA control register */
+#define DMA_EN				(1 << 0)
+#define DMA_RING_BUF_EN_SHIFT		0x01
+#define DMA_RING_BUF_EN_MASK		0xFFFF
+#define DMA_TSB_SWAP_EN			(1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED			(1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY		(1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK		0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK	0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK		0x1FFFF
+#define DMA_PFC_ENABLE			(1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK	0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE		(1 << 0)
+#define DMA_REGISTER_MODE		(1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK		0xFFFF
+#define DMA_TIMEOUT_VAL			5000	/* micro seconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK		0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK		0x03
+#define DMA_RING_BUF_PRIORITY_MASK	0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT	5
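+/* Six 5-bit ring priorities are packed into each 32-bit arbitration register */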
+#define DMA_PRIO_REG_INDEX(q)		((q) / 6)
+#define DMA_PRIO_REG_SHIFT(q)		(((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)
+#define DMA_RATE_ADJ_MASK		0xFF
+
+/* Tx/Rx Dma Descriptor common bits*/
+#define DMA_BUFLENGTH_MASK		0x0fff
+#define DMA_BUFLENGTH_SHIFT		16
+#define DMA_OWN				0x8000
+#define DMA_EOP				0x4000
+#define DMA_SOP				0x2000
+#define DMA_WRAP			0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN			0x0200
+#define DMA_TX_APPEND_CRC		0x0040
+#define DMA_TX_OW_CRC			0x0020
+#define DMA_TX_DO_CSUM			0x0010
+#define DMA_TX_QTAG_SHIFT		7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS		0x8000
+#define DMA_RX_CHK_V12			0x1000
+#define DMA_RX_BRDCAST			0x0040
+#define DMA_RX_MULT			0x0020
+#define DMA_RX_LG			0x0010
+#define DMA_RX_NO			0x0008
+#define DMA_RX_RXER			0x0004
+#define DMA_RX_CRC_ERROR		0x0002
+#define DMA_RX_OV			0x0001
+#define DMA_RX_FI_MASK			0x001F
+#define DMA_RX_FI_SHIFT			0x0007
+#define DMA_DESC_ALLOC_MASK		0x00FF
+
+#define DMA_ARBITER_RR			0x00
+#define DMA_ARBITER_WRR			0x01
+#define DMA_ARBITER_SP			0x02
+
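+/* Buffer control block: tracks the skb and its DMA mapping for one
+ * buffer descriptor
+ */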
+struct enet_cb {
+	struct sk_buff      *skb;
+	void __iomem *bd_addr;
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+	GENET_POWER_CABLE_SENSE = 0,
+	GENET_POWER_PASSIVE,
+	GENET_POWER_WOL_MAGIC,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET detection and compile-time selection
+ * to optimize code paths for a given piece of hardware
+ */
+enum bcmgenet_version {
+	GENET_V1 = 1,
+	GENET_V2,
+	GENET_V3,
+	GENET_V4
+};
+
+#define GENET_IS_V1(p)	((p)->version == GENET_V1)
+#define GENET_IS_V2(p)	((p)->version == GENET_V2)
+#define GENET_IS_V3(p)	((p)->version == GENET_V3)
+#define GENET_IS_V4(p)	((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS	(1 << 0)
+#define GENET_HAS_EXT		(1 << 1)
+#define GENET_HAS_MDIO_INTR	(1 << 2)
+#define GENET_HAS_MOCA_LINK_DET	(1 << 3)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+	u8		tx_queues;
+	u8		tx_bds_per_q;
+	u8		rx_queues;
+	u8		rx_bds_per_q;
+	u8		bp_in_en_shift;
+	u32		bp_in_mask;
+	u8		hfb_filter_cnt;
+	u8		hfb_filter_size;
+	u8		qtag_mask;
+	u16		tbuf_offset;
+	u32		hfb_offset;
+	u32		hfb_reg_offset;
+	u32		rdma_offset;
+	u32		tdma_offset;
+	u32		words_per_bd;
+	u32		flags;
+};
+
+struct bcmgenet_tx_ring {
+	spinlock_t	lock;		/* ring lock */
+	struct napi_struct napi;	/* NAPI per tx queue */
+	unsigned int	index;		/* ring index */
+	unsigned int	queue;		/* queue index */
+	struct enet_cb	*cbs;		/* tx ring buffer control block*/
+	unsigned int	size;		/* size of each tx ring */
+	unsigned int    clean_ptr;      /* Tx ring clean pointer */
+	unsigned int	c_index;	/* last consumer index of each ring*/
+	unsigned int	free_bds;	/* # of free bds for each ring */
+	unsigned int	write_ptr;	/* Tx ring write pointer SW copy */
+	unsigned int	prod_index;	/* Tx ring producer index SW copy */
+	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */
+	unsigned int	end_ptr;	/* Tx ring end CB ptr */
+	void (*int_enable)(struct bcmgenet_tx_ring *);
+	void (*int_disable)(struct bcmgenet_tx_ring *);
+	struct bcmgenet_priv *priv;
+};
+
+struct bcmgenet_rx_ring {
+	struct napi_struct napi;	/* Rx NAPI struct */
+	unsigned int	index;		/* Rx ring index */
+	struct enet_cb	*cbs;		/* Rx ring buffer control block */
+	unsigned int	size;		/* Rx ring size */
+	unsigned int	c_index;	/* Rx last consumer index */
+	unsigned int	read_ptr;	/* Rx ring read pointer */
+	unsigned int	cb_ptr;		/* Rx ring initial CB ptr */
+	unsigned int	end_ptr;	/* Rx ring end CB ptr */
+	unsigned int	old_discards;
+	void (*int_enable)(struct bcmgenet_rx_ring *);
+	void (*int_disable)(struct bcmgenet_rx_ring *);
+	struct bcmgenet_priv *priv;
+};
+
+/* device context */
+struct bcmgenet_priv {
+	void __iomem *base;
+	enum bcmgenet_version version;
+	struct net_device *dev;
+
+	/* transmit variables */
+	void __iomem *tx_bds;
+	struct enet_cb *tx_cbs;
+	unsigned int num_tx_bds;
+
+	struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+	/* receive variables */
+	void __iomem *rx_bds;
+	struct enet_cb *rx_cbs;
+	unsigned int num_rx_bds;
+	unsigned int rx_buf_len;
+
+	struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+
+	/* other misc variables */
+	struct bcmgenet_hw_params *hw_params;
+
+	/* MDIO bus variables */
+	wait_queue_head_t wq;
+	struct phy_device *phydev;
+	bool internal_phy;
+	struct device_node *phy_dn;
+	struct device_node *mdio_dn;
+	struct mii_bus *mii_bus;
+	u16 gphy_rev;
+	struct clk *clk_eee;
+	bool clk_eee_enabled;
+
+	/* PHY device variables */
+	int old_link;
+	int old_speed;
+	int old_duplex;
+	int old_pause;
+	phy_interface_t phy_interface;
+	int phy_addr;
+	int ext_phy;
+
+	/* Interrupt variables */
+	struct work_struct bcmgenet_irq_work;
+	int irq0;
+	int irq1;
+	unsigned int irq0_stat;
+	unsigned int irq1_stat;
+	int wol_irq;
+	bool wol_irq_disabled;
+
+	/* HW descriptors/checksum variables */
+	bool desc_64b_en;
+	bool desc_rxchk_en;
+	bool crc_fwd_en;
+
+	unsigned int dma_rx_chk_bit;
+
+	u32 msg_enable;
+
+	struct clk *clk;
+	struct platform_device *pdev;
+
+	/* WOL */
+	struct clk *clk_wol;
+	u32 wolopts;
+
+	struct bcmgenet_mib_counters mib;
+
+	struct ethtool_eee eee;
+};
+
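+/* Generate bcmgenet_<name>_readl()/_writel() accessors for a register
+ * block located at 'offset' from the mapped register base
+ */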
+#define GENET_IO_MACRO(name, offset)					\
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv,	\
+					u32 off)			\
+{									\
+	return __raw_readl(priv->base + offset + off);			\
+}									\
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv,	\
+					u32 val, u32 off)		\
+{									\
+	__raw_writel(val, priv->base + offset + off);			\
+}
+
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors  */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
+void bcmgenet_mii_setup(struct net_device *dev);
+
+/* Wake-on-LAN routines */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+				enum bcmgenet_power_mode mode);
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+			       enum bcmgenet_power_mode mode);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
new file mode 100644
index 0000000..b971229
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -0,0 +1,210 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)				"bcmgenet_wol: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include "bcmgenet.h"
+
+/* ethtool function - get WOL (Wake on LAN) settings. Only Magic Packet
+ * detection is supported through ethtool
+ */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+	wol->wolopts = priv->wolopts;
+	memset(wol->sopass, 0, sizeof(wol->sopass));
+
+	if (wol->wolopts & WAKE_MAGICSECURE) {
+		reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
+		put_unaligned_be16(reg, &wol->sopass[0]);
+		reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
+		put_unaligned_be32(reg, &wol->sopass[2]);
+	}
+}
+
+/* ethtool function - set WOL (Wake on LAN) settings.
+ * Only for magic packet detection mode.
+ */
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
+	u32 reg;
+
+	if (!device_can_wakeup(kdev))
+		return -ENOTSUPP;
+
+	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+		return -EINVAL;
+
+	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+	if (wol->wolopts & WAKE_MAGICSECURE) {
+		bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+				     UMAC_MPD_PW_MS);
+		bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+				     UMAC_MPD_PW_LS);
+		reg |= MPD_PW_EN;
+	} else {
+		reg &= ~MPD_PW_EN;
+	}
+	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	/* Flag the device and relevant IRQ as wakeup capable */
+	if (wol->wolopts) {
+		device_set_wakeup_enable(kdev, 1);
+		/* Avoid unbalanced enable_irq_wake calls */
+		if (priv->wol_irq_disabled)
+			enable_irq_wake(priv->wol_irq);
+		priv->wol_irq_disabled = false;
+	} else {
+		device_set_wakeup_enable(kdev, 0);
+		/* Avoid unbalanced disable_irq_wake calls */
+		if (!priv->wol_irq_disabled)
+			disable_irq_wake(priv->wol_irq);
+		priv->wol_irq_disabled = true;
+	}
+
+	priv->wolopts = wol->wolopts;
+
+	return 0;
+}
+
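+/* Poll RBUF_STATUS until the WOL-ready bit is set; returns the number of
+ * 1 ms polls taken, or -ETIMEDOUT if the bit does not set within a few
+ * milliseconds
+ */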
+static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
+{
+	struct net_device *dev = priv->dev;
+	int retries = 0;
+
+	while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS)
+		& RBUF_STATUS_WOL)) {
+		retries++;
+		if (retries > 5) {
+			netdev_crit(dev, "polling wol mode timeout\n");
+			return -ETIMEDOUT;
+		}
+		mdelay(1);
+	}
+
+	return retries;
+}
+
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+				enum bcmgenet_power_mode mode)
+{
+	struct net_device *dev = priv->dev;
+	u32 cpu_mask_clear;
+	int retries = 0;
+	u32 reg;
+
+	if (mode != GENET_POWER_WOL_MAGIC) {
+		netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
+		return -EINVAL;
+	}
+
+	/* disable RX */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	reg &= ~CMD_RX_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+	mdelay(10);
+
+	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+	reg |= MPD_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	/* Do not leave UniMAC in MPD mode only */
+	retries = bcmgenet_poll_wol_status(priv);
+	if (retries < 0) {
+		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+		reg &= ~MPD_EN;
+		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+		return retries;
+	}
+
+	netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
+		  retries);
+
+	/* Enable CRC forward */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	priv->crc_fwd_en = 1;
+	reg |= CMD_CRC_FWD;
+
+	/* Receiver must be enabled for WOL MP detection */
+	reg |= CMD_RX_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+	if (priv->hw_params->flags & GENET_HAS_EXT) {
+		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+		reg &= ~EXT_ENERGY_DET_MASK;
+		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+	}
+
+	/* Enable the MPD interrupt */
+	cpu_mask_clear = UMAC_IRQ_MPD_R;
+
+	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+
+	return 0;
+}
+
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+			       enum bcmgenet_power_mode mode)
+{
+	u32 cpu_mask_set;
+	u32 reg;
+
+	if (mode != GENET_POWER_WOL_MAGIC) {
+		netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
+		return;
+	}
+
+	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+	reg &= ~MPD_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	/* Disable CRC Forward */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	reg &= ~CMD_CRC_FWD;
+	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+	priv->crc_fwd_en = 0;
+
+	/* Stop monitoring magic packet IRQ */
+	cpu_mask_set = UMAC_IRQ_MPD_R;
+
+	/* Mask the MPD interrupt in the interrupt controller */
+	bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
+}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 0000000..e96d1f9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,649 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_data/bcmgenet.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+	int ret;
+	struct net_device *dev = bus->priv;
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+			     (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+	/* Start MDIO transaction*/
+	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+	reg |= MDIO_START_BUSY;
+	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+	wait_event_timeout(priv->wq,
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+			   & MDIO_START_BUSY),
+			   HZ / 100);
+	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+	/* Some broken devices are known not to release the line during
+	 * turn-around, e.g: Broadcom BCM53125 external switches, so check for
+	 * that condition here and ignore the MDIO controller read failure
+	 * indication.
+	 */
+	if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
+		return -EIO;
+
+	return ret & 0xffff;
+}
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+			      int location, u16 val)
+{
+	struct net_device *dev = bus->priv;
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 reg;
+
+	bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+			     (location << MDIO_REG_SHIFT) | (0xffff & val)),
+			     UMAC_MDIO_CMD);
+	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+	reg |= MDIO_START_BUSY;
+	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+	wait_event_timeout(priv->wq,
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+			   MDIO_START_BUSY),
+			   HZ / 100);
+
+	return 0;
+}
+
+/* Set up the netdev link state when the PHY link status changes and
+ * update the UMAC and RGMII blocks when the link is up
+ */
+void bcmgenet_mii_setup(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	u32 reg, cmd_bits = 0;
+	bool status_changed = false;
+
+	if (priv->old_link != phydev->link) {
+		status_changed = true;
+		priv->old_link = phydev->link;
+	}
+
+	if (phydev->link) {
+		/* check speed/duplex/pause changes */
+		if (priv->old_speed != phydev->speed) {
+			status_changed = true;
+			priv->old_speed = phydev->speed;
+		}
+
+		if (priv->old_duplex != phydev->duplex) {
+			status_changed = true;
+			priv->old_duplex = phydev->duplex;
+		}
+
+		if (priv->old_pause != phydev->pause) {
+			status_changed = true;
+			priv->old_pause = phydev->pause;
+		}
+
+		/* done if nothing has changed */
+		if (!status_changed)
+			return;
+
+		/* speed */
+		if (phydev->speed == SPEED_1000)
+			cmd_bits = UMAC_SPEED_1000;
+		else if (phydev->speed == SPEED_100)
+			cmd_bits = UMAC_SPEED_100;
+		else
+			cmd_bits = UMAC_SPEED_10;
+		cmd_bits <<= CMD_SPEED_SHIFT;
+
+		/* duplex */
+		if (phydev->duplex != DUPLEX_FULL)
+			cmd_bits |= CMD_HD_EN;
+
+		/* pause capability */
+		if (!phydev->pause)
+			cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+		/*
+		 * Program UMAC and RGMII block based on established
+		 * link speed, duplex, and pause. The speed set in
+		 * umac->cmd tells the RGMII block which clock to use for
+		 * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
+		 * Receive clock is provided by the PHY.
+		 */
+		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+		reg &= ~OOB_DISABLE;
+		reg |= RGMII_LINK;
+		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+			       CMD_HD_EN |
+			       CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+		reg |= cmd_bits;
+		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+	} else {
+		/* done if nothing has changed */
+		if (!status_changed)
+			return;
+
+		/* needed for MoCA fixed PHY to reflect correct link status */
+		netif_carrier_off(dev);
+	}
+
+	phy_print_status(phydev);
+}
+
+
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+					  struct fixed_phy_status *status)
+{
+	if (dev && dev->phydev && status)
+		status->link = dev->phydev->link;
+
+	return 0;
+}
+
+/* Perform a voluntary PHY software reset; the EPHY is very finicky and will
+ * start corrupting packets if this step is skipped
+ */
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	if (GENET_IS_V4(priv))
+		return;
+
+	if (priv->phydev) {
+		phy_init_hw(priv->phydev);
+		phy_start_aneg(priv->phydev);
+	}
+}
+
+void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	u32 reg = 0;
+
+	/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+	if (!GENET_IS_V4(priv))
+		return;
+
+	reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+	if (enable) {
+		reg &= ~EXT_CK25_DIS;
+		bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+		mdelay(1);
+
+		reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+		reg |= EXT_GPHY_RESET;
+		bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+		mdelay(1);
+
+		reg &= ~EXT_GPHY_RESET;
+	} else {
+		reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
+		bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+		mdelay(1);
+		reg |= EXT_CK25_DIS;
+	}
+	bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+	udelay(60);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+	u32 reg;
+
+	/* Speed settings are set in bcmgenet_mii_setup() */
+	reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+	reg |= LED_ACT_SOURCE_MAC;
+	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+		fixed_phy_set_link_update(priv->phydev,
+					  bcmgenet_fixed_phy_link_update);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	struct device *kdev = &priv->pdev->dev;
+	const char *phy_name = NULL;
+	u32 id_mode_dis = 0;
+	u32 port_ctrl;
+	u32 reg;
+
+	priv->ext_phy = !priv->internal_phy &&
+			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+	if (priv->internal_phy)
+		priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+	switch (priv->phy_interface) {
+	case PHY_INTERFACE_MODE_NA:
+	case PHY_INTERFACE_MODE_MOCA:
+		/* Irrespective of the actually configured PHY speed (100 or
+		 * 1000), GENETv4 only has an internal GPHY, so we will just
+		 * end up masking the Gigabit features from what we support,
+		 * not switching to the EPHY
+		 */
+		if (GENET_IS_V4(priv))
+			port_ctrl = PORT_MODE_INT_GPHY;
+		else
+			port_ctrl = PORT_MODE_INT_EPHY;
+
+		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+		if (priv->internal_phy) {
+			phy_name = "internal PHY";
+		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+			phy_name = "MoCA";
+			bcmgenet_moca_phy_setup(priv);
+		}
+		break;
+
+	case PHY_INTERFACE_MODE_MII:
+		phy_name = "external MII";
+		phydev->supported &= PHY_BASIC_FEATURES;
+		bcmgenet_sys_writel(priv,
+				    PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+		break;
+
+	case PHY_INTERFACE_MODE_REVMII:
+		phy_name = "external RvMII";
+		/* of_mdiobus_register took care of reading the 'max-speed'
+		 * PHY property for us, effectively limiting the PHY supported
+		 * capabilities, use that knowledge to also configure the
+		 * Reverse MII interface correctly.
+		 */
+		if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+				PHY_BASIC_FEATURES)
+			port_ctrl = PORT_MODE_EXT_RVMII_25;
+		else
+			port_ctrl = PORT_MODE_EXT_RVMII_50;
+		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+		/* RGMII_NO_ID: TXC transitions at the same time as TXD
+		 *		(requires PCB or receiver-side delay)
+		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
+		 *
+		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
+		 */
+		id_mode_dis = BIT(16);
+		/* fall through */
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		if (id_mode_dis)
+			phy_name = "external RGMII (no delay)";
+		else
+			phy_name = "external RGMII (TX delay)";
+		bcmgenet_sys_writel(priv,
+				    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+		break;
+	default:
+		dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+		return -EINVAL;
+	}
+
+	/* This is an external PHY (xMII), so we need to enable the RGMII
+	 * block for the interface to work
+	 */
+	if (priv->ext_phy) {
+		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+		reg |= RGMII_MODE_EN | id_mode_dis;
+		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+	}
+
+	dev_info_once(kdev, "configuring instance for %s\n", phy_name);
+
+	return 0;
+}
+
+int bcmgenet_mii_probe(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device_node *dn = priv->pdev->dev.of_node;
+	struct phy_device *phydev;
+	u32 phy_flags;
+	int ret;
+
+	/* Communicate the integrated PHY revision */
+	phy_flags = priv->gphy_rev;
+
+	/* Initialize link state variables that bcmgenet_mii_setup() uses */
+	priv->old_link = -1;
+	priv->old_speed = -1;
+	priv->old_duplex = -1;
+	priv->old_pause = -1;
+
+	if (dn) {
+		phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+					phy_flags, priv->phy_interface);
+		if (!phydev) {
+			pr_err("could not attach to PHY\n");
+			return -ENODEV;
+		}
+	} else {
+		phydev = priv->phydev;
+		phydev->dev_flags = phy_flags;
+
+		ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+					 priv->phy_interface);
+		if (ret) {
+			pr_err("could not attach to PHY\n");
+			return -ENODEV;
+		}
+	}
+
+	priv->phydev = phydev;
+
+	/* Configure the port multiplexer based on the probed PHY device, since
+	 * reading the 'max-speed' property determines the maximum supported
+	 * PHY speed, which bcmgenet_mii_config() needs to configure
+	 * things appropriately.
+	 */
+	ret = bcmgenet_mii_config(dev);
+	if (ret) {
+		phy_disconnect(priv->phydev);
+		return ret;
+	}
+
+	phydev->advertising = phydev->supported;
+
+	/* The internal PHY has its link interrupts routed to the
+	 * Ethernet MAC ISRs
+	 */
+	if (priv->internal_phy)
+		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+	else
+		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+	return 0;
+}
+
+/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
+ * their internal MDIO management controller making them fail to successfully
+ * be read from or written to for the first transaction.  We insert a dummy
+ * BMSR read here to make sure that phy_get_device() and get_phy_id() can
+ * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
+ * PHY device for this peripheral.
+ *
+ * Once the PHY driver is registered, we can work around subsequent reads from
+ * there (e.g: during system-wide power management).
+ *
+ * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
+ * therefore the right location to stick that workaround. Since we do not want
+ * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
+ * Device Tree scan to limit the search area.
+ */
+static int bcmgenet_mii_bus_reset(struct mii_bus *bus)
+{
+	struct net_device *dev = bus->priv;
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device_node *np = priv->mdio_dn;
+	struct device_node *child = NULL;
+	u32 read_mask = 0;
+	int addr = 0;
+
+	if (!np) {
+		read_mask = 1 << priv->phy_addr;
+	} else {
+		for_each_available_child_of_node(np, child) {
+			addr = of_mdio_parse_addr(&dev->dev, child);
+			if (addr < 0)
+				continue;
+
+			read_mask |= 1 << addr;
+		}
+	}
+
+	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+		if (read_mask & 1 << addr) {
+			dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr);
+			mdiobus_read(bus, addr, MII_BMSR);
+		}
+	}
+
+	return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+	struct mii_bus *bus;
+
+	if (priv->mii_bus)
+		return 0;
+
+	priv->mii_bus = mdiobus_alloc();
+	if (!priv->mii_bus) {
+		pr_err("failed to allocate\n");
+		return -ENOMEM;
+	}
+
+	bus = priv->mii_bus;
+	bus->priv = priv->dev;
+	bus->name = "bcmgenet MII bus";
+	bus->parent = &priv->pdev->dev;
+	bus->read = bcmgenet_mii_read;
+	bus->write = bcmgenet_mii_write;
+	bus->reset = bcmgenet_mii_bus_reset;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+		 priv->pdev->name, priv->pdev->id);
+
+	bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+	if (!bus->irq) {
+		mdiobus_free(priv->mii_bus);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+	struct device_node *dn = priv->pdev->dev.of_node;
+	struct device *kdev = &priv->pdev->dev;
+	const char *phy_mode_str = NULL;
+	struct phy_device *phydev = NULL;
+	char *compat;
+	int phy_mode;
+	int ret;
+
+	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+	if (!compat)
+		return -ENOMEM;
+
+	priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+	kfree(compat);
+	if (!priv->mdio_dn) {
+		dev_err(kdev, "unable to find MDIO bus node\n");
+		return -ENODEV;
+	}
+
+	ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn);
+	if (ret) {
+		dev_err(kdev, "failed to register MDIO bus\n");
+		return ret;
+	}
+
+	/* Fetch the PHY phandle */
+	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+	/* In the case of a fixed PHY, the DT node associated
+	 * to the PHY is the Ethernet MAC DT node.
+	 */
+	if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+		ret = of_phy_register_fixed_link(dn);
+		if (ret)
+			return ret;
+
+		priv->phy_dn = of_node_get(dn);
+	}
+
+	/* Get the link mode */
+	phy_mode = of_get_phy_mode(dn);
+	priv->phy_interface = phy_mode;
+
+	/* We need to specifically look up whether this PHY interface is internal
+	 * or not *before* we even try to probe the PHY driver over MDIO as we
+	 * may have shut down the internal PHY for power saving purposes.
+	 */
+	if (phy_mode < 0) {
+		ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+		if (ret < 0) {
+			dev_err(kdev, "invalid PHY mode property\n");
+			return ret;
+		}
+
+		priv->phy_interface = PHY_INTERFACE_MODE_NA;
+		if (!strcasecmp(phy_mode_str, "internal"))
+			priv->internal_phy = true;
+	}
+
+	/* Make sure we initialize MoCA PHYs with a link down */
+	if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+		phydev = of_phy_find_device(dn);
+		if (phydev)
+			phydev->link = 0;
+	}
+
+	return 0;
+}
+
+static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+{
+	struct device *kdev = &priv->pdev->dev;
+	struct bcmgenet_platform_data *pd = kdev->platform_data;
+	struct mii_bus *mdio = priv->mii_bus;
+	struct phy_device *phydev;
+	int ret;
+
+	if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
+		/*
+		 * Internal or external PHY with MDIO access
+		 */
+		if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+			mdio->phy_mask = ~(1 << pd->phy_address);
+		else
+			mdio->phy_mask = 0;
+
+		ret = mdiobus_register(mdio);
+		if (ret) {
+			dev_err(kdev, "failed to register MDIO bus\n");
+			return ret;
+		}
+
+		if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+			phydev = mdio->phy_map[pd->phy_address];
+		else
+			phydev = phy_find_first(mdio);
+
+		if (!phydev) {
+			dev_err(kdev, "failed to register PHY device\n");
+			mdiobus_unregister(mdio);
+			return -ENODEV;
+		}
+	} else {
+		/*
+		 * MoCA port or no MDIO access.
+		 * Use fixed PHY to represent the link layer.
+		 */
+		struct fixed_phy_status fphy_status = {
+			.link = 1,
+			.speed = pd->phy_speed,
+			.duplex = pd->phy_duplex,
+			.pause = 0,
+			.asym_pause = 0,
+		};
+
+		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+		if (!phydev || IS_ERR(phydev)) {
+			dev_err(kdev, "failed to register fixed PHY device\n");
+			return -ENODEV;
+		}
+
+		/* Make sure we initialize MoCA PHYs with a link down */
+		phydev->link = 0;
+
+	}
+
+	priv->phydev = phydev;
+	priv->phy_interface = pd->phy_interface;
+
+	return 0;
+}
+
+static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
+{
+	struct device_node *dn = priv->pdev->dev.of_node;
+
+	if (dn)
+		return bcmgenet_mii_of_init(priv);
+	else
+		return bcmgenet_mii_pd_init(priv);
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = bcmgenet_mii_alloc(priv);
+	if (ret)
+		return ret;
+
+	ret = bcmgenet_mii_bus_init(priv);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	of_node_put(priv->phy_dn);
+	mdiobus_unregister(priv->mii_bus);
+	kfree(priv->mii_bus->irq);
+	mdiobus_free(priv->mii_bus);
+	return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	of_node_put(priv->phy_dn);
+	mdiobus_unregister(priv->mii_bus);
+	kfree(priv->mii_bus->irq);
+	mdiobus_free(priv->mii_bus);
+}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
new file mode 100644
index 0000000..f557a2a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -0,0 +1,2661 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
+ * Copyright (c) 2006, 2007  Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This driver is designed for the Broadcom SiByte SOC built-in
+ * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
+ *
+ * Updated to the driver model and the PHY abstraction layer
+ * by Maciej W. Rozycki.
+ */
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/processor.h>	/* Processor type for cache alignment. */
+
+/* Operational parameters that usually are not changed. */
+
+#define CONFIG_SBMAC_COALESCE
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+
+MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
+MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
+
+/* A few user-configurable values which may be modified when a driver
+   module is loaded. */
+
+/* Debug level: 0 = quiet, 1 = normal messages, up to 7 = most verbose. */
+static int debug = 1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug messages");
+
+#ifdef CONFIG_SBMAC_COALESCE
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
+
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
+#endif
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte MAC configuration
+#endif
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250_mac.h>
+#include <asm/sibyte/sb1250_dma.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
+#else
+#error invalid SiByte MAC configuration
+#endif
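+/*
+ * Note: on the BCM1480-family parts the per-MAC interrupt sources appear
+ * to be spaced two apart (hence the "* 2"), while on SB1250/BCM112x parts
+ * they are consecutive, e.g. UNIT_INT(1) == K_INT_MAC_0 + 1.
+ */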
+
+#ifdef K_INT_PHY
+#define SBMAC_PHY_INT			K_INT_PHY
+#else
+#define SBMAC_PHY_INT			PHY_POLL
+#endif
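+/*
+ * If the platform does not provide a dedicated PHY interrupt line,
+ * fall back to having phylib poll the PHY for link changes.
+ */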
+
+/**********************************************************************
+ *  Simple types
+ ********************************************************************* */
+
+enum sbmac_speed {
+	sbmac_speed_none = 0,
+	sbmac_speed_10 = SPEED_10,
+	sbmac_speed_100 = SPEED_100,
+	sbmac_speed_1000 = SPEED_1000,
+};
+
+enum sbmac_duplex {
+	sbmac_duplex_none = -1,
+	sbmac_duplex_half = DUPLEX_HALF,
+	sbmac_duplex_full = DUPLEX_FULL,
+};
+
+enum sbmac_fc {
+	sbmac_fc_none,
+	sbmac_fc_disabled,
+	sbmac_fc_frame,
+	sbmac_fc_collision,
+	sbmac_fc_carrier,
+};
+
+enum sbmac_state {
+	sbmac_state_uninit,
+	sbmac_state_off,
+	sbmac_state_on,
+	sbmac_state_broken,
+};
+
+
+/**********************************************************************
+ *  Macros
+ ********************************************************************* */
+
+
+#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
+			  (d)->sbdma_dscrtable : (d)->f+1)
+
+
+#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
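+/*
+ * Worked example (assuming a 32-byte cache line, as is typical for these
+ * SOCs): NUMCACHEBLKS(1518) == (1518 + 31) / 32 == 48 cache lines per
+ * full-size Ethernet frame.
+ */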
+
+#define SBMAC_MAX_TXDESCR	256
+#define SBMAC_MAX_RXDESCR	256
+
+#define ENET_PACKET_SIZE	1518
+/*#define ENET_PACKET_SIZE	9216 */
+
+/**********************************************************************
+ *  DMA Descriptor structure
+ ********************************************************************* */
+
+struct sbdmadscr {
+	uint64_t  dscr_a;
+	uint64_t  dscr_b;
+};
+
+/**********************************************************************
+ *  DMA Controller structure
+ ********************************************************************* */
+
+struct sbmacdma {
+
+	/*
+	 * This stuff is used to identify the channel and the registers
+	 * associated with it.
+	 */
+	struct sbmac_softc	*sbdma_eth;	/* back pointer to associated
+						   MAC */
+	int			sbdma_channel;	/* channel number */
+	int			sbdma_txdir;	/* direction (1=transmit) */
+	int			sbdma_maxdescr;	/* total # of descriptors
+						   in ring */
+#ifdef CONFIG_SBMAC_COALESCE
+	int			sbdma_int_pktcnt;
+						/* # descriptors rx/tx
+						   before interrupt */
+	int			sbdma_int_timeout;
+						/* # usec rx/tx interrupt */
+#endif
+	void __iomem		*sbdma_config0;	/* DMA config register 0 */
+	void __iomem		*sbdma_config1;	/* DMA config register 1 */
+	void __iomem		*sbdma_dscrbase;
+						/* descriptor base address */
+	void __iomem		*sbdma_dscrcnt;	/* descriptor count register */
+	void __iomem		*sbdma_curdscr;	/* current descriptor
+						   address */
+	void __iomem		*sbdma_oodpktlost;
+						/* pkt drop (rx only) */
+
+	/*
+	 * This stuff is for maintenance of the ring
+	 */
+	void			*sbdma_dscrtable_unaligned;
+	struct sbdmadscr	*sbdma_dscrtable;
+						/* base of descriptor table */
+	struct sbdmadscr	*sbdma_dscrtable_end;
+						/* end of descriptor table */
+	struct sk_buff		**sbdma_ctxtable;
+						/* context table, one
+						   per descr */
+	dma_addr_t		sbdma_dscrtable_phys;
+						/* and also the phys addr */
+	struct sbdmadscr	*sbdma_addptr;	/* next dscr for sw to add */
+	struct sbdmadscr	*sbdma_remptr;	/* next dscr for sw
+						   to remove */
+};
+
+
+/**********************************************************************
+ *  Ethernet softc structure
+ ********************************************************************* */
+
+struct sbmac_softc {
+
+	/*
+	 * Linux-specific things
+	 */
+	struct net_device	*sbm_dev;	/* pointer to linux device */
+	struct napi_struct	napi;
+	struct phy_device	*phy_dev;	/* the associated PHY device */
+	struct mii_bus		*mii_bus;	/* the MII bus */
+	int			phy_irq[PHY_MAX_ADDR];
+	spinlock_t		sbm_lock;	/* spin lock */
+	int			sbm_devflags;	/* current device flags */
+
+	/*
+	 * Controller-specific things
+	 */
+	void __iomem		*sbm_base;	/* MAC's base address */
+	enum sbmac_state	sbm_state;	/* current state */
+
+	void __iomem		*sbm_macenable;	/* MAC Enable Register */
+	void __iomem		*sbm_maccfg;	/* MAC Config Register */
+	void __iomem		*sbm_fifocfg;	/* FIFO Config Register */
+	void __iomem		*sbm_framecfg;	/* Frame Config Register */
+	void __iomem		*sbm_rxfilter;	/* Receive Filter Register */
+	void __iomem		*sbm_isr;	/* Interrupt Status Register */
+	void __iomem		*sbm_imr;	/* Interrupt Mask Register */
+	void __iomem		*sbm_mdio;	/* MDIO Register */
+
+	enum sbmac_speed	sbm_speed;	/* current speed */
+	enum sbmac_duplex	sbm_duplex;	/* current duplex */
+	enum sbmac_fc		sbm_fc;		/* cur. flow control setting */
+	int			sbm_pause;	/* current pause setting */
+	int			sbm_link;	/* current link state */
+
+	unsigned char		sbm_hwaddr[ETH_ALEN];
+
+	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
+	struct sbmacdma		sbm_rxdma;
+	int			rx_hw_checksum;
+	int			sbe_idx;
+};
+
+
+/**********************************************************************
+ *  Externs
+ ********************************************************************* */
+
+/**********************************************************************
+ *  Prototypes
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+			  int txrx, int maxdescr);
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *m);
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
+static void sbdma_emptyring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			    int work_to_do, int poll);
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			     int poll);
+static int sbmac_initctx(struct sbmac_softc *s);
+static void sbmac_channel_start(struct sbmac_softc *s);
+static void sbmac_channel_stop(struct sbmac_softc *s);
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
+						enum sbmac_state);
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
+static uint64_t sbmac_addr2reg(unsigned char *ptr);
+static irqreturn_t sbmac_intr(int irq, void *dev_instance);
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static void sbmac_setmulti(struct sbmac_softc *sc);
+static int sbmac_init(struct platform_device *pldev, long long base);
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+			    enum sbmac_fc fc);
+
+static int sbmac_open(struct net_device *dev);
+static void sbmac_tx_timeout (struct net_device *dev);
+static void sbmac_set_rx_mode(struct net_device *dev);
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sbmac_close(struct net_device *dev);
+static int sbmac_poll(struct napi_struct *napi, int budget);
+
+static void sbmac_mii_poll(struct net_device *dev);
+static int sbmac_mii_probe(struct net_device *dev);
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio);
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+			       int bitcnt);
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+			   u16 val);
+
+
+/**********************************************************************
+ *  Globals
+ ********************************************************************* */
+
+static char sbmac_string[] = "sb1250-mac";
+
+static char sbmac_mdio_string[] = "sb1250-mac-mdio";
+
+
+/**********************************************************************
+ *  MDIO constants
+ ********************************************************************* */
+
+#define	MII_COMMAND_START	0x01
+#define	MII_COMMAND_READ	0x02
+#define	MII_COMMAND_WRITE	0x01
+#define	MII_COMMAND_ACK		0x02
+
+#define M_MAC_MDIO_DIR_OUTPUT	0		/* for clarity */
+
+#define ENABLE 		1
+#define DISABLE		0
+
+/**********************************************************************
+ *  SBMAC_MII_SYNC(sbm_mdio)
+ *
+ *  Synchronize with the MII - send a pattern of bits to the MII
+ *  that will guarantee that it is ready to accept a command.
+ *
+ *  Input parameters:
+ *  	   sbm_mdio - address of the MAC's MDIO register
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio)
+{
+	int cnt;
+	uint64_t bits;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
+
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
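+	/*
+	 * Clock out 32 cycles with MDIO held high; this is the standard
+	 * MDIO preamble that lets the PHY resynchronize its frame state.
+	 */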
+	for (cnt = 0; cnt < 32; cnt++) {
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
+ *
+ *  Send some bits to the MII.  The bits to be sent are right-
+ *  justified in the 'data' parameter.
+ *
+ *  Input parameters:
+ *  	   sbm_mdio - address of the MAC's MDIO register
+ *  	   data     - data to send
+ *  	   bitcnt   - number of bits to send
+ ********************************************************************* */
+
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+			       int bitcnt)
+{
+	int i;
+	uint64_t bits;
+	unsigned int curmask;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT;
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+	curmask = 1 << (bitcnt - 1);
+
+	for (i = 0; i < bitcnt; i++) {
+		if (data & curmask)
+			bits |= M_MAC_MDIO_OUT;
+		else bits &= ~M_MAC_MDIO_OUT;
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		curmask >>= 1;
+	}
+}
+
+
+
+/**********************************************************************
+ *  SBMAC_MII_READ(bus, phyaddr, regidx)
+ *  Read a PHY register.
+ *
+ *  Input parameters:
+ *  	   bus     - MDIO bus handle
+ *  	   phyaddr - PHY's address
+ *  	   regidx  - index of register to read
+ *
+ *  Return value:
+ *  	   value read, or 0xffff if an error occurred.
+ ********************************************************************* */
+
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+	void __iomem *sbm_mdio = sc->sbm_mdio;
+	int idx;
+	int error;
+	int regval;
+	int mac_mdio_genc;
+
+	/*
+	 * Synchronize ourselves so that the PHY knows the next
+	 * thing coming down is a command
+	 */
+	sbmac_mii_sync(sbm_mdio);
+
+	/*
+	 * Send the data to the PHY.  The sequence is
+	 * a "start" command (2 bits)
+	 * a "read" command (2 bits)
+	 * the PHY addr (5 bits)
+	 * the register index (5 bits)
+	 */
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
+	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+	sbmac_mii_senddata(sbm_mdio, regidx, 5);
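+	/*
+	 * 14 bits have been clocked out so far (start, opcode, PHY address,
+	 * register index).  Next comes the turnaround, where we release the
+	 * bus so the PHY can drive the 16 data bits back to us.
+	 */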
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	/*
+	 * Switch the port around without a clock transition.
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	/*
+	 * Send out a clock pulse to signal we want the status
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+		     sbm_mdio);
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	/*
+	 * If an error occurred, the PHY will signal '1' back
+	 */
+	error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;
+
+	/*
+	 * Issue an 'idle' clock pulse, but keep the direction
+	 * the same.
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+		     sbm_mdio);
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	regval = 0;
+
+	for (idx = 0; idx < 16; idx++) {
+		regval <<= 1;
+
+		if (error == 0) {
+			if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
+				regval |= 1;
+		}
+
+		__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+			     sbm_mdio);
+		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+	}
+
+	/* Switch back to output */
+	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+	if (error == 0)
+		return regval;
+	return 0xffff;
+}
+
+
+/**********************************************************************
+ *  SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
+ *
+ *  Write a value to a PHY register.
+ *
+ *  Input parameters:
+ *  	   bus     - MDIO bus handle
+ *  	   phyaddr - PHY to use
+ *  	   regidx  - register within the PHY
+ *  	   regval  - data to write to register
+ *
+ *  Return value:
+ *  	   0 for success
+ ********************************************************************* */
+
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+			   u16 regval)
+{
+	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+	void __iomem *sbm_mdio = sc->sbm_mdio;
+	int mac_mdio_genc;
+
+	sbmac_mii_sync(sbm_mdio);
+
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
+	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+	sbmac_mii_senddata(sbm_mdio, regidx, 5);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
+	sbmac_mii_senddata(sbm_mdio, regval, 16);
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+	return 0;
+}
+
+
+
+/**********************************************************************
+ *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
+ *
+ *  Initialize a DMA channel context.  Since there are potentially
+ *  eight DMA channels per MAC, it's nice to do this in a standard
+ *  way.
+ *
+ *  Input parameters:
+ *  	   d - struct sbmacdma (DMA channel context)
+ *  	   s - struct sbmac_softc (pointer to a MAC)
+ *  	   chan - channel number (0..1 right now)
+ *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
+ *      maxdescr - number of descriptors
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+			  int txrx, int maxdescr)
+{
+#ifdef CONFIG_SBMAC_COALESCE
+	int int_pktcnt, int_timeout;
+#endif
+
+	/*
+	 * Save away interesting stuff in the structure
+	 */
+
+	d->sbdma_eth       = s;
+	d->sbdma_channel   = chan;
+	d->sbdma_txdir     = txrx;
+
+#if 0
+	/* RMON clearing */
+	s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
+#endif
+
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);
+
+	/*
+	 * initialize register pointers
+	 */
+
+	d->sbdma_config0 =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
+	d->sbdma_config1 =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
+	d->sbdma_dscrbase =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
+	d->sbdma_dscrcnt =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
+	d->sbdma_curdscr =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+	if (d->sbdma_txdir)
+		d->sbdma_oodpktlost = NULL;
+	else
+		d->sbdma_oodpktlost =
+			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
+
+	/*
+	 * Allocate memory for the ring
+	 */
+
+	d->sbdma_maxdescr = maxdescr;
+
+	d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
+					       sizeof(*d->sbdma_dscrtable),
+					       GFP_KERNEL);
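+	/*
+	 * One spare descriptor is allocated above so that the table can be
+	 * aligned up to a 16-byte boundary below without running past the
+	 * end of the allocation.
+	 */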
+
+	/*
+	 * The descriptor table must be aligned to at least 16 bytes or the
+	 * MAC will corrupt it.
+	 */
+	d->sbdma_dscrtable = (struct sbdmadscr *)
+			     ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
+				   sizeof(*d->sbdma_dscrtable));
+
+	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
+
+	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
+
+	/*
+	 * And context table
+	 */
+
+	d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
+				    sizeof(*d->sbdma_ctxtable), GFP_KERNEL);
+
+#ifdef CONFIG_SBMAC_COALESCE
+	/*
+	 * Setup Rx/Tx DMA coalescing defaults
+	 */
+
+	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
+	if ( int_pktcnt ) {
+		d->sbdma_int_pktcnt = int_pktcnt;
+	} else {
+		d->sbdma_int_pktcnt = 1;
+	}
+
+	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
+	if ( int_timeout ) {
+		d->sbdma_int_timeout = int_timeout;
+	} else {
+		d->sbdma_int_timeout = 0;
+	}
+#endif
+
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_START(d)
+ *
+ *  Initialize the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel to init (context must be previously init'd)
+ *  	   rxtx - DMA_RX or DMA_TX, depending on the channel direction
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
+{
+	/*
+	 * Turn on the DMA channel
+	 */
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+		       0, d->sbdma_config1);
+	__raw_writeq(M_DMA_EOP_INT_EN |
+		       V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
+		       0, d->sbdma_config0);
+#else
+	__raw_writeq(0, d->sbdma_config1);
+	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       0, d->sbdma_config0);
+#endif
+
+	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
+
+	/*
+	 * Initialize ring pointers
+	 */
+
+	d->sbdma_addptr = d->sbdma_dscrtable;
+	d->sbdma_remptr = d->sbdma_dscrtable;
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_STOP(d)
+ *
+ *  Shut down the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel to stop (context must have been initialized)
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_stop(struct sbmacdma *d)
+{
+	/*
+	 * Turn off the DMA channel
+	 */
+
+	__raw_writeq(0, d->sbdma_config1);
+
+	__raw_writeq(0, d->sbdma_dscrbase);
+
+	__raw_writeq(0, d->sbdma_config0);
+
+	/*
+	 * Zero ring pointers
+	 */
+
+	d->sbdma_addptr = NULL;
+	d->sbdma_remptr = NULL;
+}
+
+static inline void sbdma_align_skb(struct sk_buff *skb,
+				   unsigned int power2, unsigned int offset)
+{
+	unsigned char *addr = skb->data;
+	unsigned char *newaddr = PTR_ALIGN(addr, power2);
+
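+	/*
+	 * Example (assuming 32-byte cache lines and NET_IP_ALIGN == 2):
+	 * the data pointer is moved up to the next 32-byte boundary plus 2,
+	 * so the 14-byte Ethernet header ends and the IP header begins on
+	 * a naturally aligned (16-byte) boundary.
+	 */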
+	skb_reserve(skb, newaddr - addr + offset);
+}
+
+
+/**********************************************************************
+ *  SBDMA_ADD_RCVBUFFER(d,sb)
+ *
+ *  Add a buffer to the specified DMA channel.   For receive channels,
+ *  this queues a buffer for inbound packets.
+ *
+ *  Input parameters:
+ *	   sc - softc structure
+ *  	    d - DMA channel descriptor
+ * 	   sb - sk_buff to add, or NULL if we should allocate one
+ *
+ *  Return value:
+ *  	   0 if buffer added successfully
+ *  	   -ENOSPC if the ring is full
+ *  	   -ENOBUFS if an sk_buff could not be allocated
+ ********************************************************************* */
+
+
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *sb)
+{
+	struct net_device *dev = sc->sbm_dev;
+	struct sbdmadscr *dsc;
+	struct sbdmadscr *nextdsc;
+	struct sk_buff *sb_new = NULL;
+	int pktsize = ENET_PACKET_SIZE;
+
+	/* get pointer to our current place in the ring */
+
+	dsc = d->sbdma_addptr;
+	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+	/*
+	 * figure out if the ring is full - if the next descriptor
+	 * is the same as the one that we're going to remove from
+	 * the ring, the ring is full
+	 */
+
+	if (nextdsc == d->sbdma_remptr) {
+		return -ENOSPC;
+	}
+
+	/*
+	 * Allocate a sk_buff if we don't already have one.
+	 * If we do have an sk_buff, reset it so that it's empty.
+	 *
+	 * Note: sk_buffs don't seem to be guaranteed to have any sort
+	 * of alignment when they are allocated.  Therefore, allocate enough
+	 * extra space to make sure that:
+	 *
+	 *    1. the data does not start in the middle of a cache line.
+	 *    2. The data does not end in the middle of a cache line
+	 *    3. The buffer can be aligned such that the IP addresses are
+	 *       naturally aligned.
+	 *
+	 *  Remember, the SOC's MAC writes whole cache lines at a time,
+	 *  without reading the old contents first.  So, if the sk_buff's
+	 *  data portion starts in the middle of a cache line, the SOC
+	 *  DMA will trash the beginning (and ending) portions.
+	 */
+
+	if (sb == NULL) {
+		sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+					       SMP_CACHE_BYTES * 2 +
+					       NET_IP_ALIGN);
+		if (sb_new == NULL)
+			return -ENOBUFS;
+
+		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
+	}
+	else {
+		sb_new = sb;
+		/*
+		 * nothing special to reinit buffer, it's already aligned
+		 * and sb->data already points to a good place.
+		 */
+	}
+
+	/*
+	 * fill in the descriptor
+	 */
+
+#ifdef CONFIG_SBMAC_COALESCE
+	/*
+	 * Do not interrupt per DMA transfer.
+	 */
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
+#else
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
+		M_DMA_DSCRA_INTERRUPT;
+#endif
+
+	/* receiving: no options */
+	dsc->dscr_b = 0;
+
+	/*
+	 * fill in the context
+	 */
+
+	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
+
+	/*
+	 * point at next packet
+	 */
+
+	d->sbdma_addptr = nextdsc;
+
+	/*
+	 * Give the buffer to the DMA engine.
+	 */
+
+	__raw_writeq(1, d->sbdma_dscrcnt);
+
+	return 0;					/* we did it */
+}
+
+/**********************************************************************
+ *  SBDMA_ADD_TXBUFFER(d,sb)
+ *
+ *  Add a transmit buffer to the specified DMA channel, causing a
+ *  transmit to start.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel descriptor
+ * 	   sb - sk_buff to add
+ *
+ *  Return value:
+ *  	   0 transmit queued successfully
+ *  	   otherwise error code
+ ********************************************************************* */
+
+
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
+{
+	struct sbdmadscr *dsc;
+	struct sbdmadscr *nextdsc;
+	uint64_t phys;
+	uint64_t ncb;
+	int length;
+
+	/* get pointer to our current place in the ring */
+
+	dsc = d->sbdma_addptr;
+	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+	/*
+	 * figure out if the ring is full - if the next descriptor
+	 * is the same as the one that we're going to remove from
+	 * the ring, the ring is full
+	 */
+
+	if (nextdsc == d->sbdma_remptr) {
+		return -ENOSPC;
+	}
+
+	/*
+	 * Under Linux, it's not necessary to copy/coalesce buffers
+	 * like it is on NetBSD.  We think they're all contiguous,
+	 * but that may not be true for GBE.
+	 */
+
+	length = sb->len;
+
+	/*
+	 * fill in the descriptor.  Note that the number of cache
+	 * blocks in the descriptor is the number of blocks
+	 * *spanned*, so we need to add in the offset (if any)
+	 * while doing the calculation.
+	 */
+
+	phys = virt_to_phys(sb->data);
+	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
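+	/*
+	 * Example (32-byte cache lines): a 60-byte frame whose data starts
+	 * 8 bytes into a cache line spans NUMCACHEBLKS(60 + 8) == 3 lines,
+	 * even though 60 bytes alone would fit in 2.
+	 */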
+
+	dsc->dscr_a = phys |
+		V_DMA_DSCRA_A_SIZE(ncb) |
+#ifndef CONFIG_SBMAC_COALESCE
+		M_DMA_DSCRA_INTERRUPT |
+#endif
+		M_DMA_ETHTX_SOP;
+
+	/* transmitting: set outbound options and length */
+
+	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
+		V_DMA_DSCRB_PKT_SIZE(length);
+
+	/*
+	 * fill in the context
+	 */
+
+	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
+
+	/*
+	 * point at next packet
+	 */
+
+	d->sbdma_addptr = nextdsc;
+
+	/*
+	 * Give the buffer to the DMA engine.
+	 */
+
+	__raw_writeq(1, d->sbdma_dscrcnt);
+
+	return 0;					/* we did it */
+}
+
+
+
+
+/**********************************************************************
+ *  SBDMA_EMPTYRING(d)
+ *
+ *  Free all allocated sk_buffs on the specified DMA channel.
+ *
+ *  Input parameters:
+ *  	   d  - DMA channel
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_emptyring(struct sbmacdma *d)
+{
+	int idx;
+	struct sk_buff *sb;
+
+	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
+		sb = d->sbdma_ctxtable[idx];
+		if (sb) {
+			dev_kfree_skb(sb);
+			d->sbdma_ctxtable[idx] = NULL;
+		}
+	}
+}
+
+
+/**********************************************************************
+ *  SBDMA_FILLRING(d)
+ *
+ *  Fill the specified DMA channel (must be receive channel)
+ *  with sk_buffs
+ *
+ *  Input parameters:
+ *	   sc - softc structure
+ *  	    d - DMA channel
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
+{
+	int idx;
+
+	for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+		if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
+			break;
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sbmac_netpoll(struct net_device *netdev)
+{
+	struct sbmac_softc *sc = netdev_priv(netdev);
+	int irq = sc->sbm_dev->irq;
+
+	__raw_writeq(0, sc->sbm_imr);
+
+	sbmac_intr(irq, netdev);
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+	sc->sbm_imr);
+#else
+	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+}
+#endif
+
+/**********************************************************************
+ *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
+ *
+ *  Process "completed" receive buffers on the specified DMA channel.
+ *
+ *  Input parameters:
+ *            sc - softc structure
+ *  	       d - DMA channel context
+ *    work_to_do - no. of packets to process before enabling interrupt
+ *                 again (for NAPI)
+ *          poll - 1: using polling (for NAPI)
+ *
+ *  Return value:
+ *  	   number of packets processed
+ ********************************************************************* */
+
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			    int work_to_do, int poll)
+{
+	struct net_device *dev = sc->sbm_dev;
+	int curidx;
+	int hwidx;
+	struct sbdmadscr *dsc;
+	struct sk_buff *sb;
+	int len;
+	int work_done = 0;
+	int dropped = 0;
+
+	prefetch(d);
+
+again:
+	/* Check if the HW dropped any frames */
+	dev->stats.rx_fifo_errors
+	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
+	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
+
+	while (work_to_do-- > 0) {
+		/*
+		 * figure out where we are (as an index) and where
+		 * the hardware is (also as an index)
+		 *
+		 * This could be done faster if (for example) the
+		 * descriptor table was page-aligned and contiguous in
+		 * both virtual and physical memory -- you could then
+		 * just compare the low-order bits of the virtual address
+		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+		 */
+
+		dsc = d->sbdma_remptr;
+		curidx = dsc - d->sbdma_dscrtable;
+
+		prefetch(dsc);
+		prefetch(&d->sbdma_ctxtable[curidx]);
+
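+		/*
+		 * Translate the hardware's current descriptor physical
+		 * address back into an index into our descriptor table.
+		 */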
+		hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+			 d->sbdma_dscrtable_phys) /
+			sizeof(*d->sbdma_dscrtable);
+
+		/*
+		 * If they're the same, that means we've processed all
+		 * of the descriptors up to (but not including) the one that
+		 * the hardware is working on right now.
+		 */
+
+		if (curidx == hwidx)
+			goto done;
+
+		/*
+		 * Otherwise, get the packet's sk_buff ptr back
+		 */
+
+		sb = d->sbdma_ctxtable[curidx];
+		d->sbdma_ctxtable[curidx] = NULL;
+
+		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
+
+		/*
+		 * Check packet status.  If good, process it.
+		 * If not, silently drop it and put it back on the
+		 * receive ring.
+		 */
+
+		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
+
+			/*
+			 * Add a new buffer to replace the old one.  If we fail
+			 * to allocate a buffer, we're going to drop this
+			 * packet and put it right back on the receive ring.
+			 */
+
+			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
+				     -ENOBUFS)) {
+				dev->stats.rx_dropped++;
+				/* Re-add old buffer */
+				sbdma_add_rcvbuffer(sc, d, sb);
+				/* No point in continuing at the moment */
+				printk(KERN_ERR "dropped packet (1)\n");
+				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+				goto done;
+			} else {
+				/*
+				 * Set length into the packet
+				 */
+				skb_put(sb,len);
+
+				/*
+				 * Buffer has been replaced on the
+				 * receive ring.  Pass the buffer to
+				 * the kernel
+				 */
+				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
+				/* Check hw IPv4/TCP checksum if supported */
+				if (sc->rx_hw_checksum == ENABLE) {
+					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
+					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
+						sb->ip_summed = CHECKSUM_UNNECESSARY;
+						/* don't need to set sb->csum */
+					} else {
+						skb_checksum_none_assert(sb);
+					}
+				}
+				prefetch(sb->data);
+				prefetch((const void *)(((char *)sb->data)+32));
+				if (poll)
+					dropped = netif_receive_skb(sb);
+				else
+					dropped = netif_rx(sb);
+
+				if (dropped == NET_RX_DROP) {
+					dev->stats.rx_dropped++;
+					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+					goto done;
+				}
+				else {
+					dev->stats.rx_bytes += len;
+					dev->stats.rx_packets++;
+				}
+			}
+		} else {
+			/*
+			 * Packet was mangled somehow.  Just drop it and
+			 * put it back on the receive ring.
+			 */
+			dev->stats.rx_errors++;
+			sbdma_add_rcvbuffer(sc, d, sb);
+		}
+
+
+		/*
+		 * .. and advance to the next buffer.
+		 */
+
+		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+		work_done++;
+	}
+	if (!poll) {
+		work_to_do = 32;
+		goto again; /* collect fifo drop statistics again */
+	}
+done:
+	return work_done;
+}
+
+/**********************************************************************
+ *  SBDMA_TX_PROCESS(sc,d)
+ *
+ *  Process "completed" transmit buffers on the specified DMA channel.
+ *  This is normally called within the interrupt service routine.
+ *  Note that this isn't really ideal for priority channels, since
+ *  it processes all of the packets on a given channel before
+ *  returning.
+ *
+ *  Input parameters:
+ *      sc - softc structure
+ *  	 d - DMA channel context
+ *    poll - 1: using polling (for NAPI)
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			     int poll)
+{
+	struct net_device *dev = sc->sbm_dev;
+	int curidx;
+	int hwidx;
+	struct sbdmadscr *dsc;
+	struct sk_buff *sb;
+	unsigned long flags;
+	int packets_handled = 0;
+
+	spin_lock_irqsave(&(sc->sbm_lock), flags);
+
+	if (d->sbdma_remptr == d->sbdma_addptr)
+	  goto end_unlock;
+
+	hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+		 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);
+
+	for (;;) {
+		/*
+		 * figure out where we are (as an index) and where
+		 * the hardware is (also as an index)
+		 *
+		 * This could be done faster if (for example) the
+		 * descriptor table was page-aligned and contiguous in
+		 * both virtual and physical memory -- you could then
+		 * just compare the low-order bits of the virtual address
+		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+		 */
+
+		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+
+		/*
+		 * If they're the same, that means we've processed all
+		 * of the descriptors up to (but not including) the one that
+		 * the hardware is working on right now.
+		 */
+
+		if (curidx == hwidx)
+			break;
+
+		/*
+		 * Otherwise, get the packet's sk_buff ptr back
+		 */
+
+		dsc = &(d->sbdma_dscrtable[curidx]);
+		sb = d->sbdma_ctxtable[curidx];
+		d->sbdma_ctxtable[curidx] = NULL;
+
+		/*
+		 * Stats
+		 */
+
+		dev->stats.tx_bytes += sb->len;
+		dev->stats.tx_packets++;
+
+		/*
+		 * for transmits, we just free buffers.
+		 */
+
+		dev_kfree_skb_irq(sb);
+
+		/*
+		 * .. and advance to the next buffer.
+		 */
+
+		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+
+		packets_handled++;
+
+	}
+
+	/*
+	 * Decide if we should wake up the protocol or not.
+	 * Other drivers seem to do this when we reach a low
+	 * watermark on the transmit queue.
+	 */
+
+	if (packets_handled)
+		netif_wake_queue(d->sbdma_eth->sbm_dev);
+
+end_unlock:
+	spin_unlock_irqrestore(&(sc->sbm_lock), flags);
+
+}
+
+
+
+/**********************************************************************
+ *  SBMAC_INITCTX(s)
+ *
+ *  Initialize an Ethernet context structure - this is called
+ *  once per MAC on the 1250.  Memory is allocated here, so don't
+ *  call it again from inside the ioctl routines that bring the
+ *  interface up/down
+ *
+ *  Input parameters:
+ *  	   s - sbmac context structure
+ *
+ *  Return value:
+ *  	   0
+ ********************************************************************* */
+
+static int sbmac_initctx(struct sbmac_softc *s)
+{
+
+	/*
+	 * figure out the addresses of some ports
+	 */
+
+	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
+	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
+	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
+	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
+	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
+	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
+	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
+	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;
+
+	/*
+	 * Initialize the DMA channels.  Right now, only one per MAC is used
+	 * Note: Only do this _once_, as it allocates memory from the kernel!
+	 */
+
+	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
+	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
+
+	/*
+	 * initial state is OFF
+	 */
+
+	s->sbm_state = sbmac_state_off;
+
+	return 0;
+}
+
+
+static void sbdma_uninitctx(struct sbmacdma *d)
+{
+	if (d->sbdma_dscrtable_unaligned) {
+		kfree(d->sbdma_dscrtable_unaligned);
+		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
+	}
+
+	if (d->sbdma_ctxtable) {
+		kfree(d->sbdma_ctxtable);
+		d->sbdma_ctxtable = NULL;
+	}
+}
+
+
+static void sbmac_uninitctx(struct sbmac_softc *sc)
+{
+	sbdma_uninitctx(&(sc->sbm_txdma));
+	sbdma_uninitctx(&(sc->sbm_rxdma));
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_START(s)
+ *
+ *  Start packet processing on this MAC.
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_start(struct sbmac_softc *s)
+{
+	uint64_t reg;
+	void __iomem *port;
+	uint64_t cfg,fifo,framecfg;
+	int idx, th_value;
+
+	/*
+	 * Don't do this if running
+	 */
+
+	if (s->sbm_state == sbmac_state_on)
+		return;
+
+	/*
+	 * Bring the controller out of reset, but leave it off.
+	 */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/*
+	 * Ignore all received packets
+	 */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+
+	/*
+	 * Calculate values for various control registers.
+	 */
+
+	cfg = M_MAC_RETRY_EN |
+		M_MAC_TX_HOLD_SOP_EN |
+		V_MAC_TX_PAUSE_CNT_16K |
+		M_MAC_AP_STAT_EN |
+		M_MAC_FAST_SYNC |
+		M_MAC_SS_EN |
+		0;
+
+	/*
+	 * Be sure that RD_THRSH + WR_THRSH <= 32 for pass1 parts,
+	 * and that RD_THRSH + WR_THRSH <= 128 for pass2 and above.
+	 * Use a larger RD_THRSH for gigabit
+	 */
+	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
+		th_value = 28;
+	else
+		th_value = 64;
+
+	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
+		((s->sbm_speed == sbmac_speed_1000)
+		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
+		V_MAC_TX_RL_THRSH(4) |
+		V_MAC_RX_PL_THRSH(4) |
+		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
+		V_MAC_RX_RL_THRSH(8) |
+		0;
+
+	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
+		V_MAC_MAX_FRAMESZ_DEFAULT |
+		V_MAC_BACKOFF_SEL(1);
+
+	/*
+	 * Clear out the hash address map
+	 */
+
+	port = s->sbm_base + R_MAC_HASH_BASE;
+	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Clear out the exact-match table
+	 */
+
+	port = s->sbm_base + R_MAC_ADDR_BASE;
+	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Clear out the DMA Channel mapping table registers
+	 */
+
+	port = s->sbm_base + R_MAC_CHUP0_BASE;
+	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+
+	port = s->sbm_base + R_MAC_CHLO0_BASE;
+	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Program the hardware address.  It goes into the hardware-address
+	 * register as well as the first filter register.
+	 */
+
+	reg = sbmac_addr2reg(s->sbm_hwaddr);
+
+	port = s->sbm_base + R_MAC_ADDR_BASE;
+	__raw_writeq(reg, port);
+	port = s->sbm_base + R_MAC_ETHERNET_ADDR;
+
+	__raw_writeq(reg, port);
+
+	/*
+	 * Set the receive filter for no packets, and write values
+	 * to the various config registers
+	 */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+	__raw_writeq(0, s->sbm_imr);
+	__raw_writeq(framecfg, s->sbm_framecfg);
+	__raw_writeq(fifo, s->sbm_fifocfg);
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	/*
+	 * Initialize DMA channels (rings should be ok now)
+	 */
+
+	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
+	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
+
+	/*
+	 * Configure the speed, duplex, and flow control
+	 */
+
+	sbmac_set_speed(s,s->sbm_speed);
+	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
+
+	/*
+	 * Fill the receive ring
+	 */
+
+	sbdma_fillring(s, &(s->sbm_rxdma));
+
+	/*
+	 * Turn on the rest of the bits in the enable register
+	 */
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+	__raw_writeq(M_MAC_RXDMA_EN0 |
+		       M_MAC_TXDMA_EN0, s->sbm_macenable);
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+	__raw_writeq(M_MAC_RXDMA_EN0 |
+		       M_MAC_TXDMA_EN0 |
+		       M_MAC_RX_ENABLE |
+		       M_MAC_TX_ENABLE, s->sbm_macenable);
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+		       ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
+#else
+	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+		       (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
+#endif
+
+	/*
+	 * Enable receiving unicasts and broadcasts
+	 */
+
+	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
+
+	/*
+	 * we're running now.
+	 */
+
+	s->sbm_state = sbmac_state_on;
+
+	/*
+	 * Program multicast addresses
+	 */
+
+	sbmac_setmulti(s);
+
+	/*
+	 * If channel was in promiscuous mode before, turn that on
+	 */
+
+	if (s->sbm_devflags & IFF_PROMISC) {
+		sbmac_promiscuous_mode(s,1);
+	}
+
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_STOP(s)
+ *
+ *  Stop packet processing on this MAC.
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_stop(struct sbmac_softc *s)
+{
+	/* don't do this if already stopped */
+
+	if (s->sbm_state == sbmac_state_off)
+		return;
+
+	/* don't accept any packets, disable all interrupts */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+	__raw_writeq(0, s->sbm_imr);
+
+	/* Turn off ticker */
+
+	/* XXX */
+
+	/* turn off receiver and transmitter */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/* We're stopped now. */
+
+	s->sbm_state = sbmac_state_off;
+
+	/*
+	 * Stop DMA channels (rings should be ok now)
+	 */
+
+	sbdma_channel_stop(&(s->sbm_rxdma));
+	sbdma_channel_stop(&(s->sbm_txdma));
+
+	/* Empty the receive and transmit rings */
+
+	sbdma_emptyring(&(s->sbm_rxdma));
+	sbdma_emptyring(&(s->sbm_txdma));
+
+}
+
+/**********************************************************************
+ *  SBMAC_SET_CHANNEL_STATE(state)
+ *
+ *  Set the channel's state ON or OFF
+ *
+ *  Input parameters:
+ *  	   state - new state
+ *
+ *  Return value:
+ *  	   old state
+ ********************************************************************* */
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
+						enum sbmac_state state)
+{
+	enum sbmac_state oldstate = sc->sbm_state;
+
+	/*
+	 * If same as previous state, return
+	 */
+
+	if (state == oldstate) {
+		return oldstate;
+	}
+
+	/*
+	 * If new state is ON, turn channel on
+	 */
+
+	if (state == sbmac_state_on) {
+		sbmac_channel_start(sc);
+	}
+	else {
+		sbmac_channel_stop(sc);
+	}
+
+	/*
+	 * Return previous state
+	 */
+
+	return oldstate;
+}
+
+
+/**********************************************************************
+ *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
+ *
+ *  Turn on or off promiscuous mode
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *      onoff - 1 to turn on, 0 to turn off
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
+{
+	uint64_t reg;
+
+	if (sc->sbm_state != sbmac_state_on)
+		return;
+
+	if (onoff) {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+	else {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg &= ~M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_SET_IPHDR_OFFSET(sc)
+ *
+ *  Set the IP header offset to 15, assuming Ethernet encapsulation
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+
+	/* Hard code the offset to 15 for now */
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	/* BCM1250 pass1 didn't have hardware checksum.  Everything
+	   later does.  */
+	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
+		sc->rx_hw_checksum = DISABLE;
+	} else {
+		sc->rx_hw_checksum = ENABLE;
+	}
+}
+
+
+/**********************************************************************
+ *  SBMAC_ADDR2REG(ptr)
+ *
+ *  Convert six bytes into the 64-bit register value that
+ *  we typically write into the SBMAC's address/mcast registers
+ *
+ *  Input parameters:
+ *  	   ptr - pointer to 6 bytes
+ *
+ *  Return value:
+ *  	   register value
+ ********************************************************************* */
+
+static uint64_t sbmac_addr2reg(unsigned char *ptr)
+{
+	uint64_t reg = 0;
+
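+	/*
+	 * Pack the address low-byte-first: for 00:11:22:33:44:55 the
+	 * resulting register value is 0x0000554433221100, i.e. the first
+	 * octet of the MAC address ends up in the least significant byte.
+	 */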
+	ptr += 6;
+
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+
+	return reg;
+}
+
+
+/**********************************************************************
+ *  SBMAC_SET_SPEED(s,speed)
+ *
+ *  Configure LAN speed for the specified MAC.
+ *  Warning: must be called when MAC is off!
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *  	   speed - speed to set MAC to (see enum sbmac_speed)
+ *
+ *  Return value:
+ *  	   1 if successful
+ *  	   0 indicates invalid parameters
+ ********************************************************************* */
+
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
+{
+	uint64_t cfg;
+	uint64_t framecfg;
+
+	/*
+	 * Save new current values
+	 */
+
+	s->sbm_speed = speed;
+
+	if (s->sbm_state == sbmac_state_on)
+		return 0;	/* save for next restart */
+
+	/*
+	 * Read current register values
+	 */
+
+	cfg = __raw_readq(s->sbm_maccfg);
+	framecfg = __raw_readq(s->sbm_framecfg);
+
+	/*
+	 * Mask out the stuff we want to change
+	 */
+
+	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
+	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
+		      M_MAC_SLOT_SIZE);
+
+	/*
+	 * Now add in the new bits
+	 */
+
+	switch (speed) {
+	case sbmac_speed_10:
+		framecfg |= V_MAC_IFG_RX_10 |
+			V_MAC_IFG_TX_10 |
+			K_MAC_IFG_THRSH_10 |
+			V_MAC_SLOT_SIZE_10;
+		cfg |= V_MAC_SPEED_SEL_10MBPS;
+		break;
+
+	case sbmac_speed_100:
+		framecfg |= V_MAC_IFG_RX_100 |
+			V_MAC_IFG_TX_100 |
+			V_MAC_IFG_THRSH_100 |
+			V_MAC_SLOT_SIZE_100;
+		cfg |= V_MAC_SPEED_SEL_100MBPS ;
+		break;
+
+	case sbmac_speed_1000:
+		framecfg |= V_MAC_IFG_RX_1000 |
+			V_MAC_IFG_TX_1000 |
+			V_MAC_IFG_THRSH_1000 |
+			V_MAC_SLOT_SIZE_1000;
+		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
+		break;
+
+	default:
+		return 0;
+	}
+
+	/*
+	 * Send the bits back to the hardware
+	 */
+
+	__raw_writeq(framecfg, s->sbm_framecfg);
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	return 1;
+}
+
+/**********************************************************************
+ *  SBMAC_SET_DUPLEX(s,duplex,fc)
+ *
+ *  Set Ethernet duplex and flow control options for this MAC
+ *  Warning: must be called when MAC is off!
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *  	   duplex - duplex setting (see enum sbmac_duplex)
+ *  	   fc - flow control setting (see enum sbmac_fc)
+ *
+ *  Return value:
+ *  	   1 if ok
+ *  	   0 if an invalid parameter combination was specified
+ ********************************************************************* */
+
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+			    enum sbmac_fc fc)
+{
+	uint64_t cfg;
+
+	/*
+	 * Save new current values
+	 */
+
+	s->sbm_duplex = duplex;
+	s->sbm_fc = fc;
+
+	if (s->sbm_state == sbmac_state_on)
+		return 0;	/* save for next restart */
+
+	/*
+	 * Read current register values
+	 */
+
+	cfg = __raw_readq(s->sbm_maccfg);
+
+	/*
+	 * Mask off the stuff we're about to change
+	 */
+
+	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
+
+
+	switch (duplex) {
+	case sbmac_duplex_half:
+		switch (fc) {
+		case sbmac_fc_disabled:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
+			break;
+
+		case sbmac_fc_collision:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
+			break;
+
+		case sbmac_fc_carrier:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
+			break;
+
+		case sbmac_fc_frame:		/* not valid in half duplex */
+		default:			/* invalid selection */
+			return 0;
+		}
+		break;
+
+	case sbmac_duplex_full:
+		switch (fc) {
+		case sbmac_fc_disabled:
+			cfg |= V_MAC_FC_CMD_DISABLED;
+			break;
+
+		case sbmac_fc_frame:
+			cfg |= V_MAC_FC_CMD_ENABLED;
+			break;
+
+		case sbmac_fc_collision:	/* not valid in full duplex */
+		case sbmac_fc_carrier:		/* not valid in full duplex */
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	/*
+	 * Send the bits back to the hardware
+	 */
+
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	return 1;
+}
+
+
+
+
+/**********************************************************************
+ *  SBMAC_INTR()
+ *
+ *  Interrupt handler for MAC interrupts
+ *
+ *  Input parameters:
+ *  	   irq - interrupt number
+ *  	   dev_instance - net_device pointer for this MAC
+ *
+ *  Return value:
+ *  	   IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise
+ ********************************************************************* */
+static irqreturn_t sbmac_intr(int irq,void *dev_instance)
+{
+	struct net_device *dev = (struct net_device *) dev_instance;
+	struct sbmac_softc *sc = netdev_priv(dev);
+	uint64_t isr;
+	int handled = 0;
+
+	/*
+	 * Read the ISR (this clears the bits in the real
+	 * register, except for counter addr)
+	 */
+
+	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
+	if (isr == 0)
+		return IRQ_RETVAL(0);
+	handled = 1;
+
+	/*
+	 * Transmits on channel 0
+	 */
+
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
+		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
+
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+		if (napi_schedule_prep(&sc->napi)) {
+			__raw_writeq(0, sc->sbm_imr);
+			__napi_schedule(&sc->napi);
+			/* Depend on the exit from poll to reenable intr */
+		}
+		else {
+			/* may leave some packets behind */
+			sbdma_rx_process(sc,&(sc->sbm_rxdma),
+					 SBMAC_MAX_RXDESCR * 2, 0);
+		}
+	}
+	return IRQ_RETVAL(handled);
+}
+
+/**********************************************************************
+ *  SBMAC_START_TX(skb,dev)
+ *
+ *  Start output on the specified interface.  Basically, we
+ *  queue as many buffers as we can until the ring fills up, or
+ *  we run off the end of the queue, whichever comes first.
+ *
+ *  Input parameters:
+ *  	   skb - sk_buff to transmit
+ *  	   dev - net device
+ *
+ *  Return value:
+ *  	   NETDEV_TX_OK if the buffer was queued,
+ *  	   NETDEV_TX_BUSY if the transmit ring was full
+ ********************************************************************* */
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
+
+	/* lock eth irq */
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+	/*
+	 * Put the buffer on the transmit ring.  If we
+	 * don't have room, stop the queue.
+	 */
+
+	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
+		/* XXX save skb that we could not send */
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/**********************************************************************
+ *  SBMAC_SETMULTI(sc)
+ *
+ *  Reprogram the multicast table into the hardware, given
+ *  the list of multicasts associated with the interface
+ *  structure.
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_setmulti(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+	void __iomem *port;
+	int idx;
+	struct netdev_hw_addr *ha;
+	struct net_device *dev = sc->sbm_dev;
+
+	/*
+	 * Clear out entire multicast table.  We do this by nuking
+	 * the entire hash table and all the direct matches except
+	 * the first one, which is used for our station address
+	 */
+
+	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	/*
+	 * Clear the filter to say we don't want any multicasts.
+	 */
+
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/*
+		 * Enable ALL multicasts.  Do this by inverting the
+		 * multicast enable bit.
+		 */
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+		__raw_writeq(reg, sc->sbm_rxfilter);
+		return;
+	}
+
+
+	/*
+	 * Program new multicast entries.  For now, only use the
+	 * perfect filter.  In the future we'll need to use the
+	 * hash filter if the perfect filter overflows
+	 */
+
+	/* XXX only using perfect filter for now, need to use hash
+	 * XXX if the table overflows */
+
+	idx = 1;		/* skip station address */
+	netdev_for_each_mc_addr(ha, dev) {
+		if (idx == MAC_ADDR_COUNT)
+			break;
+		reg = sbmac_addr2reg(ha->addr);
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
+		__raw_writeq(reg, port);
+		idx++;
+	}
+
+	/*
+	 * Enable the "accept multicast" bit if we programmed at least one
+	 * multicast.
+	 */
+
+	if (idx > 1) {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= M_MAC_MCAST_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+}
+
+static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
+{
+	if (new_mtu >  ENET_PACKET_SIZE)
+		return -EINVAL;
+	_dev->mtu = new_mtu;
+	pr_info("changing the mtu to %d\n", new_mtu);
+	return 0;
+}
+
+static const struct net_device_ops sbmac_netdev_ops = {
+	.ndo_open		= sbmac_open,
+	.ndo_stop		= sbmac_close,
+	.ndo_start_xmit		= sbmac_start_tx,
+	.ndo_set_rx_mode	= sbmac_set_rx_mode,
+	.ndo_tx_timeout		= sbmac_tx_timeout,
+	.ndo_do_ioctl		= sbmac_mii_ioctl,
+	.ndo_change_mtu		= sb1250_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= sbmac_netpoll,
+#endif
+};
+
+/**********************************************************************
+ *  SBMAC_INIT(dev)
+ *
+ *  Attach routine - init hardware and hook ourselves into linux
+ *
+ *  Input parameters:
+ *  	   dev - net_device structure
+ *
+ *  Return value:
+ *  	   status
+ ********************************************************************* */
+
+static int sbmac_init(struct platform_device *pldev, long long base)
+{
+	struct net_device *dev = platform_get_drvdata(pldev);
+	int idx = pldev->id;
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned char *eaddr;
+	uint64_t ea_reg;
+	int i;
+	int err;
+
+	sc->sbm_dev = dev;
+	sc->sbe_idx = idx;
+
+	eaddr = sc->sbm_hwaddr;
+
+	/*
+	 * Read the ethernet address.  The firmware left this programmed
+	 * for us in the ethernet address register for each mac.
+	 */
+
+	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
+	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
+	for (i = 0; i < 6; i++) {
+		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
+		ea_reg >>= 8;
+	}
+
+	for (i = 0; i < 6; i++) {
+		dev->dev_addr[i] = eaddr[i];
+	}
+
+	/*
+	 * Initialize context (get pointers to registers and stuff), then
+	 * allocate the memory for the descriptor tables.
+	 */
+
+	sbmac_initctx(sc);
+
+	/*
+	 * Set up the Linux device callbacks
+	 */
+
+	spin_lock_init(&(sc->sbm_lock));
+
+	dev->netdev_ops = &sbmac_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+
+	dev->irq		= UNIT_INT(idx);
+
+	/* This is needed on PASS2 for the Rx H/W checksum feature */
+	sbmac_set_iphdr_offset(sc);
+
+	sc->mii_bus = mdiobus_alloc();
+	if (sc->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto uninit_ctx;
+	}
+
+	sc->mii_bus->name = sbmac_mdio_string;
+	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pldev->name, idx);
+	sc->mii_bus->priv = sc;
+	sc->mii_bus->read = sbmac_mii_read;
+	sc->mii_bus->write = sbmac_mii_write;
+	sc->mii_bus->irq = sc->phy_irq;
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+
+	sc->mii_bus->parent = &pldev->dev;
+	/*
+	 * Probe PHY address
+	 */
+	err = mdiobus_register(sc->mii_bus);
+	if (err) {
+		printk(KERN_ERR "%s: unable to register MDIO bus\n",
+		       dev->name);
+		goto free_mdio;
+	}
+	platform_set_drvdata(pldev, sc->mii_bus);
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s.%d: unable to register netdev\n",
+		       sbmac_string, idx);
+		goto unreg_mdio;
+	}
+
+	pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
+
+	if (sc->rx_hw_checksum == ENABLE)
+		pr_info("%s: enabling TCP rcv checksum\n", dev->name);
+
+	/*
+	 * Display Ethernet address (this is called during the config
+	 * process so we need to finish off the config message that
+	 * was being displayed)
+	 */
+	pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
+	       dev->name, base, eaddr);
+
+	return 0;
+unreg_mdio:
+	mdiobus_unregister(sc->mii_bus);
+free_mdio:
+	mdiobus_free(sc->mii_bus);
+uninit_ctx:
+	sbmac_uninitctx(sc);
+	return err;
+}
+
+
+static int sbmac_open(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	int err;
+
+	if (debug > 1)
+		pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
+
+	/*
+	 * map/route interrupt (clear status first, in case something
+	 * weird is pending; we haven't initialized the mac registers
+	 * yet)
+	 */
+
+	__raw_readq(sc->sbm_isr);
+	err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
+	if (err) {
+		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
+		       dev->irq);
+		goto out_err;
+	}
+
+	sc->sbm_speed = sbmac_speed_none;
+	sc->sbm_duplex = sbmac_duplex_none;
+	sc->sbm_fc = sbmac_fc_none;
+	sc->sbm_pause = -1;
+	sc->sbm_link = 0;
+
+	/*
+	 * Attach to the PHY
+	 */
+	err = sbmac_mii_probe(dev);
+	if (err)
+		goto out_unregister;
+
+	/*
+	 * Turn on the channel
+	 */
+
+	sbmac_set_channel_state(sc,sbmac_state_on);
+
+	netif_start_queue(dev);
+
+	sbmac_set_rx_mode(dev);
+
+	phy_start(sc->phy_dev);
+
+	napi_enable(&sc->napi);
+
+	return 0;
+
+out_unregister:
+	free_irq(dev->irq, dev);
+out_err:
+	return err;
+}
+
+static int sbmac_mii_probe(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	struct phy_device *phy_dev;
+	int i;
+
+	for (i = 0; i < PHY_MAX_ADDR; i++) {
+		phy_dev = sc->mii_bus->phy_map[i];
+		if (phy_dev)
+			break;
+	}
+	if (!phy_dev) {
+		printk(KERN_ERR "%s: no PHY found\n", dev->name);
+		return -ENXIO;
+	}
+
+	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll,
+			      PHY_INTERFACE_MODE_GMII);
+	if (IS_ERR(phy_dev)) {
+		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+		return PTR_ERR(phy_dev);
+	}
+
+	/* Remove any features not supported by the controller */
+	phy_dev->supported &= SUPPORTED_10baseT_Half |
+			      SUPPORTED_10baseT_Full |
+			      SUPPORTED_100baseT_Half |
+			      SUPPORTED_100baseT_Full |
+			      SUPPORTED_1000baseT_Half |
+			      SUPPORTED_1000baseT_Full |
+			      SUPPORTED_Autoneg |
+			      SUPPORTED_MII |
+			      SUPPORTED_Pause |
+			      SUPPORTED_Asym_Pause;
+	phy_dev->advertising = phy_dev->supported;
+
+	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		dev->name, phy_dev->drv->name,
+		dev_name(&phy_dev->dev), phy_dev->irq);
+
+	sc->phy_dev = phy_dev;
+
+	return 0;
+}
+
+
+static void sbmac_mii_poll(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	struct phy_device *phy_dev = sc->phy_dev;
+	unsigned long flags;
+	enum sbmac_fc fc;
+	int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;
+
+	link_chg = (sc->sbm_link != phy_dev->link);
+	speed_chg = (sc->sbm_speed != phy_dev->speed);
+	duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
+	pause_chg = (sc->sbm_pause != phy_dev->pause);
+
+	if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
+		return;					/* Hmmm... */
+
+	if (!phy_dev->link) {
+		if (link_chg) {
+			sc->sbm_link = phy_dev->link;
+			sc->sbm_speed = sbmac_speed_none;
+			sc->sbm_duplex = sbmac_duplex_none;
+			sc->sbm_fc = sbmac_fc_disabled;
+			sc->sbm_pause = -1;
+			pr_info("%s: link unavailable\n", dev->name);
+		}
+		return;
+	}
+
+	if (phy_dev->duplex == DUPLEX_FULL) {
+		if (phy_dev->pause)
+			fc = sbmac_fc_frame;
+		else
+			fc = sbmac_fc_disabled;
+	} else
+		fc = sbmac_fc_collision;
+	fc_chg = (sc->sbm_fc != fc);
+
+	pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
+		phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+	sc->sbm_speed = phy_dev->speed;
+	sc->sbm_duplex = phy_dev->duplex;
+	sc->sbm_fc = fc;
+	sc->sbm_pause = phy_dev->pause;
+	sc->sbm_link = phy_dev->link;
+
+	if ((speed_chg || duplex_chg || fc_chg) &&
+	    sc->sbm_state != sbmac_state_off) {
+		/*
+		 * something changed, restart the channel
+		 */
+		if (debug > 1)
+			pr_debug("%s: restarting channel "
+				 "because PHY state changed\n", dev->name);
+		sbmac_channel_stop(sc);
+		sbmac_channel_start(sc);
+	}
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+}
+
+
+static void sbmac_tx_timeout (struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	dev->stats.tx_errors++;
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
+}
+
+
+
+
+static void sbmac_set_rx_mode(struct net_device *dev)
+{
+	unsigned long flags;
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
+		/*
+		 * Promiscuous changed.
+		 */
+
+		if (dev->flags & IFF_PROMISC) {
+			sbmac_promiscuous_mode(sc,1);
+		}
+		else {
+			sbmac_promiscuous_mode(sc,0);
+		}
+	}
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	/*
+	 * Program the multicasts.  Do this every time.
+	 */
+
+	sbmac_setmulti(sc);
+
+}
+
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	if (!netif_running(dev) || !sc->phy_dev)
+		return -EINVAL;
+
+	return phy_mii_ioctl(sc->phy_dev, rq, cmd);
+}
+
+static int sbmac_close(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	napi_disable(&sc->napi);
+
+	phy_stop(sc->phy_dev);
+
+	sbmac_set_channel_state(sc, sbmac_state_off);
+
+	netif_stop_queue(dev);
+
+	if (debug > 1)
+		pr_debug("%s: Shutting down ethercard\n", dev->name);
+
+	phy_disconnect(sc->phy_dev);
+	sc->phy_dev = NULL;
+	free_irq(dev->irq, dev);
+
+	sbdma_emptyring(&(sc->sbm_txdma));
+	sbdma_emptyring(&(sc->sbm_rxdma));
+
+	return 0;
+}
+
+static int sbmac_poll(struct napi_struct *napi, int budget)
+{
+	struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
+	int work_done;
+
+	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
+	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+
+#ifdef CONFIG_SBMAC_COALESCE
+		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+			     sc->sbm_imr);
+#else
+		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+	}
+
+	return work_done;
+}
+
+
+static int sbmac_probe(struct platform_device *pldev)
+{
+	struct net_device *dev;
+	struct sbmac_softc *sc;
+	void __iomem *sbm_base;
+	struct resource *res;
+	u64 sbmac_orig_hwaddr;
+	int err;
+
+	res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+	BUG_ON(!res);
+	sbm_base = ioremap_nocache(res->start, resource_size(res));
+	if (!sbm_base) {
+		printk(KERN_ERR "%s: unable to map device registers\n",
+		       dev_name(&pldev->dev));
+		err = -ENOMEM;
+		goto out_out;
+	}
+
+	/*
+	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
+	 * value for us by the firmware if we're going to use this MAC.
+	 * If we find a zero, skip this MAC.
+	 */
+	sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
+	pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
+		 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
+	if (sbmac_orig_hwaddr == 0) {
+		err = 0;
+		goto out_unmap;
+	}
+
+	/*
+	 * Okay, cool.  Initialize this MAC.
+	 */
+	dev = alloc_etherdev(sizeof(struct sbmac_softc));
+	if (!dev) {
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	platform_set_drvdata(pldev, dev);
+	SET_NETDEV_DEV(dev, &pldev->dev);
+
+	sc = netdev_priv(dev);
+	sc->sbm_base = sbm_base;
+
+	err = sbmac_init(pldev, res->start);
+	if (err)
+		goto out_kfree;
+
+	return 0;
+
+out_kfree:
+	free_netdev(dev);
+	__raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);
+
+out_unmap:
+	iounmap(sbm_base);
+
+out_out:
+	return err;
+}
+
+static int __exit sbmac_remove(struct platform_device *pldev)
+{
+	struct net_device *dev = platform_get_drvdata(pldev);
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	sbmac_uninitctx(sc);
+	mdiobus_unregister(sc->mii_bus);
+	mdiobus_free(sc->mii_bus);
+	iounmap(sc->sbm_base);
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver sbmac_driver = {
+	.probe = sbmac_probe,
+	.remove = __exit_p(sbmac_remove),
+	.driver = {
+		.name = sbmac_string,
+	},
+};
+
+module_platform_driver(sbmac_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
new file mode 100644
index 0000000..3613469
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -0,0 +1,18296 @@
+/*
+ * tg3.c: Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
+ *
+ * Firmware is:
+ *	Derived from proprietary unpublished source code,
+ *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ *	Permission is hereby granted for the distribution of this firmware
+ *	data in hexadecimal or equivalent format, provided this copyright
+ *	notice is accompanying it.
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/brcmphy.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <uapi/linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#define BAR_0	0
+#define BAR_2	2
+
+#include "tg3.h"
+
+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	clear_bit(flag, bits);
+}
+
+#define tg3_flag(tp, flag)				\
+	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag)				\
+	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag)			\
+	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
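+
+/*
+ * Usage sketch: the wrappers paste the flag name onto TG3_FLAG_, so
+ *
+ *	if (tg3_flag(tp, ENABLE_APE))
+ *
+ * expands to test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags).
+ */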
+
+#define DRV_MODULE_NAME		"tg3"
+#define TG3_MAJ_NUM			3
+#define TG3_MIN_NUM			137
+#define DRV_MODULE_VERSION	\
+	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
+#define DRV_MODULE_RELDATE	"May 11, 2014"
+
+#define RESET_KIND_SHUTDOWN	0
+#define RESET_KIND_INIT		1
+#define RESET_KIND_SUSPEND	2
+
+#define TG3_DEF_RX_MODE		0
+#define TG3_DEF_TX_MODE		0
+#define TG3_DEF_MSG_ENABLE	  \
+	(NETIF_MSG_DRV		| \
+	 NETIF_MSG_PROBE	| \
+	 NETIF_MSG_LINK		| \
+	 NETIF_MSG_TIMER	| \
+	 NETIF_MSG_IFDOWN	| \
+	 NETIF_MSG_IFUP		| \
+	 NETIF_MSG_RX_ERR	| \
+	 NETIF_MSG_TX_ERR)
+
+#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+
+#define TG3_TX_TIMEOUT			(5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define TG3_MIN_MTU			60
+#define TG3_MAX_MTU(tp)	\
+	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
+
+/* These numbers seem to be hard coded in the NIC firmware somehow.
+ * You can't change the ring sizes, but you can change where you place
+ * them in the NIC onboard memory.
+ */
+#define TG3_RX_STD_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
+#define TG3_DEF_RX_RING_PENDING		200
+#define TG3_RX_JMB_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
+#define TG3_DEF_RX_JUMBO_RING_PENDING	100
+
+/* Do not place this n-ring entries value into the tp struct itself,
+ * we really want to expose these constants to GCC so that modulo et
+ * al.  operations are done with shifts and masks instead of with
+ * hw multiply/modulo instructions.  Another solution would be to
+ * replace things like '% foo' with '& (foo - 1)'.
+ */
+
+#define TG3_TX_RING_SIZE		512
+#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
+
+#define TG3_RX_STD_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
+#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
+				 TG3_TX_RING_SIZE)
+#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
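+/* Worked example: TG3_TX_RING_SIZE is a power of two, so NEXT_TX() wraps
+ * with a mask rather than a modulo, e.g. NEXT_TX(511) == (512 & 511) == 0.
+ */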
+
+#define TG3_DMA_BYTE_ENAB		64
+
+#define TG3_RX_STD_DMA_SZ		1536
+#define TG3_RX_JMB_DMA_SZ		9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
+
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
+
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode.  The driver
+ * works around this bug by double copying the packet.  This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient.  For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path.  Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD		256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
+#else
+	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
+#endif
+
+#if (NET_IP_ALIGN != 0)
+#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
+#else
+#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
+#endif
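+
+/*
+ * In other words: with NET_IP_ALIGN == 0 or efficient unaligned access,
+ * TG3_RX_COPY_THRESH() collapses to the 256-byte constant above and can
+ * be folded by the compiler; otherwise it reads the per-device
+ * tp->rx_copy_thresh, presumably so the 5701 workaround can adjust the
+ * threshold at runtime.
+ */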
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
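+/* Worked example: assuming tx_pending is left at the
+ * TG3_DEF_TX_RING_PENDING default of 511, the queue is presumably woken
+ * once more than 511 / 4 = 127 descriptors are free again.
+ */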
+#define TG3_TX_BD_DMA_MAX_2K		2048
+#define TG3_TX_BD_DMA_MAX_4K		4096
+
+#define TG3_RAW_IP_ALIGN 2
+
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
+#define TG3_FW_UPDATE_TIMEOUT_SEC	5
+#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
+
+#define FIRMWARE_TG3		"tigon/tg3.bin"
+#define FIRMWARE_TG357766	"tigon/tg357766.bin"
+#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
+#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
+
+static char version[] =
+	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
+
+MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+
+static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+
+#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
+#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
+
+static const struct pci_device_id tg3_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+			TG3_DRV_DATA_FLAG_5705_10_100},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
+			PCI_VENDOR_ID_LENOVO,
+			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
+	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+	{ "rx_octets" },
+	{ "rx_fragments" },
+	{ "rx_ucast_packets" },
+	{ "rx_mcast_packets" },
+	{ "rx_bcast_packets" },
+	{ "rx_fcs_errors" },
+	{ "rx_align_errors" },
+	{ "rx_xon_pause_rcvd" },
+	{ "rx_xoff_pause_rcvd" },
+	{ "rx_mac_ctrl_rcvd" },
+	{ "rx_xoff_entered" },
+	{ "rx_frame_too_long_errors" },
+	{ "rx_jabbers" },
+	{ "rx_undersize_packets" },
+	{ "rx_in_length_errors" },
+	{ "rx_out_length_errors" },
+	{ "rx_64_or_less_octet_packets" },
+	{ "rx_65_to_127_octet_packets" },
+	{ "rx_128_to_255_octet_packets" },
+	{ "rx_256_to_511_octet_packets" },
+	{ "rx_512_to_1023_octet_packets" },
+	{ "rx_1024_to_1522_octet_packets" },
+	{ "rx_1523_to_2047_octet_packets" },
+	{ "rx_2048_to_4095_octet_packets" },
+	{ "rx_4096_to_8191_octet_packets" },
+	{ "rx_8192_to_9022_octet_packets" },
+
+	{ "tx_octets" },
+	{ "tx_collisions" },
+
+	{ "tx_xon_sent" },
+	{ "tx_xoff_sent" },
+	{ "tx_flow_control" },
+	{ "tx_mac_errors" },
+	{ "tx_single_collisions" },
+	{ "tx_mult_collisions" },
+	{ "tx_deferred" },
+	{ "tx_excessive_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_collide_2times" },
+	{ "tx_collide_3times" },
+	{ "tx_collide_4times" },
+	{ "tx_collide_5times" },
+	{ "tx_collide_6times" },
+	{ "tx_collide_7times" },
+	{ "tx_collide_8times" },
+	{ "tx_collide_9times" },
+	{ "tx_collide_10times" },
+	{ "tx_collide_11times" },
+	{ "tx_collide_12times" },
+	{ "tx_collide_13times" },
+	{ "tx_collide_14times" },
+	{ "tx_collide_15times" },
+	{ "tx_ucast_packets" },
+	{ "tx_mcast_packets" },
+	{ "tx_bcast_packets" },
+	{ "tx_carrier_sense_errors" },
+	{ "tx_discards" },
+	{ "tx_errors" },
+
+	{ "dma_writeq_full" },
+	{ "dma_write_prioq_full" },
+	{ "rxbds_empty" },
+	{ "rx_discards" },
+	{ "rx_errors" },
+	{ "rx_threshold_hit" },
+
+	{ "dma_readq_full" },
+	{ "dma_read_prioq_full" },
+	{ "tx_comp_queue_full" },
+
+	{ "ring_set_send_prod_index" },
+	{ "ring_status_update" },
+	{ "nic_irqs" },
+	{ "nic_avoided_irqs" },
+	{ "nic_tx_threshold_hit" },
+
+	{ "mbuf_lwm_thresh_hit" },
+};
+
+#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
+#define TG3_NVRAM_TEST		0
+#define TG3_LINK_TEST		1
+#define TG3_REGISTER_TEST	2
+#define TG3_MEMORY_TEST		3
+#define TG3_MAC_LOOPB_TEST	4
+#define TG3_PHY_LOOPB_TEST	5
+#define TG3_EXT_LOOPB_TEST	6
+#define TG3_INTERRUPT_TEST	7
+
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[] = {
+	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
+	[TG3_LINK_TEST]		= { "link test         (online) " },
+	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
+	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
+	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
+	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
+	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
+	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
+};
+
+#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
+
+
+static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off);
+}
+
+static u32 tg3_read32(struct tg3 *tp, u32 off)
+{
+	return readl(tp->regs + off);
+}
+
+static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->aperegs + off);
+}
+
+static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
+{
+	return readl(tp->aperegs + off);
+}
+
+static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off);
+	readl(tp->regs + off);
+}
+
+static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	return val;
+}
+
+static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
+	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
+				       TG3_64BIT_REG_LOW, val);
+		return;
+	}
+	if (off == TG3_RX_STD_PROD_IDX_REG) {
+		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
+				       TG3_64BIT_REG_LOW, val);
+		return;
+	}
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+
+	/* In indirect mode when disabling interrupts, we also need
+	 * to clear the interrupt bit in the GRC local ctrl register.
+	 */
+	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
+	    (val == 0x1)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
+				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
+	}
+}
+
+static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	return val;
+}
+
+/* usec_wait specifies the wait time in usec when writing to certain registers
+ * where it is unsafe to read back the register without some delay.
+ * GRC_LOCAL_CTRL is one example, where the GPIOs are toggled to switch power.
+ * TG3PCI_CLOCK_CTRL is another, where the clock frequencies are changed.
+ */
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
+{
+	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
+		/* Non-posted methods */
+		tp->write32(tp, off, val);
+	else {
+		/* Posted method */
+		tg3_write32(tp, off, val);
+		if (usec_wait)
+			udelay(usec_wait);
+		tp->read32(tp, off);
+	}
+	/* Wait again after the read for the posted method to guarantee that
+	 * the wait time is met.
+	 */
+	if (usec_wait)
+		udelay(usec_wait);
+}
+
+static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
+{
+	tp->write32_mbox(tp, off, val);
+	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+	     !tg3_flag(tp, ICH_WORKAROUND)))
+		tp->read32_mbox(tp, off);
+}
+
+static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+	void __iomem *mbox = tp->regs + off;
+	writel(val, mbox);
+	if (tg3_flag(tp, TXD_MBOX_HWBUG))
+		writel(val, mbox);
+	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+	    tg3_flag(tp, FLUSH_POSTED_WRITES))
+		readl(mbox);
+}
+
+static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
+{
+	return readl(tp->regs + off + GRCMBOX_BASE);
+}
+
+static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off + GRCMBOX_BASE);
+}
+
+#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
+#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
+
+#define tw32(reg, val)			tp->write32(tp, reg, val)
+#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg)			tp->read32(tp, reg)
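+
+/*
+ * Usage sketch: tw32_f() is the flush-only form, while tw32_wait_f()
+ * adds the post-write delay discussed above, e.g. the clock switching
+ * code below uses
+ *
+ *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
+ *
+ * so that at least 40 usec elapse before the next register access.
+ */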
+
+static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
+	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
+		return;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	} else {
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+		tw32_f(TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	}
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+{
+	unsigned long flags;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
+	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
+		*val = 0;
+		return;
+	}
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	} else {
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+		*val = tr32(TG3PCI_MEM_WIN_DATA);
+
+		/* Always leave this as zero. */
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	}
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_ape_lock_init(struct tg3 *tp)
+{
+	int i;
+	u32 regbase, bit;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5761)
+		regbase = TG3_APE_LOCK_GRANT;
+	else
+		regbase = TG3_APE_PER_LOCK_GRANT;
+
+	/* Make sure the driver isn't holding any stale locks. */
+	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
+		switch (i) {
+		case TG3_APE_LOCK_PHY0:
+		case TG3_APE_LOCK_PHY1:
+		case TG3_APE_LOCK_PHY2:
+		case TG3_APE_LOCK_PHY3:
+			bit = APE_LOCK_GRANT_DRIVER;
+			break;
+		default:
+			if (!tp->pci_fn)
+				bit = APE_LOCK_GRANT_DRIVER;
+			else
+				bit = 1 << tp->pci_fn;
+		}
+		tg3_ape_write32(tp, regbase + 4 * i, bit);
+	}
+
+}
+
+static int tg3_ape_lock(struct tg3 *tp, int locknum)
+{
+	int i, off;
+	int ret = 0;
+	u32 status, req, gnt, bit;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return 0;
+
+	switch (locknum) {
+	case TG3_APE_LOCK_GPIO:
+		if (tg3_asic_rev(tp) == ASIC_REV_5761)
+			return 0;
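+		/* fall through */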
+	case TG3_APE_LOCK_GRC:
+	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_REQ_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
+		break;
+	case TG3_APE_LOCK_PHY0:
+	case TG3_APE_LOCK_PHY1:
+	case TG3_APE_LOCK_PHY2:
+	case TG3_APE_LOCK_PHY3:
+		bit = APE_LOCK_REQ_DRIVER;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
+		req = TG3_APE_LOCK_REQ;
+		gnt = TG3_APE_LOCK_GRANT;
+	} else {
+		req = TG3_APE_PER_LOCK_REQ;
+		gnt = TG3_APE_PER_LOCK_GRANT;
+	}
+
+	off = 4 * locknum;
+
+	tg3_ape_write32(tp, req + off, bit);
+
+	/* Wait for up to 1 millisecond to acquire lock. */
+	for (i = 0; i < 100; i++) {
+		status = tg3_ape_read32(tp, gnt + off);
+		if (status == bit)
+			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
+		udelay(10);
+	}
+
+	if (status != bit) {
+		/* Revoke the lock request. */
+		tg3_ape_write32(tp, gnt + off, bit);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+static void tg3_ape_unlock(struct tg3 *tp, int locknum)
+{
+	u32 gnt, bit;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return;
+
+	switch (locknum) {
+	case TG3_APE_LOCK_GPIO:
+		if (tg3_asic_rev(tp) == ASIC_REV_5761)
+			return;
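+		/* fall through */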
+	case TG3_APE_LOCK_GRC:
+	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_GRANT_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
+		break;
+	case TG3_APE_LOCK_PHY0:
+	case TG3_APE_LOCK_PHY1:
+	case TG3_APE_LOCK_PHY2:
+	case TG3_APE_LOCK_PHY3:
+		bit = APE_LOCK_GRANT_DRIVER;
+		break;
+	default:
+		return;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5761)
+		gnt = TG3_APE_LOCK_GRANT;
+	else
+		gnt = TG3_APE_PER_LOCK_GRANT;
+
+	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
+}
+
+static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
+{
+	u32 apedata;
+
+	while (timeout_us) {
+		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+			return -EBUSY;
+
+		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+			break;
+
+		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+		udelay(10);
+		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
+	}
+
+	return timeout_us ? 0 : -EBUSY;
+}
+
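+/* Returns nonzero if the APE event was still pending when the timeout
+ * expired, zero once the pending bit clears.
+ */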
+static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
+{
+	u32 i, apedata;
+
+	for (i = 0; i < timeout_us / 10; i++) {
+		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+			break;
+
+		udelay(10);
+	}
+
+	return i == timeout_us / 10;
+}
+
+static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
+				   u32 len)
+{
+	int err;
+	u32 i, bufoff, msgoff, maxlen, apedata;
+
+	if (!tg3_flag(tp, APE_HAS_NCSI))
+		return 0;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+	if (apedata != APE_SEG_SIG_MAGIC)
+		return -ENODEV;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+	if (!(apedata & APE_FW_STATUS_READY))
+		return -EAGAIN;
+
+	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
+		 TG3_APE_SHMEM_BASE;
+	msgoff = bufoff + 2 * sizeof(u32);
+	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
+
+	while (len) {
+		u32 length;
+
+		/* Cap xfer sizes to scratchpad limits. */
+		length = (len > maxlen) ? maxlen : len;
+		len -= length;
+
+		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+		if (!(apedata & APE_FW_STATUS_READY))
+			return -EAGAIN;
+
+		/* Wait for up to 1 msec for APE to service previous event. */
+		err = tg3_ape_event_lock(tp, 1000);
+		if (err)
+			return err;
+
+		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
+			  APE_EVENT_STATUS_SCRTCHPD_READ |
+			  APE_EVENT_STATUS_EVENT_PENDING;
+		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
+
+		tg3_ape_write32(tp, bufoff, base_off);
+		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
+
+		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+		base_off += length;
+
+		if (tg3_ape_wait_for_event(tp, 30000))
+			return -EAGAIN;
+
+		for (i = 0; length; i += 4, length -= 4) {
+			u32 val = tg3_ape_read32(tp, msgoff + i);
+			memcpy(data, &val, sizeof(u32));
+			data++;
+		}
+	}
+
+	return 0;
+}
+
+static int tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+	int err;
+	u32 apedata;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+	if (apedata != APE_SEG_SIG_MAGIC)
+		return -EAGAIN;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+	if (!(apedata & APE_FW_STATUS_READY))
+		return -EAGAIN;
+
+	/* Wait for up to 1 millisecond for APE to service previous event. */
+	err = tg3_ape_event_lock(tp, 1000);
+	if (err)
+		return err;
+
+	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+			event | APE_EVENT_STATUS_EVENT_PENDING);
+
+	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+	return 0;
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+	u32 event;
+	u32 apedata;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return;
+
+	switch (kind) {
+	case RESET_KIND_INIT:
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+				APE_HOST_SEG_SIG_MAGIC);
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+				APE_HOST_SEG_LEN_MAGIC);
+		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+				APE_HOST_BEHAV_NO_PHYLOCK);
+		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+				    TG3_APE_HOST_DRVR_STATE_START);
+
+		event = APE_EVENT_STATUS_STATE_START;
+		break;
+	case RESET_KIND_SHUTDOWN:
+		/* With the interface we are currently using,
+		 * APE does not track driver state.  Wiping
+		 * out the HOST SEGMENT SIGNATURE forces
+		 * the APE to assume OS absent status.
+		 */
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+		if (device_may_wakeup(&tp->pdev->dev) &&
+		    tg3_flag(tp, WOL_ENABLE)) {
+			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+					    TG3_APE_HOST_WOL_SPEED_AUTO);
+			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+		} else
+			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+		event = APE_EVENT_STATUS_STATE_UNLOAD;
+		break;
+	default:
+		return;
+	}
+
+	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+	tg3_ape_send_event(tp, event);
+}
+
+static void tg3_disable_ints(struct tg3 *tp)
+{
+	int i;
+
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
+	for (i = 0; i < tp->irq_max; i++)
+		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
+}
+
+static void tg3_enable_ints(struct tg3 *tp)
+{
+	int i;
+
+	tp->irq_sync = 0;
+	wmb();
+
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+
+	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+		if (tg3_flag(tp, 1SHOT_MSI))
+			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+		tp->coal_now |= tnapi->coal_now;
+	}
+
+	/* Force an initial interrupt */
+	if (!tg3_flag(tp, TAGGED_STATUS) &&
+	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
+		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+	else
+		tw32(HOSTCC_MODE, tp->coal_now);
+
+	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
+}
+
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int work_exists = 0;
+
+	/* check for phy events */
+	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+		if (sblk->status & SD_STATUS_LINK_CHG)
+			work_exists = 1;
+	}
+
+	/* check for TX work to do */
+	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
+		work_exists = 1;
+
+	/* check for RX work to do */
+	if (tnapi->rx_rcb_prod_idx &&
+	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+		work_exists = 1;
+
+	return work_exists;
+}
+
+/* tg3_int_reenable
+ *  similar to tg3_enable_ints, but it accurately determines whether there
+ *  is new work pending and can return without flushing the PIO write
+ *  which reenables interrupts
+ */
+static void tg3_int_reenable(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+
+	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+	mmiowb();
+
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
+		tw32(HOSTCC_MODE, tp->coalesce_mode |
+		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
+}
+
+static void tg3_switch_clocks(struct tg3 *tp)
+{
+	u32 clock_ctrl;
+	u32 orig_clock_ctrl;
+
+	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
+		return;
+
+	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+
+	orig_clock_ctrl = clock_ctrl;
+	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
+		       CLOCK_CTRL_CLKRUN_OENABLE |
+		       0x1f);
+	tp->pci_clock_ctrl = clock_ctrl;
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+			tw32_wait_f(TG3PCI_CLOCK_CTRL,
+				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
+		}
+	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+		tw32_wait_f(TG3PCI_CLOCK_CTRL,
+			    clock_ctrl |
+			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
+			    40);
+		tw32_wait_f(TG3PCI_CLOCK_CTRL,
+			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
+			    40);
+	}
+	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
+}
+
+#define PHY_BUSY_LOOPS	5000
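+
+/* With udelay(10) per iteration, PHY_BUSY_LOOPS bounds each MDIO
+ * transaction below to roughly 5000 * 10 usec = 50 msec of busy-waiting.
+ */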
+
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+			 u32 *val)
+{
+	u32 frame_val;
+	unsigned int loops;
+	int ret;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	tg3_ape_lock(tp, tp->phy_ape_lock);
+
+	*val = 0x0;
+
+	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+		      MI_COM_PHY_ADDR_MASK);
+	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+		      MI_COM_REG_ADDR_MASK);
+	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+
+	tw32_f(MAC_MI_COM, frame_val);
+
+	loops = PHY_BUSY_LOOPS;
+	while (loops != 0) {
+		udelay(10);
+		frame_val = tr32(MAC_MI_COM);
+
+		if ((frame_val & MI_COM_BUSY) == 0) {
+			udelay(5);
+			frame_val = tr32(MAC_MI_COM);
+			break;
+		}
+		loops -= 1;
+	}
+
+	ret = -EBUSY;
+	if (loops != 0) {
+		*val = frame_val & MI_COM_DATA_MASK;
+		ret = 0;
+	}
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	tg3_ape_unlock(tp, tp->phy_ape_lock);
+
+	return ret;
+}
+
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+	return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+			  u32 val)
+{
+	u32 frame_val;
+	unsigned int loops;
+	int ret;
+
+	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
+		return 0;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	tg3_ape_lock(tp, tp->phy_ape_lock);
+
+	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+		      MI_COM_PHY_ADDR_MASK);
+	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+		      MI_COM_REG_ADDR_MASK);
+	frame_val |= (val & MI_COM_DATA_MASK);
+	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+
+	tw32_f(MAC_MI_COM, frame_val);
+
+	loops = PHY_BUSY_LOOPS;
+	while (loops != 0) {
+		udelay(10);
+		frame_val = tr32(MAC_MI_COM);
+		if ((frame_val & MI_COM_BUSY) == 0) {
+			udelay(5);
+			frame_val = tr32(MAC_MI_COM);
+			break;
+		}
+		loops -= 1;
+	}
+
+	ret = -EBUSY;
+	if (loops != 0)
+		ret = 0;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	tg3_ape_unlock(tp, tp->phy_ape_lock);
+
+	return ret;
+}
+
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+	return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+			   MII_TG3_AUXCTL_SHDWSEL_MISC);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+		set |= MII_TG3_AUXCTL_MISC_WREN;
+
+	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+	u32 val;
+	int err;
+
+	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+	if (err)
+		return err;
+
+	if (enable)
+		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+	else
+		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+	return err;
+}
+
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+			    reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+	u32 phy_control;
+	int limit, err;
+
+	/* OK, reset it, and poll the BMCR_RESET bit until it
+	 * clears or we time out.
+	 */
+	phy_control = BMCR_RESET;
+	err = tg3_writephy(tp, MII_BMCR, phy_control);
+	if (err != 0)
+		return -EBUSY;
+
+	limit = 5000;
+	while (limit--) {
+		err = tg3_readphy(tp, MII_BMCR, &phy_control);
+		if (err != 0)
+			return -EBUSY;
+
+		if ((phy_control & BMCR_RESET) == 0) {
+			udelay(40);
+			break;
+		}
+		udelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
+{
+	struct tg3 *tp = bp->priv;
+	u32 val;
+
+	spin_lock_bh(&tp->lock);
+
+	if (__tg3_readphy(tp, mii_id, reg, &val))
+		val = -EIO;
+
+	spin_unlock_bh(&tp->lock);
+
+	return val;
+}
+
+static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
+{
+	struct tg3 *tp = bp->priv;
+	u32 ret = 0;
+
+	spin_lock_bh(&tp->lock);
+
+	if (__tg3_writephy(tp, mii_id, reg, val))
+		ret = -EIO;
+
+	spin_unlock_bh(&tp->lock);
+
+	return ret;
+}
+
+static void tg3_mdio_config_5785(struct tg3 *tp)
+{
+	u32 val;
+	struct phy_device *phydev;
+
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+	case PHY_ID_BCM50610:
+	case PHY_ID_BCM50610M:
+		val = MAC_PHYCFG2_50610_LED_MODES;
+		break;
+	case PHY_ID_BCMAC131:
+		val = MAC_PHYCFG2_AC131_LED_MODES;
+		break;
+	case PHY_ID_RTL8211C:
+		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
+		break;
+	case PHY_ID_RTL8201E:
+		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
+		break;
+	default:
+		return;
+	}
+
+	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
+		tw32(MAC_PHYCFG2, val);
+
+		val = tr32(MAC_PHYCFG1);
+		val &= ~(MAC_PHYCFG1_RGMII_INT |
+			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
+		tw32(MAC_PHYCFG1, val);
+
+		return;
+	}
+
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
+		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
+		       MAC_PHYCFG2_FMODE_MASK_MASK |
+		       MAC_PHYCFG2_GMODE_MASK_MASK |
+		       MAC_PHYCFG2_ACT_MASK_MASK   |
+		       MAC_PHYCFG2_QUAL_MASK_MASK |
+		       MAC_PHYCFG2_INBAND_ENABLE;
+
+	tw32(MAC_PHYCFG2, val);
+
+	val = tr32(MAC_PHYCFG1);
+	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
+	}
+	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+	tw32(MAC_PHYCFG1, val);
+
+	val = tr32(MAC_EXT_RGMII_MODE);
+	val &= ~(MAC_RGMII_MODE_RX_INT_B |
+		 MAC_RGMII_MODE_RX_QUALITY |
+		 MAC_RGMII_MODE_RX_ACTIVITY |
+		 MAC_RGMII_MODE_RX_ENG_DET |
+		 MAC_RGMII_MODE_TX_ENABLE |
+		 MAC_RGMII_MODE_TX_LOWPWR |
+		 MAC_RGMII_MODE_TX_RESET);
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			val |= MAC_RGMII_MODE_RX_INT_B |
+			       MAC_RGMII_MODE_RX_QUALITY |
+			       MAC_RGMII_MODE_RX_ACTIVITY |
+			       MAC_RGMII_MODE_RX_ENG_DET;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			val |= MAC_RGMII_MODE_TX_ENABLE |
+			       MAC_RGMII_MODE_TX_LOWPWR |
+			       MAC_RGMII_MODE_TX_RESET;
+	}
+	tw32(MAC_EXT_RGMII_MODE, val);
+}
+
+static void tg3_mdio_start(struct tg3 *tp)
+{
+	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
+	tw32_f(MAC_MI_MODE, tp->mi_mode);
+	udelay(80);
+
+	if (tg3_flag(tp, MDIOBUS_INITED) &&
+	    tg3_asic_rev(tp) == ASIC_REV_5785)
+		tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+	int i;
+	u32 reg;
+	struct phy_device *phydev;
+
+	if (tg3_flag(tp, 5717_PLUS)) {
+		u32 is_serdes;
+
+		tp->phy_addr = tp->pci_fn + 1;
+
+		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
+			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+		else
+			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+				    TG3_CPMU_PHY_STRAP_IS_SERDES;
+		if (is_serdes)
+			tp->phy_addr += 7;
+	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+		int addr;
+
+		addr = ssb_gige_get_phyaddr(tp->pdev);
+		if (addr < 0)
+			return addr;
+		tp->phy_addr = addr;
+	} else
+		tp->phy_addr = TG3_PHY_MII_ADDR;
+
+	tg3_mdio_start(tp);
+
+	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
+		return 0;
+
+	tp->mdio_bus = mdiobus_alloc();
+	if (tp->mdio_bus == NULL)
+		return -ENOMEM;
+
+	tp->mdio_bus->name     = "tg3 mdio bus";
+	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+	tp->mdio_bus->priv     = tp;
+	tp->mdio_bus->parent   = &tp->pdev->dev;
+	tp->mdio_bus->read     = &tg3_mdio_read;
+	tp->mdio_bus->write    = &tg3_mdio_write;
+	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
+	tp->mdio_bus->irq      = &tp->mdio_irq[0];
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		tp->mdio_bus->irq[i] = PHY_POLL;
+
+	/* The bus registration will look for all the PHYs on the mdio bus.
+	 * Unfortunately, it does not ensure the PHY is powered up before
+	 * accessing the PHY ID registers.  A chip reset is the
+	 * quickest way to bring the device back to an operational state..
+	 * quickest way to bring the device back to an operational state.
+	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
+		tg3_bmcr_reset(tp);
+
+	i = mdiobus_register(tp->mdio_bus);
+	if (i) {
+		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
+		mdiobus_free(tp->mdio_bus);
+		return i;
+	}
+
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+	if (!phydev || !phydev->drv) {
+		dev_warn(&tp->pdev->dev, "No PHY devices\n");
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+		return -ENODEV;
+	}
+
+	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+	case PHY_ID_BCM57780:
+		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		break;
+	case PHY_ID_BCM50610:
+	case PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
+			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
+		/* fallthru */
+	case PHY_ID_RTL8211C:
+		phydev->interface = PHY_INTERFACE_MODE_RGMII;
+		break;
+	case PHY_ID_RTL8201E:
+	case PHY_ID_BCMAC131:
+		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
+		break;
+	}
+
+	tg3_flag_set(tp, MDIOBUS_INITED);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5785)
+		tg3_mdio_config_5785(tp);
+
+	return 0;
+}
+
+static void tg3_mdio_fini(struct tg3 *tp)
+{
+	if (tg3_flag(tp, MDIOBUS_INITED)) {
+		tg3_flag_clear(tp, MDIOBUS_INITED);
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+	}
+}
+
+/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+	u32 val;
+
+	val = tr32(GRC_RX_CPU_EVENT);
+	val |= GRC_RX_CPU_DRIVER_EVENT;
+	tw32_f(GRC_RX_CPU_EVENT, val);
+
+	tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
+static void tg3_wait_for_event_ack(struct tg3 *tp)
+{
+	int i;
+	unsigned int delay_cnt;
+	long time_remain;
+
+	/* If enough time has passed, no wait is necessary. */
+	time_remain = (long)(tp->last_event_jiffies + 1 +
+		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+		      (long)jiffies;
+	if (time_remain < 0)
+		return;
+
+	/* Check if we can shorten the wait time. */
+	delay_cnt = jiffies_to_usecs(time_remain);
+	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+	delay_cnt = (delay_cnt >> 3) + 1;
+
+	for (i = 0; i < delay_cnt; i++) {
+		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
+		udelay(8);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
+{
+	u32 reg, val;
+
+	val = 0;
+	if (!tg3_readphy(tp, MII_BMCR, &reg))
+		val = reg << 16;
+	if (!tg3_readphy(tp, MII_BMSR, &reg))
+		val |= (reg & 0xffff);
+	*data++ = val;
+
+	val = 0;
+	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
+		val = reg << 16;
+	if (!tg3_readphy(tp, MII_LPA, &reg))
+		val |= (reg & 0xffff);
+	*data++ = val;
+
+	val = 0;
+	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
+		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
+			val = reg << 16;
+		if (!tg3_readphy(tp, MII_STAT1000, &reg))
+			val |= (reg & 0xffff);
+	}
+	*data++ = val;
+
+	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
+		val = reg << 16;
+	else
+		val = 0;
+	*data++ = val;
+}
+
+/* tp->lock is held. */
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+	u32 data[4];
+
+	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
+		return;
+
+	tg3_phy_gather_ump_data(tp, data);
+
+	tg3_wait_for_event_ack(tp);
+
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
+
+	tg3_generate_fw_event(tp);
+}
+
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+		/* Wait for RX cpu to ACK the previous event. */
+		tg3_wait_for_event_ack(tp);
+
+		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+		tg3_generate_fw_event(tp);
+
+		/* Wait for RX cpu to ACK this event. */
+		tg3_wait_for_event_ack(tp);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD);
+			break;
+
+		case RESET_KIND_SUSPEND:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_SUSPEND);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START_DONE);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD_DONE);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+	if (tg3_flag(tp, ENABLE_ASF)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD);
+			break;
+
+		case RESET_KIND_SUSPEND:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_SUSPEND);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+static int tg3_poll_fw(struct tg3 *tp)
+{
+	int i;
+	u32 val;
+
+	if (tg3_flag(tp, NO_FWARE_REPORTED))
+		return 0;
+
+	if (tg3_flag(tp, IS_SSB_CORE)) {
+		/* We don't use firmware. */
+		return 0;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		/* Wait up to 20ms for init done. */
+		for (i = 0; i < 200; i++) {
+			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+				return 0;
+			if (pci_channel_offline(tp->pdev))
+				return -ENODEV;
+
+			udelay(100);
+		}
+		return -ENODEV;
+	}
+
+	/* Wait for firmware initialization to complete. */
+	for (i = 0; i < 100000; i++) {
+		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+			break;
+		if (pci_channel_offline(tp->pdev)) {
+			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
+				tg3_flag_set(tp, NO_FWARE_REPORTED);
+				netdev_info(tp->dev, "No firmware running\n");
+			}
+
+			break;
+		}
+
+		udelay(10);
+	}
+
+	/* Chip might not be fitted with firmware.  Some Sun onboard
+	 * parts are configured like that.  So don't signal the timeout
+	 * of the above loop as an error, but do report the lack of
+	 * running firmware once.
+	 */
+	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+		tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+		netdev_info(tp->dev, "No firmware running\n");
+	}
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
+		/* The 57765 A0 needs a little more
+		 * time to do some important work.
+		 */
+		mdelay(10);
+	}
+
+	return 0;
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+	if (!netif_carrier_ok(tp->dev)) {
+		netif_info(tp, link, tp->dev, "Link is down\n");
+		tg3_ump_link_report(tp);
+	} else if (netif_msg_link(tp)) {
+		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+			    (tp->link_config.active_speed == SPEED_1000 ?
+			     1000 :
+			     (tp->link_config.active_speed == SPEED_100 ?
+			      100 : 10)),
+			    (tp->link_config.active_duplex == DUPLEX_FULL ?
+			     "full" : "half"));
+
+		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+			    "on" : "off",
+			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+			    "on" : "off");
+
+		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+			netdev_info(tp->dev, "EEE is %s\n",
+				    tp->setlpicnt ? "enabled" : "disabled");
+
+		tg3_ump_link_report(tp);
+	}
+
+	tp->link_up = netif_carrier_ok(tp->dev);
+}
+
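+/* Decode copper pause advertisement bits into FLOW_CTRL_RX/TX flags. */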
+static u32 tg3_decode_flowctrl_1000T(u32 adv)
+{
+	u32 flowctrl = 0;
+
+	if (adv & ADVERTISE_PAUSE_CAP) {
+		flowctrl |= FLOW_CTRL_RX;
+		if (!(adv & ADVERTISE_PAUSE_ASYM))
+			flowctrl |= FLOW_CTRL_TX;
+	} else if (adv & ADVERTISE_PAUSE_ASYM)
+		flowctrl |= FLOW_CTRL_TX;
+
+	return flowctrl;
+}
+
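+/* Encode a flow control setting as 1000BASE-X pause advertisement bits. */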
+static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
+{
+	u16 miireg;
+
+	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+		miireg = ADVERTISE_1000XPAUSE;
+	else if (flow_ctrl & FLOW_CTRL_TX)
+		miireg = ADVERTISE_1000XPSE_ASYM;
+	else if (flow_ctrl & FLOW_CTRL_RX)
+		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+	else
+		miireg = 0;
+
+	return miireg;
+}
+
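+/* Decode 1000BASE-X pause advertisement bits into FLOW_CTRL_RX/TX flags. */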
+static u32 tg3_decode_flowctrl_1000X(u32 adv)
+{
+	u32 flowctrl = 0;
+
+	if (adv & ADVERTISE_1000XPAUSE) {
+		flowctrl |= FLOW_CTRL_RX;
+		if (!(adv & ADVERTISE_1000XPSE_ASYM))
+			flowctrl |= FLOW_CTRL_TX;
+	} else if (adv & ADVERTISE_1000XPSE_ASYM)
+		flowctrl |= FLOW_CTRL_TX;
+
+	return flowctrl;
+}
+
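+/* Resolve the negotiated 1000BASE-X flow control from the local and
+ * link partner advertisements.
+ */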
+static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
+{
+	u8 cap = 0;
+
+	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+		if (lcladv & ADVERTISE_1000XPAUSE)
+			cap = FLOW_CTRL_RX;
+		if (rmtadv & ADVERTISE_1000XPAUSE)
+			cap = FLOW_CTRL_TX;
+	}
+
+	return cap;
+}
+
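+/* Apply the resolved (or forced) flow control settings to the MAC RX and
+ * TX mode registers and record them in the link configuration.
+ */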
+static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
+{
+	u8 autoneg;
+	u8 flowctrl = 0;
+	u32 old_rx_mode = tp->rx_mode;
+	u32 old_tx_mode = tp->tx_mode;
+
+	if (tg3_flag(tp, USE_PHYLIB))
+		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
+	else
+		autoneg = tp->link_config.autoneg;
+
+	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
+		else
+			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+	} else
+		flowctrl = tp->link_config.flowctrl;
+
+	tp->link_config.active_flowctrl = flowctrl;
+
+	if (flowctrl & FLOW_CTRL_RX)
+		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+	else
+		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+	if (old_rx_mode != tp->rx_mode)
+		tw32_f(MAC_RX_MODE, tp->rx_mode);
+
+	if (flowctrl & FLOW_CTRL_TX)
+		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+	else
+		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+	if (old_tx_mode != tp->tx_mode)
+		tw32_f(MAC_TX_MODE, tp->tx_mode);
+}
+
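+/* phylib link-change callback: update the MAC port mode, duplex and flow
+ * control to match the PHY state and report any link transition.
+ */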
+static void tg3_adjust_link(struct net_device *dev)
+{
+	u8 oldflowctrl, linkmesg = 0;
+	u32 mac_mode, lcl_adv, rmt_adv;
+	struct tg3 *tp = netdev_priv(dev);
+	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+	spin_lock_bh(&tp->lock);
+
+	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
+				    MAC_MODE_HALF_DUPLEX);
+
+	oldflowctrl = tp->link_config.active_flowctrl;
+
+	if (phydev->link) {
+		lcl_adv = 0;
+		rmt_adv = 0;
+
+		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else if (phydev->speed == SPEED_1000 ||
+			 tg3_asic_rev(tp) != ASIC_REV_5785)
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+		if (phydev->duplex == DUPLEX_HALF)
+			mac_mode |= MAC_MODE_HALF_DUPLEX;
+		else {
+			lcl_adv = mii_advertise_flowctrl(
+				  tp->link_config.flowctrl);
+
+			if (phydev->pause)
+				rmt_adv = LPA_PAUSE_CAP;
+			if (phydev->asym_pause)
+				rmt_adv |= LPA_PAUSE_ASYM;
+		}
+
+		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+	} else
+		mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+	if (mac_mode != tp->mac_mode) {
+		tp->mac_mode = mac_mode;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
+		if (phydev->speed == SPEED_10)
+			tw32(MAC_MI_STAT,
+			     MAC_MI_STAT_10MBPS_MODE |
+			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+		else
+			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+	}
+
+	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
+		tw32(MAC_TX_LENGTHS,
+		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+		      (6 << TX_LENGTHS_IPG_SHIFT) |
+		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+	else
+		tw32(MAC_TX_LENGTHS,
+		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+		      (6 << TX_LENGTHS_IPG_SHIFT) |
+		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+	if (phydev->link != tp->old_link ||
+	    phydev->speed != tp->link_config.active_speed ||
+	    phydev->duplex != tp->link_config.active_duplex ||
+	    oldflowctrl != tp->link_config.active_flowctrl)
+		linkmesg = 1;
+
+	tp->old_link = phydev->link;
+	tp->link_config.active_speed = phydev->speed;
+	tp->link_config.active_duplex = phydev->duplex;
+
+	spin_unlock_bh(&tp->lock);
+
+	if (linkmesg)
+		tg3_link_report(tp);
+}
+
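+/* Connect the MAC to its PHY via phylib and mask the PHY's supported
+ * features down to what the MAC interface can handle.
+ */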
+static int tg3_phy_init(struct tg3 *tp)
+{
+	struct phy_device *phydev;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
+		return 0;
+
+	/* Bring the PHY back to a known state. */
+	tg3_bmcr_reset(tp);
+
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+	/* Attach the MAC to the PHY. */
+	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
+			     tg3_adjust_link, phydev->interface);
+	if (IS_ERR(phydev)) {
+		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	/* Mask with MAC supported features. */
+	switch (phydev->interface) {
+	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_RGMII:
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+			phydev->supported &= (PHY_GBIT_FEATURES |
+					      SUPPORTED_Pause |
+					      SUPPORTED_Asym_Pause);
+			break;
+		}
+		/* fallthru */
+	case PHY_INTERFACE_MODE_MII:
+		phydev->supported &= (PHY_BASIC_FEATURES |
+				      SUPPORTED_Pause |
+				      SUPPORTED_Asym_Pause);
+		break;
+	default:
+		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
+		return -EINVAL;
+	}
+
+	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
+
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+
+static void tg3_phy_start(struct tg3 *tp)
+{
+	struct phy_device *phydev;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+		return;
+
+	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+		phydev->speed = tp->link_config.speed;
+		phydev->duplex = tp->link_config.duplex;
+		phydev->autoneg = tp->link_config.autoneg;
+		phydev->advertising = tp->link_config.advertising;
+	}
+
+	phy_start(phydev);
+
+	phy_start_aneg(phydev);
+}
+
+static void tg3_phy_stop(struct tg3 *tp)
+{
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+		return;
+
+	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
+}
+
+static void tg3_phy_fini(struct tg3 *tp)
+{
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
+		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
+	}
+}
+
+static int tg3_phy_set_extloopbk(struct tg3 *tp)
+{
+	int err;
+	u32 val;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+		return 0;
+
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		/* Cannot do read-modify-write on 5401 */
+		err = tg3_phy_auxctl_write(tp,
+					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
+					   0x4c20);
+		goto done;
+	}
+
+	err = tg3_phy_auxctl_read(tp,
+				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+	if (err)
+		return err;
+
+	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
+	err = tg3_phy_auxctl_write(tp,
+				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
+
+done:
+	return err;
+}
+
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+	u32 phytest;
+
+	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+		u32 phy;
+
+		tg3_writephy(tp, MII_TG3_FET_TEST,
+			     phytest | MII_TG3_FET_SHADOW_EN);
+		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+			if (enable)
+				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			else
+				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+		}
+		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+	}
+}
+
+static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
+{
+	u32 reg;
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tg3_flag(tp, 5717_PLUS) &&
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+		return;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		tg3_phy_fet_toggle_apd(tp, enable);
+		return;
+	}
+
+	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
+	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+	      MII_TG3_MISC_SHDW_SCR5_SDTL |
+	      MII_TG3_MISC_SHDW_SCR5_C125OE;
+	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
+		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
+
+	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
+
+	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+	if (enable)
+		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
+
+	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
+}
+
+static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
+{
+	u32 phy;
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		return;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		u32 ephy;
+
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     ephy | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, reg, &phy)) {
+				if (enable)
+					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+				else
+					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+				tg3_writephy(tp, reg, phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
+		}
+	} else {
+		int ret;
+
+		ret = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+		if (!ret) {
+			if (enable)
+				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+			else
+				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
+		}
+	}
+}
+
+static void tg3_phy_set_wirespeed(struct tg3 *tp)
+{
+	int ret;
+	u32 val;
+
+	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
+		return;
+
+	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+	if (!ret)
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
+}
+
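+/* Program the PHY DSP coefficients from the OTP values cached in
+ * tp->phy_otp.
+ */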
+static void tg3_phy_apply_otp(struct tg3 *tp)
+{
+	u32 otp, phy;
+
+	if (!tp->phy_otp)
+		return;
+
+	otp = tp->phy_otp;
+
+	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
+		return;
+
+	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
+	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
+
+	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
+	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
+
+	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
+	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
+	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
+
+	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
+
+	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
+
+	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
+	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+	tg3_phy_toggle_auxctl_smdsp(tp, false);
+}
+
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+{
+	u32 val;
+	struct ethtool_eee *dest = &tp->eee;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		return;
+
+	if (eee)
+		dest = eee;
+
+	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
+		return;
+
+	/* Pull eee_active */
+	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
+		dest->eee_active = 1;
+	} else
+		dest->eee_active = 0;
+
+	/* Pull lp advertised settings */
+	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
+		return;
+	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+	/* Pull advertised and eee_enabled settings */
+	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
+		return;
+	dest->eee_enabled = !!val;
+	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+	/* Pull tx_lpi_enabled */
+	val = tr32(TG3_CPMU_EEE_MODE);
+	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
+
+	/* Pull lpi timer value */
+	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
+}
+
+static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
+{
+	u32 val;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		return;
+
+	tp->setlpicnt = 0;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+	    current_link_up &&
+	    tp->link_config.active_duplex == DUPLEX_FULL &&
+	    (tp->link_config.active_speed == SPEED_100 ||
+	     tp->link_config.active_speed == SPEED_1000)) {
+		u32 eeectl;
+
+		if (tp->link_config.active_speed == SPEED_1000)
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+		else
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+		tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+		tg3_eee_pull_config(tp, NULL);
+		if (tp->eee.eee_active)
+			tp->setlpicnt = 2;
+	}
+
+	if (!tp->setlpicnt) {
+		if (current_link_up &&
+		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+			tg3_phy_toggle_auxctl_smdsp(tp, false);
+		}
+
+		val = tr32(TG3_CPMU_EEE_MODE);
+		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+	}
+}
+
+static void tg3_phy_eee_enable(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tp->link_config.active_speed == SPEED_1000 &&
+	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	     tg3_flag(tp, 57765_CLASS)) &&
+	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+		val = MII_TG3_DSP_TAP26_ALNOKO |
+		      MII_TG3_DSP_TAP26_RMRXSTO;
+		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+		tg3_phy_toggle_auxctl_smdsp(tp, false);
+	}
+
+	val = tr32(TG3_CPMU_EEE_MODE);
+	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
+}
+
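+/* Poll MII_TG3_DSP_CONTROL until the DSP macro busy bit (0x1000) clears. */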
+static int tg3_wait_macro_done(struct tg3 *tp)
+{
+	int limit = 100;
+
+	while (limit--) {
+		u32 tmp32;
+
+		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
+			if ((tmp32 & 0x1000) == 0)
+				break;
+		}
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
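+/* Write a known test pattern to each of the four DSP channels and verify
+ * that it reads back correctly.
+ */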
+static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
+{
+	static const u32 test_pat[4][6] = {
+	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
+	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
+	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
+	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
+	};
+	int chan;
+
+	for (chan = 0; chan < 4; chan++) {
+		int i;
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+
+		for (i = 0; i < 6; i++)
+			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
+				     test_pat[chan][i]);
+
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		for (i = 0; i < 6; i += 2) {
+			u32 low, high;
+
+			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+			    tg3_wait_macro_done(tp)) {
+				*resetp = 1;
+				return -EBUSY;
+			}
+			low &= 0x7fff;
+			high &= 0x000f;
+			if (low != test_pat[chan][i] ||
+			    high != test_pat[chan][i+1]) {
+				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
+
+				return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
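+/* Clear the test pattern from all four DSP channels. */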
+static int tg3_phy_reset_chanpat(struct tg3 *tp)
+{
+	int chan;
+
+	for (chan = 0; chan < 4; chan++) {
+		int i;
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+		for (i = 0; i < 6; i++)
+			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+		if (tg3_wait_macro_done(tp))
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+{
+	u32 reg32, phy9_orig;
+	int retries, do_phy_reset, err;
+
+	retries = 10;
+	do_phy_reset = 1;
+	do {
+		if (do_phy_reset) {
+			err = tg3_bmcr_reset(tp);
+			if (err)
+				return err;
+			do_phy_reset = 0;
+		}
+
+		/* Disable transmitter and interrupt.  */
+		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+			continue;
+
+		reg32 |= 0x3000;
+		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+		/* Set full-duplex, 1000 Mbps.  */
+		tg3_writephy(tp, MII_BMCR,
+			     BMCR_FULLDPLX | BMCR_SPEED1000);
+
+		/* Set to master mode.  */
+		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
+			continue;
+
+		tg3_writephy(tp, MII_CTRL1000,
+			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+		if (err)
+			return err;
+
+		/* Block the PHY control access.  */
+		tg3_phydsp_write(tp, 0x8005, 0x0800);
+
+		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
+		if (!err)
+			break;
+	} while (--retries);
+
+	err = tg3_phy_reset_chanpat(tp);
+	if (err)
+		return err;
+
+	tg3_phydsp_write(tp, 0x8005, 0x0000);
+
+	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+	tg3_phy_toggle_auxctl_smdsp(tp, false);
+
+	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+	if (err)
+		return err;
+
+	reg32 &= ~0x3000;
+	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+	return 0;
+}
+
+static void tg3_carrier_off(struct tg3 *tp)
+{
+	netif_carrier_off(tp->dev);
+	tp->link_up = false;
+}
+
+static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
+{
+	if (tg3_flag(tp, ENABLE_ASF))
+		netdev_warn(tp->dev,
+			    "Management side-band traffic will be interrupted during phy settings change\n");
+}
+
+/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
+ * workarounds and settings required after a reset.
+ */
+static int tg3_phy_reset(struct tg3 *tp)
+{
+	u32 val, cpmuctrl;
+	int err;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		val = tr32(GRC_MISC_CFG);
+		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
+		udelay(40);
+	}
+	err  = tg3_readphy(tp, MII_BMSR, &val);
+	err |= tg3_readphy(tp, MII_BMSR, &val);
+	if (err != 0)
+		return -EBUSY;
+
+	if (netif_running(tp->dev) && tp->link_up) {
+		netif_carrier_off(tp->dev);
+		tg3_link_report(tp);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5705) {
+		err = tg3_phy_reset_5703_4_5(tp);
+		if (err)
+			return err;
+		goto out;
+	}
+
+	cpmuctrl = 0;
+	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
+		cpmuctrl = tr32(TG3_CPMU_CTRL);
+		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
+			tw32(TG3_CPMU_CTRL,
+			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
+	}
+
+	err = tg3_bmcr_reset(tp);
+	if (err)
+		return err;
+
+	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
+		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
+
+		tw32(TG3_CPMU_CTRL, cpmuctrl);
+	}
+
+	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+		    CPMU_LSPD_1000MB_MACCLK_12_5) {
+			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+			udelay(40);
+			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+		}
+	}
+
+	if (tg3_flag(tp, 5717_PLUS) &&
+	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
+		return 0;
+
+	tg3_phy_apply_otp(tp);
+
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+		tg3_phy_toggle_apd(tp, true);
+	else
+		tg3_phy_toggle_apd(tp, false);
+
+out:
+	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+		tg3_phydsp_write(tp, 0x000a, 0x0323);
+		tg3_phy_toggle_auxctl_smdsp(tp, false);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+			tg3_phydsp_write(tp, 0x000a, 0x310b);
+			tg3_phydsp_write(tp, 0x201f, 0x9506);
+			tg3_phydsp_write(tp, 0x401f, 0x14e2);
+			tg3_phy_toggle_auxctl_smdsp(tp, false);
+		}
+	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+				tg3_writephy(tp, MII_TG3_TEST1,
+					     MII_TG3_TEST1_TRIM_EN | 0x4);
+			} else
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+			tg3_phy_toggle_auxctl_smdsp(tp, false);
+		}
+	}
+
+	/* Set Extended packet length bit (bit 14) on all chips that
+	 * support jumbo frames.
+	 */
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		/* Cannot do read-modify-write on 5401 */
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
+		/* Set bit 14 with read-modify-write to preserve other bits */
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+		if (!err)
+			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
+	}
+
+	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
+	 * jumbo frame transmission.
+	 */
+	if (tg3_flag(tp, JUMBO_CAPABLE)) {
+		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		/* adjust output voltage */
+		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
+	}
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
+		tg3_phydsp_write(tp, 0xffb, 0x4000);
+
+	tg3_phy_toggle_automdix(tp, true);
+	tg3_phy_set_wirespeed(tp);
+	return 0;
+}
+
+#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
+#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
+#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
+					  TG3_GPIO_MSG_NEED_VAUX)
+#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
+	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 12))
+
+#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
+	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 12))
+
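+/* Update this PCI function's GPIO message bits in the shared status
+ * register (APE on 5717/5719, CPMU otherwise) and return the status
+ * bits for all functions.
+ */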
+static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
+{
+	u32 status, shift;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719)
+		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
+	else
+		status = tr32(TG3_CPMU_DRV_STATUS);
+
+	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
+	status &= ~(TG3_GPIO_MSG_MASK << shift);
+	status |= (newstat << shift);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719)
+		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
+	else
+		tw32(TG3_CPMU_DRV_STATUS, status);
+
+	return status >> TG3_APE_GPIO_MSG_SHIFT;
+}
+
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return 0;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720) {
+		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+			return -EIO;
+
+		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
+
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+	} else {
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	}
+
+	return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+	u32 grc_local_ctrl;
+
+	if (!tg3_flag(tp, IS_NIC) ||
+	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5701)
+		return;
+
+	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5701) {
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+			    (GRC_LCLCTRL_GPIO_OE0 |
+			     GRC_LCLCTRL_GPIO_OE1 |
+			     GRC_LCLCTRL_GPIO_OE2 |
+			     GRC_LCLCTRL_GPIO_OUTPUT0 |
+			     GRC_LCLCTRL_GPIO_OUTPUT1),
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+				     GRC_LCLCTRL_GPIO_OE1 |
+				     GRC_LCLCTRL_GPIO_OE2 |
+				     GRC_LCLCTRL_GPIO_OUTPUT0 |
+				     GRC_LCLCTRL_GPIO_OUTPUT1 |
+				     tp->grc_local_ctrl;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else {
+		u32 no_gpio2;
+		u32 grc_local_ctrl = 0;
+
+		/* Workaround to prevent overdrawing Amps. */
+		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+				    grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+
+		/* On 5753 and variants, GPIO2 cannot be used. */
+		no_gpio2 = tp->nic_sram_data_cfg &
+			   NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+				  GRC_LCLCTRL_GPIO_OE1 |
+				  GRC_LCLCTRL_GPIO_OE2 |
+				  GRC_LCLCTRL_GPIO_OUTPUT1 |
+				  GRC_LCLCTRL_GPIO_OUTPUT2;
+		if (no_gpio2) {
+			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+					    GRC_LCLCTRL_GPIO_OUTPUT2);
+		}
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		if (!no_gpio2) {
+			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+			tw32_wait_f(GRC_LOCAL_CTRL,
+				    tp->grc_local_ctrl | grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+	}
+}
+
+static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
+{
+	u32 msg = 0;
+
+	/* Serialize power state transitions */
+	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+		return;
+
+	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
+		msg = TG3_GPIO_MSG_NEED_VAUX;
+
+	msg = tg3_set_function_status(tp, msg);
+
+	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
+		goto done;
+
+	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
+		tg3_pwrsrc_switch_to_vaux(tp);
+	else
+		tg3_pwrsrc_die_with_vmain(tp);
+
+done:
+	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
+{
+	bool need_vaux = false;
+
+	/* The GPIOs do something completely different on 57765. */
+	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
+		return;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720) {
+		tg3_frob_aux_power_5717(tp, include_wol ?
+					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
+		return;
+	}
+
+	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
+		struct net_device *dev_peer;
+
+		dev_peer = pci_get_drvdata(tp->pdev_peer);
+
+		/* remove_one() may have been run on the peer. */
+		if (dev_peer) {
+			struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+			if (tg3_flag(tp_peer, INIT_COMPLETE))
+				return;
+
+			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
+			    tg3_flag(tp_peer, ENABLE_ASF))
+				need_vaux = true;
+		}
+	}
+
+	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
+	    tg3_flag(tp, ENABLE_ASF))
+		need_vaux = true;
+
+	if (need_vaux)
+		tg3_pwrsrc_switch_to_vaux(tp);
+	else
+		tg3_pwrsrc_die_with_vmain(tp);
+}
+
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+		return 1;
+	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
+		if (speed != SPEED_10)
+			return 1;
+	} else if (speed == SPEED_10)
+		return 1;
+
+	return 0;
+}
+
+static bool tg3_phy_power_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5700:
+	case ASIC_REV_5704:
+		return true;
+	case ASIC_REV_5780:
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+			return true;
+		return false;
+	case ASIC_REV_5717:
+		if (!tp->pci_fn)
+			return true;
+		return false;
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
+static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+{
+	u32 val;
+
+	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
+		return;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
+			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
+			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
+
+			sg_dig_ctrl |=
+				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
+			tw32(SG_DIG_CTRL, sg_dig_ctrl);
+			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
+		}
+		return;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		tg3_bmcr_reset(tp);
+		val = tr32(GRC_MISC_CFG);
+		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
+		udelay(40);
+		return;
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		u32 phytest;
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+			u32 phy;
+
+			tg3_writephy(tp, MII_ADVERTISE, 0);
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     phytest | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+				tg3_writephy(tp,
+					     MII_TG3_FET_SHDW_AUXMODE4,
+					     phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+		}
+		return;
+	} else if (do_low_power) {
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+
+		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+		      MII_TG3_AUXCTL_PCTL_VREG_11V;
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
+	}
+
+	/* The PHY should not be powered down on some chips because
+	 * of bugs.
+	 */
+	if (tg3_phy_power_bug(tp))
+		return;
+
+	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+	}
+
+	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+/* tp->lock is held. */
+static int tg3_nvram_lock(struct tg3 *tp)
+{
+	if (tg3_flag(tp, NVRAM)) {
+		int i;
+
+		if (tp->nvram_lock_cnt == 0) {
+			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+			for (i = 0; i < 8000; i++) {
+				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+					break;
+				udelay(20);
+			}
+			if (i == 8000) {
+				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+				return -ENODEV;
+			}
+		}
+		tp->nvram_lock_cnt++;
+	}
+	return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_nvram_unlock(struct tg3 *tp)
+{
+	if (tg3_flag(tp, NVRAM)) {
+		if (tp->nvram_lock_cnt > 0)
+			tp->nvram_lock_cnt--;
+		if (tp->nvram_lock_cnt == 0)
+			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_enable_nvram_access(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+		u32 nvaccess = tr32(NVRAM_ACCESS);
+
+		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_disable_nvram_access(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+		u32 nvaccess = tr32(NVRAM_ACCESS);
+
+		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+	}
+}
+
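+/* Read one 32-bit word through the GRC serial EEPROM interface; used
+ * when the chip has no NVRAM interface.
+ */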
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+					u32 offset, u32 *val)
+{
+	u32 tmp;
+	int i;
+
+	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
+		return -EINVAL;
+
+	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+					EEPROM_ADDR_DEVID_MASK |
+					EEPROM_ADDR_READ);
+	tw32(GRC_EEPROM_ADDR,
+	     tmp |
+	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
+	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+	      EEPROM_ADDR_ADDR_MASK) |
+	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+	for (i = 0; i < 1000; i++) {
+		tmp = tr32(GRC_EEPROM_ADDR);
+
+		if (tmp & EEPROM_ADDR_COMPLETE)
+			break;
+		msleep(1);
+	}
+	if (!(tmp & EEPROM_ADDR_COMPLETE))
+		return -EBUSY;
+
+	tmp = tr32(GRC_EEPROM_DATA);
+
+	/*
+	 * The data will always be opposite the native endian
+	 * format.  Perform a blind byteswap to compensate.
+	 */
+	*val = swab32(tmp);
+
+	return 0;
+}
+
+#define NVRAM_CMD_TIMEOUT 5000
+
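+/* Issue an NVRAM command and poll until NVRAM_CMD_DONE is set. */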
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+	int i;
+
+	tw32(NVRAM_CMD, nvram_cmd);
+	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+		usleep_range(10, 40);
+		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+			udelay(10);
+			break;
+		}
+	}
+
+	if (i == NVRAM_CMD_TIMEOUT)
+		return -EBUSY;
+
+	return 0;
+}
+
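+/* Translate a linear NVRAM offset into the physical page/byte address
+ * used by Atmel AT45DB0X1B-style buffered flash parts.
+ */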
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+	if (tg3_flag(tp, NVRAM) &&
+	    tg3_flag(tp, NVRAM_BUFFERED) &&
+	    tg3_flag(tp, FLASH) &&
+	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+	    (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+		addr = ((addr / tp->nvram_pagesize) <<
+			ATMEL_AT45DB0X1B_PAGE_POS) +
+		       (addr % tp->nvram_pagesize);
+
+	return addr;
+}
+
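+/* Inverse of tg3_nvram_phys_addr(): convert a physical page/byte address
+ * back into a linear NVRAM offset.
+ */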
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+	if (tg3_flag(tp, NVRAM) &&
+	    tg3_flag(tp, NVRAM_BUFFERED) &&
+	    tg3_flag(tp, FLASH) &&
+	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+	    (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+			tp->nvram_pagesize) +
+		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+	return addr;
+}
+
+/* NOTE: Data read in from NVRAM is byteswapped according to
+ * the byteswapping settings for all other register accesses.
+ * tg3 devices are BE devices, so on a BE machine, the data
+ * returned will be exactly as it is seen in NVRAM.  On a LE
+ * machine, the 32-bit value will be byteswapped.
+ */
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+	int ret;
+
+	if (!tg3_flag(tp, NVRAM))
+		return tg3_nvram_read_using_eeprom(tp, offset, val);
+
+	offset = tg3_nvram_phys_addr(tp, offset);
+
+	if (offset > NVRAM_ADDR_MSK)
+		return -EINVAL;
+
+	ret = tg3_nvram_lock(tp);
+	if (ret)
+		return ret;
+
+	tg3_enable_nvram_access(tp);
+
+	tw32(NVRAM_ADDR, offset);
+	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+	if (ret == 0)
+		*val = tr32(NVRAM_RDDATA);
+
+	tg3_disable_nvram_access(tp);
+
+	tg3_nvram_unlock(tp);
+
+	return ret;
+}
+
+/* Ensures NVRAM data is in bytestream format. */
+static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
+{
+	u32 v;
+	int res = tg3_nvram_read(tp, offset, &v);
+	if (!res)
+		*val = cpu_to_be32(v);
+	return res;
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+				    u32 offset, u32 len, u8 *buf)
+{
+	int i, j, rc = 0;
+	u32 val;
+
+	for (i = 0; i < len; i += 4) {
+		u32 addr;
+		__be32 data;
+
+		addr = offset + i;
+
+		memcpy(&data, buf + i, 4);
+
+		/*
+		 * The SEEPROM interface expects the data to always be opposite
+		 * the native endian format.  We accomplish this by reversing
+		 * all the operations that would have been performed on the
+		 * data from a call to tg3_nvram_read_be32().
+		 */
+		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
+
+		val = tr32(GRC_EEPROM_ADDR);
+		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+			EEPROM_ADDR_READ);
+		tw32(GRC_EEPROM_ADDR, val |
+			(0 << EEPROM_ADDR_DEVID_SHIFT) |
+			(addr & EEPROM_ADDR_ADDR_MASK) |
+			EEPROM_ADDR_START |
+			EEPROM_ADDR_WRITE);
+
+		for (j = 0; j < 1000; j++) {
+			val = tr32(GRC_EEPROM_ADDR);
+
+			if (val & EEPROM_ADDR_COMPLETE)
+				break;
+			msleep(1);
+		}
+		if (!(val & EEPROM_ADDR_COMPLETE)) {
+			rc = -EBUSY;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int ret = 0;
+	u32 pagesize = tp->nvram_pagesize;
+	u32 pagemask = pagesize - 1;
+	u32 nvram_cmd;
+	u8 *tmp;
+
+	tmp = kmalloc(pagesize, GFP_KERNEL);
+	if (tmp == NULL)
+		return -ENOMEM;
+
+	while (len) {
+		int j;
+		u32 phy_addr, page_off, size;
+
+		phy_addr = offset & ~pagemask;
+
+		for (j = 0; j < pagesize; j += 4) {
+			ret = tg3_nvram_read_be32(tp, phy_addr + j,
+						  (__be32 *) (tmp + j));
+			if (ret)
+				break;
+		}
+		if (ret)
+			break;
+
+		page_off = offset & pagemask;
+		size = pagesize;
+		if (len < size)
+			size = len;
+
+		len -= size;
+
+		memcpy(tmp + page_off, buf, size);
+
+		offset = offset + (pagesize - page_off);
+
+		tg3_enable_nvram_access(tp);
+
+		/*
+		 * Before we can erase the flash page, we need
+		 * to issue a special "write enable" command.
+		 */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Erase the target page */
+		tw32(NVRAM_ADDR, phy_addr);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Issue another write enable to start the write. */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		for (j = 0; j < pagesize; j += 4) {
+			__be32 data;
+
+			data = *((__be32 *) (tmp + j));
+
+			tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+			tw32(NVRAM_ADDR, phy_addr + j);
+
+			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+				NVRAM_CMD_WR;
+
+			if (j == 0)
+				nvram_cmd |= NVRAM_CMD_FIRST;
+			else if (j == (pagesize - 4))
+				nvram_cmd |= NVRAM_CMD_LAST;
+
+			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
+			if (ret)
+				break;
+		}
+		if (ret)
+			break;
+	}
+
+	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+	tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+	kfree(tmp);
+
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < len; i += 4, offset += 4) {
+		u32 page_off, phy_addr, nvram_cmd;
+		__be32 data;
+
+		memcpy(&data, buf + i, 4);
+		tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+		page_off = offset % tp->nvram_pagesize;
+
+		phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+		if (page_off == 0 || i == 0)
+			nvram_cmd |= NVRAM_CMD_FIRST;
+		if (page_off == (tp->nvram_pagesize - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if (i == (len - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
+		    !tg3_flag(tp, FLASH) ||
+		    !tg3_flag(tp, 57765_PLUS))
+			tw32(NVRAM_ADDR, phy_addr);
+
+		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
+		    !tg3_flag(tp, 5755_PLUS) &&
+		    (tp->nvram_jedecnum == JEDEC_ST) &&
+		    (nvram_cmd & NVRAM_CMD_FIRST)) {
+			u32 cmd;
+
+			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+			ret = tg3_nvram_exec_cmd(tp, cmd);
+			if (ret)
+				break;
+		}
+		if (!tg3_flag(tp, FLASH)) {
+			/* We always do complete word writes to eeprom. */
+			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+		}
+
+		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+	int ret;
+
+	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
+		udelay(40);
+	}
+
+	if (!tg3_flag(tp, NVRAM)) {
+		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+	} else {
+		u32 grc_mode;
+
+		ret = tg3_nvram_lock(tp);
+		if (ret)
+			return ret;
+
+		tg3_enable_nvram_access(tp);
+		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
+			tw32(NVRAM_WRITE1, 0x406);
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
+			ret = tg3_nvram_write_block_buffered(tp, offset, len,
+				buf);
+		} else {
+			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+				buf);
+		}
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+		tg3_disable_nvram_access(tp);
+		tg3_nvram_unlock(tp);
+	}
+
+	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+		udelay(40);
+	}
+
+	return ret;
+}
+
+#define RX_CPU_SCRATCH_BASE	0x30000
+#define RX_CPU_SCRATCH_SIZE	0x04000
+#define TX_CPU_SCRATCH_BASE	0x34000
+#define TX_CPU_SCRATCH_SIZE	0x04000
+
+/* tp->lock is held. */
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
+{
+	int i;
+	const int iters = 10000;
+
+	for (i = 0; i < iters; i++) {
+		tw32(cpu_base + CPU_STATE, 0xffffffff);
+		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
+		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+			break;
+		if (pci_channel_offline(tp->pdev))
+			return -EBUSY;
+	}
+
+	return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
+	udelay(10);
+
+	return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+	return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32_f(cpu_base + CPU_MODE,  0x00000000);
+}
+
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+	tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+	int rc;
+
+	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+		return 0;
+	}
+	if (cpu_base == RX_CPU_BASE) {
+		rc = tg3_rxcpu_pause(tp);
+	} else {
+		/*
+		 * There is only an Rx CPU for the 5750 derivative in the
+		 * BCM4785.
+		 */
+		if (tg3_flag(tp, IS_SSB_CORE))
+			return 0;
+
+		rc = tg3_txcpu_pause(tp);
+	}
+
+	if (rc) {
+		netdev_err(tp->dev, "%s timed out, %s CPU\n",
+			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
+		return -ENODEV;
+	}
+
+	/* Clear firmware's nvram arbitration. */
+	if (tg3_flag(tp, NVRAM))
+		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+	return 0;
+}
+
+static int tg3_fw_data_len(struct tg3 *tp,
+			   const struct tg3_firmware_hdr *fw_hdr)
+{
+	int fw_len;
+
+	/* Non-fragmented firmware has a single firmware header followed by a
+	 * contiguous chunk of data to be written. The length field in that
+	 * header is not the length of the data to be written but the complete
+	 * length of the bss. The data length is instead derived from
+	 * tp->fw->size minus the header.
+	 *
+	 * Fragmented firmware has a main header followed by multiple
+	 * fragments. Each fragment is identical to non-fragmented firmware,
+	 * with a firmware header followed by a contiguous chunk of data. In
+	 * the main header, the length field is unused and set to 0xffffffff.
+	 * In each fragment header the length is the entire size of that
+	 * fragment, i.e. fragment data plus header length. The data length is
+	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
+	 */
+	if (tp->fw_len == 0xffffffff)
+		fw_len = be32_to_cpu(fw_hdr->len);
+	else
+		fw_len = tp->fw->size;
+
+	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
+				 u32 cpu_scratch_base, int cpu_scratch_size,
+				 const struct tg3_firmware_hdr *fw_hdr)
+{
+	int err, i;
+	void (*write_op)(struct tg3 *, u32, u32);
+	int total_len = tp->fw->size;
+
+	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+		netdev_err(tp->dev,
+			   "%s: Trying to load TX cpu firmware which is 5705\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
+		write_op = tg3_write_mem;
+	else
+		write_op = tg3_write_indirect_reg32;
+
+	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+		/* It is possible that bootcode is still loading at this point.
+		 * Get the nvram lock first before halting the cpu.
+		 */
+		int lock_err = tg3_nvram_lock(tp);
+		err = tg3_halt_cpu(tp, cpu_base);
+		if (!lock_err)
+			tg3_nvram_unlock(tp);
+		if (err)
+			goto out;
+
+		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+			write_op(tp, cpu_scratch_base + i, 0);
+		tw32(cpu_base + CPU_STATE, 0xffffffff);
+		tw32(cpu_base + CPU_MODE,
+		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+	} else {
+		/* Subtract additional main header for fragmented firmware and
+		 * advance to the first fragment
+		 */
+		total_len -= TG3_FW_HDR_LEN;
+		fw_hdr++;
+	}
+
+	do {
+		u32 *fw_data = (u32 *)(fw_hdr + 1);
+		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+			write_op(tp, cpu_scratch_base +
+				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+				     (i * sizeof(u32)),
+				 be32_to_cpu(fw_data[i]));
+
+		total_len -= be32_to_cpu(fw_hdr->len);
+
+		/* Advance to next fragment */
+		fw_hdr = (struct tg3_firmware_hdr *)
+			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+	} while (total_len > 0);
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+	int i;
+	const int iters = 5;
+
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32_f(cpu_base + CPU_PC, pc);
+
+	for (i = 0; i < iters; i++) {
+		if (tr32(cpu_base + CPU_PC) == pc)
+			break;
+		tw32(cpu_base + CPU_STATE, 0xffffffff);
+		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
+		tw32_f(cpu_base + CPU_PC, pc);
+		udelay(1000);
+	}
+
+	return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+	const struct tg3_firmware_hdr *fw_hdr;
+	int err;
+
+	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+
+	/* The firmware blob starts with version numbers, followed by the
+	 * start address and length.  The length given is the complete
+	 * length: end_address_of_bss - start_address_of_text.  The
+	 * remainder is the blob to be loaded contiguously from the start
+	 * address.
+	 */
+
+	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+				    fw_hdr);
+	if (err)
+		return err;
+
+	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+				    fw_hdr);
+	if (err)
+		return err;
+
+	/* Now startup only the RX cpu. */
+	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+				       be32_to_cpu(fw_hdr->base_addr));
+	if (err) {
+		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+			   "should be %08x\n", __func__,
+			   tr32(RX_CPU_BASE + CPU_PC),
+				be32_to_cpu(fw_hdr->base_addr));
+		return -ENODEV;
+	}
+
+	tg3_rxcpu_resume(tp);
+
+	return 0;
+}
+
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+	const int iters = 1000;
+	int i;
+	u32 val;
+
+	/* Wait for boot code to complete initialization and enter service
+	 * loop. It is then safe to download service patches.
+	 */
+	for (i = 0; i < iters; i++) {
+		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+			break;
+
+		udelay(10);
+	}
+
+	if (i == iters) {
+		netdev_err(tp->dev, "Boot code not ready for service patches\n");
+		return -EBUSY;
+	}
+
+	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+	if (val & 0xff) {
+		netdev_warn(tp->dev,
+			    "Other patches exist. Not downloading EEE patch\n");
+		return -EEXIST;
+	}
+
+	return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+	struct tg3_firmware_hdr *fw_hdr;
+
+	if (!tg3_flag(tp, NO_NVRAM))
+		return;
+
+	if (tg3_validate_rxcpu_state(tp))
+		return;
+
+	if (!tp->fw)
+		return;
+
+	/* This firmware blob has a different format than older firmware
+	 * releases as given below. The main difference is we have fragmented
+	 * data to be written to non-contiguous locations.
+	 *
+	 * In the beginning we have a firmware header identical to other
+	 * firmware which consists of version, base addr and length. The length
+	 * here is unused and set to 0xffffffff.
+	 *
+	 * This is followed by a series of firmware fragments which are
+	 * individually identical to previous firmware. i.e. they have the
+	 * firmware header and followed by data for that fragment. The version
+	 * field of the individual fragment header is unused.
+	 */
+
+	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+		return;
+
+	if (tg3_rxcpu_pause(tp))
+		return;
+
+	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
+	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+	tg3_rxcpu_resume(tp);
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+	const struct tg3_firmware_hdr *fw_hdr;
+	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+	int err;
+
+	if (!tg3_flag(tp, FW_TSO))
+		return 0;
+
+	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	 * start address and length. We are setting complete length.
+	 * length = end_address_of_bss - start_address_of_text.
+	 * Remainder is the blob to be loaded contiguously
+	 * from start address.
+	 */
+
+	cpu_scratch_size = tp->fw_len;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
+		cpu_base = RX_CPU_BASE;
+		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+	} else {
+		cpu_base = TX_CPU_BASE;
+		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+	}
+
+	err = tg3_load_firmware_cpu(tp, cpu_base,
+				    cpu_scratch_base, cpu_scratch_size,
+				    fw_hdr);
+	if (err)
+		return err;
+
+	/* Now startup the cpu. */
+	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+				       be32_to_cpu(fw_hdr->base_addr));
+	if (err) {
+		netdev_err(tp->dev,
+			   "%s fails to set CPU PC, is %08x should be %08x\n",
+			   __func__, tr32(cpu_base + CPU_PC),
+			   be32_to_cpu(fw_hdr->base_addr));
+		return -ENODEV;
+	}
+
+	tg3_resume_cpu(tp, cpu_base);
+	return 0;
+}
+
+/* tp->lock is held. */
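+/* Program one MAC address slot; indexes 0-3 map to the MAC_ADDR_*
+ * registers, indexes 4 and above to the MAC_EXTADDR_* registers.
+ */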
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+	u32 addr_high, addr_low;
+
+	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		    (mac_addr[4] <<  8) | mac_addr[5]);
+
+	if (index < 4) {
+		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+	} else {
+		index -= 4;
+		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+	}
+}
+
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
+{
+	u32 addr_high;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (i == 1 && skip_mac_1)
+			continue;
+		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5704) {
+		for (i = 4; i < 16; i++)
+			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
+	}
+
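+	/* Seed the transmit backoff generator from the sum of the MAC
+	 * address bytes.
+	 */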
+	addr_high = (tp->dev->dev_addr[0] +
+		     tp->dev->dev_addr[1] +
+		     tp->dev->dev_addr[2] +
+		     tp->dev->dev_addr[3] +
+		     tp->dev->dev_addr[4] +
+		     tp->dev->dev_addr[5]) &
+		TX_BACKOFF_SEED_MASK;
+	tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
+static void tg3_enable_register_access(struct tg3 *tp)
+{
+	/*
+	 * Make sure register accesses (indirect or otherwise) will function
+	 * correctly.
+	 */
+	pci_write_config_dword(tp->pdev,
+			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
+}
+
+static int tg3_power_up(struct tg3 *tp)
+{
+	int err;
+
+	tg3_enable_register_access(tp);
+
+	err = pci_set_power_state(tp->pdev, PCI_D0);
+	if (!err) {
+		/* Switch out of Vaux if it is a NIC */
+		tg3_pwrsrc_switch_to_vmain(tp);
+	} else {
+		netdev_err(tp->dev, "Transition to D0 failed\n");
+	}
+
+	return err;
+}
+
+static int tg3_setup_phy(struct tg3 *, bool);
+
+static int tg3_power_down_prepare(struct tg3 *tp)
+{
+	u32 misc_host_ctrl;
+	bool device_should_wake, do_low_power;
+
+	tg3_enable_register_access(tp);
+
+	/* Restore the CLKREQ setting. */
+	if (tg3_flag(tp, CLKREQ_BUG))
+		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
+					 PCI_EXP_LNKCTL_CLKREQ_EN);
+
+	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
+
+	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
+			     tg3_flag(tp, WOL_ENABLE);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		do_low_power = false;
+		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+			struct phy_device *phydev;
+			u32 phyid, advertising;
+
+			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+
+			tp->link_config.speed = phydev->speed;
+			tp->link_config.duplex = phydev->duplex;
+			tp->link_config.autoneg = phydev->autoneg;
+			tp->link_config.advertising = phydev->advertising;
+
+			advertising = ADVERTISED_TP |
+				      ADVERTISED_Pause |
+				      ADVERTISED_Autoneg |
+				      ADVERTISED_10baseT_Half;
+
+			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
+				if (tg3_flag(tp, WOL_SPEED_100MB))
+					advertising |=
+						ADVERTISED_100baseT_Half |
+						ADVERTISED_100baseT_Full |
+						ADVERTISED_10baseT_Full;
+				else
+					advertising |= ADVERTISED_10baseT_Full;
+			}
+
+			phydev->advertising = advertising;
+
+			phy_start_aneg(phydev);
+
+			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
+			if (phyid != PHY_ID_BCMAC131) {
+				phyid &= PHY_BCM_OUI_MASK;
+				if (phyid == PHY_BCM_OUI_1 ||
+				    phyid == PHY_BCM_OUI_2 ||
+				    phyid == PHY_BCM_OUI_3)
+					do_low_power = true;
+			}
+		}
+	} else {
+		do_low_power = true;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+			tg3_setup_phy(tp, false);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		u32 val;
+
+		val = tr32(GRC_VCPU_EXT_CTRL);
+		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
+	} else if (!tg3_flag(tp, ENABLE_ASF)) {
+		int i;
+		u32 val;
+
+		for (i = 0; i < 200; i++) {
+			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+				break;
+			msleep(1);
+		}
+	}
+	if (tg3_flag(tp, WOL_CAP))
+		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+						     WOL_DRV_STATE_SHUTDOWN |
+						     WOL_DRV_WOL |
+						     WOL_SET_MAGIC_PKT);
+
+	if (device_should_wake) {
+		u32 mac_mode;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+			if (do_low_power &&
+			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+				tg3_phy_auxctl_write(tp,
+					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
+					       MII_TG3_AUXCTL_PCTL_WOL_EN |
+					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
+				udelay(40);
+			}
+
+			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+				mac_mode = MAC_MODE_PORT_MODE_GMII;
+			else if (tp->phy_flags &
+				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
+				if (tp->link_config.active_speed == SPEED_1000)
+					mac_mode = MAC_MODE_PORT_MODE_GMII;
+				else
+					mac_mode = MAC_MODE_PORT_MODE_MII;
+			} else
+				mac_mode = MAC_MODE_PORT_MODE_MII;
+
+			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
+				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
+					     SPEED_100 : SPEED_10;
+				if (tg3_5700_link_polarity(tp, speed))
+					mac_mode |= MAC_MODE_LINK_POLARITY;
+				else
+					mac_mode &= ~MAC_MODE_LINK_POLARITY;
+			}
+		} else {
+			mac_mode = MAC_MODE_PORT_MODE_TBI;
+		}
+
+		if (!tg3_flag(tp, 5750_PLUS))
+			tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
+		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
+			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+
+		if (tg3_flag(tp, ENABLE_APE))
+			mac_mode |= MAC_MODE_APE_TX_EN |
+				    MAC_MODE_APE_RX_EN |
+				    MAC_MODE_TDE_ENABLE;
+
+		tw32_f(MAC_MODE, mac_mode);
+		udelay(100);
+
+		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
+		udelay(10);
+	}
+
+	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
+	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
+		u32 base_val;
+
+		base_val = tp->pci_clock_ctrl;
+		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
+			     CLOCK_CTRL_TXCLK_DISABLE);
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
+	} else if (tg3_flag(tp, 5780_CLASS) ||
+		   tg3_flag(tp, CPMU_PRESENT) ||
+		   tg3_asic_rev(tp) == ASIC_REV_5906) {
+		/* do nothing */
+	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
+		u32 newbits1, newbits2;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5701) {
+			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
+				    CLOCK_CTRL_TXCLK_DISABLE |
+				    CLOCK_CTRL_ALTCLK);
+			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+		} else if (tg3_flag(tp, 5705_PLUS)) {
+			newbits1 = CLOCK_CTRL_625_CORE;
+			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
+		} else {
+			newbits1 = CLOCK_CTRL_ALTCLK;
+			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+		}
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
+			    40);
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
+			    40);
+
+		if (!tg3_flag(tp, 5705_PLUS)) {
+			u32 newbits3;
+
+			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+			    tg3_asic_rev(tp) == ASIC_REV_5701) {
+				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
+					    CLOCK_CTRL_TXCLK_DISABLE |
+					    CLOCK_CTRL_44MHZ_CORE);
+			} else {
+				newbits3 = CLOCK_CTRL_44MHZ_CORE;
+			}
+
+			tw32_wait_f(TG3PCI_CLOCK_CTRL,
+				    tp->pci_clock_ctrl | newbits3, 40);
+		}
+	}
+
+	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
+		tg3_power_down_phy(tp, do_low_power);
+
+	tg3_frob_aux_power(tp, true);
+
+	/* Workaround for unstable PLL clock */
+	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
+		u32 val = tr32(0x7d00);
+
+		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+		tw32(0x7d00, val);
+		if (!tg3_flag(tp, ENABLE_ASF)) {
+			int err;
+
+			err = tg3_nvram_lock(tp);
+			tg3_halt_cpu(tp, RX_CPU_BASE);
+			if (!err)
+				tg3_nvram_unlock(tp);
+		}
+	}
+
+	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
+
+	return 0;
+}
+
+static void tg3_power_down(struct tg3 *tp)
+{
+	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
+	pci_set_power_state(tp->pdev, PCI_D3hot);
+}
+
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+{
+	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
+	case MII_TG3_AUX_STAT_10HALF:
+		*speed = SPEED_10;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_10FULL:
+		*speed = SPEED_10;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	case MII_TG3_AUX_STAT_100HALF:
+		*speed = SPEED_100;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_100FULL:
+		*speed = SPEED_100;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	case MII_TG3_AUX_STAT_1000HALF:
+		*speed = SPEED_1000;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_1000FULL:
+		*speed = SPEED_1000;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	default:
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
+				 SPEED_10;
+			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
+				  DUPLEX_HALF;
+			break;
+		}
+		*speed = SPEED_UNKNOWN;
+		*duplex = DUPLEX_UNKNOWN;
+		break;
+	}
+}
+
+static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+{
+	int err = 0;
+	u32 val, new_adv;
+
+	new_adv = ADVERTISE_CSMA;
+	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+	new_adv |= mii_advertise_flowctrl(flowctrl);
+
+	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
+	if (err)
+		goto done;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
+
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
+			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+
+		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+		if (err)
+			goto done;
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		goto done;
+
+	tw32(TG3_CPMU_EEE_MODE,
+	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+	if (!err) {
+		u32 err2;
+
+		val = 0;
+		/* Advertise 100-BaseTX EEE ability */
+		if (advertise & ADVERTISED_100baseT_Full)
+			val |= MDIO_AN_EEE_ADV_100TX;
+		/* Advertise 1000-BaseT EEE ability */
+		if (advertise & ADVERTISED_1000baseT_Full)
+			val |= MDIO_AN_EEE_ADV_1000T;
+
+		if (!tp->eee.eee_enabled) {
+			val = 0;
+			tp->eee.advertised = 0;
+		} else {
+			tp->eee.advertised = advertise &
+					     (ADVERTISED_100baseT_Full |
+					      ADVERTISED_1000baseT_Full);
+		}
+
+		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+		if (err)
+			val = 0;
+
+		switch (tg3_asic_rev(tp)) {
+		case ASIC_REV_5717:
+		case ASIC_REV_57765:
+		case ASIC_REV_57766:
+		case ASIC_REV_5719:
+			/* If we advertised any EEE abilities above... */
+			if (val)
+				val = MII_TG3_DSP_TAP26_ALNOKO |
+				      MII_TG3_DSP_TAP26_RMRXSTO |
+				      MII_TG3_DSP_TAP26_OPCSINPT;
+			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+			/* Fall through */
+		case ASIC_REV_5720:
+		case ASIC_REV_5762:
+			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+						 MII_TG3_DSP_CH34TP2_HIBW01);
+		}
+
+		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
+		if (!err)
+			err = err2;
+	}
+
+done:
+	return err;
+}
+
+static void tg3_phy_copper_begin(struct tg3 *tp)
+{
+	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+		u32 adv, fc;
+
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
+			adv = ADVERTISED_10baseT_Half |
+			      ADVERTISED_10baseT_Full;
+			if (tg3_flag(tp, WOL_SPEED_100MB))
+				adv |= ADVERTISED_100baseT_Half |
+				       ADVERTISED_100baseT_Full;
+			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+				if (!(tp->phy_flags &
+				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
+					adv |= ADVERTISED_1000baseT_Half;
+				adv |= ADVERTISED_1000baseT_Full;
+			}
+
+			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
+		} else {
+			adv = tp->link_config.advertising;
+			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+				adv &= ~(ADVERTISED_1000baseT_Half |
+					 ADVERTISED_1000baseT_Full);
+
+			fc = tp->link_config.flowctrl;
+		}
+
+		tg3_phy_autoneg_cfg(tp, adv, fc);
+
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
+			/* Normally during power down we want to autonegotiate
+			 * the lowest possible speed for WOL. However, to avoid
+			 * link flap, we leave it untouched.
+			 */
+			return;
+		}
+
+		tg3_writephy(tp, MII_BMCR,
+			     BMCR_ANENABLE | BMCR_ANRESTART);
+	} else {
+		int i;
+		u32 bmcr, orig_bmcr;
+
+		tp->link_config.active_speed = tp->link_config.speed;
+		tp->link_config.active_duplex = tp->link_config.duplex;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+			/* With autoneg disabled, 5715 only links up when the
+			 * advertisement register has the configured speed
+			 * enabled.
+			 */
+			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
+		}
+
+		bmcr = 0;
+		switch (tp->link_config.speed) {
+		default:
+		case SPEED_10:
+			break;
+
+		case SPEED_100:
+			bmcr |= BMCR_SPEED100;
+			break;
+
+		case SPEED_1000:
+			bmcr |= BMCR_SPEED1000;
+			break;
+		}
+
+		if (tp->link_config.duplex == DUPLEX_FULL)
+			bmcr |= BMCR_FULLDPLX;
+
+		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+		    (bmcr != orig_bmcr)) {
+			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
+			for (i = 0; i < 1500; i++) {
+				u32 tmp;
+
+				udelay(10);
+				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+				    tg3_readphy(tp, MII_BMSR, &tmp))
+					continue;
+				if (!(tmp & BMSR_LSTATUS)) {
+					udelay(40);
+					break;
+				}
+			}
+			tg3_writephy(tp, MII_BMCR, bmcr);
+			udelay(40);
+		}
+	}
+}
+
+static int tg3_phy_pull_config(struct tg3 *tp)
+{
+	int err;
+	u32 val;
+
+	err = tg3_readphy(tp, MII_BMCR, &val);
+	if (err)
+		goto done;
+
+	if (!(val & BMCR_ANENABLE)) {
+		tp->link_config.autoneg = AUTONEG_DISABLE;
+		tp->link_config.advertising = 0;
+		tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+		err = -EIO;
+
+		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
+		case 0:
+			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+				goto done;
+
+			tp->link_config.speed = SPEED_10;
+			break;
+		case BMCR_SPEED100:
+			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+				goto done;
+
+			tp->link_config.speed = SPEED_100;
+			break;
+		case BMCR_SPEED1000:
+			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+				tp->link_config.speed = SPEED_1000;
+				break;
+			}
+			/* Fall through */
+		default:
+			goto done;
+		}
+
+		if (val & BMCR_FULLDPLX)
+			tp->link_config.duplex = DUPLEX_FULL;
+		else
+			tp->link_config.duplex = DUPLEX_HALF;
+
+		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+		err = 0;
+		goto done;
+	}
+
+	tp->link_config.autoneg = AUTONEG_ENABLE;
+	tp->link_config.advertising = ADVERTISED_Autoneg;
+	tg3_flag_set(tp, PAUSE_AUTONEG);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+		u32 adv;
+
+		err = tg3_readphy(tp, MII_ADVERTISE, &val);
+		if (err)
+			goto done;
+
+		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
+		tp->link_config.advertising |= adv | ADVERTISED_TP;
+
+		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
+	} else {
+		tp->link_config.advertising |= ADVERTISED_FIBRE;
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 adv;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+			err = tg3_readphy(tp, MII_CTRL1000, &val);
+			if (err)
+				goto done;
+
+			adv = mii_ctrl1000_to_ethtool_adv_t(val);
+		} else {
+			err = tg3_readphy(tp, MII_ADVERTISE, &val);
+			if (err)
+				goto done;
+
+			adv = tg3_decode_flowctrl_1000X(val);
+			tp->link_config.flowctrl = adv;
+
+			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
+			adv = mii_adv_to_ethtool_adv_x(val);
+		}
+
+		tp->link_config.advertising |= adv;
+	}
+
+done:
+	return err;
+}
+
+static int tg3_init_5401phy_dsp(struct tg3 *tp)
+{
+	int err;
+
+	/* Turn off tap power management. */
+	/* Set Extended packet length bit */
+	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+
+	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
+
+	udelay(40);
+
+	return err;
+}
+
+static bool tg3_phy_eee_config_ok(struct tg3 *tp)
+{
+	struct ethtool_eee eee;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		return true;
+
+	tg3_eee_pull_config(tp, &eee);
+
+	if (tp->eee.eee_enabled) {
+		if (tp->eee.advertised != eee.advertised ||
+		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
+		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
+			return false;
+	} else {
+		/* EEE is disabled but we're advertising */
+		if (eee.advertised)
+			return false;
+	}
+
+	return true;
+}
+
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
+{
+	u32 advmsk, tgtadv, advertising;
+
+	advertising = tp->link_config.advertising;
+	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
+
+	advmsk = ADVERTISE_ALL;
+	if (tp->link_config.active_duplex == DUPLEX_FULL) {
+		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	}
+
+	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+		return false;
+
+	if ((*lcladv & advmsk) != tgtadv)
+		return false;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 tg3_ctrl;
+
+		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
+
+		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
+			return false;
+
+		if (tgtadv &&
+		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
+			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
+				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+		} else {
+			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+		}
+
+		if (tg3_ctrl != tgtadv)
+			return false;
+	}
+
+	return true;
+}
+
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
+{
+	u32 lpeth = 0;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 val;
+
+		if (tg3_readphy(tp, MII_STAT1000, &val))
+			return false;
+
+		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
+	}
+
+	if (tg3_readphy(tp, MII_LPA, rmtadv))
+		return false;
+
+	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+	tp->link_config.rmt_adv = lpeth;
+
+	return true;
+}
+
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
+{
+	if (curr_link_up != tp->link_up) {
+		if (curr_link_up) {
+			netif_carrier_on(tp->dev);
+		} else {
+			netif_carrier_off(tp->dev);
+			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+
+		tg3_link_report(tp);
+		return true;
+	}
+
+	return false;
+}
+
+static void tg3_clear_mac_status(struct tg3 *tp)
+{
+	tw32(MAC_EVENT, 0);
+
+	tw32_f(MAC_STATUS,
+	       MAC_STATUS_SYNC_CHANGED |
+	       MAC_STATUS_CFG_CHANGED |
+	       MAC_STATUS_MI_COMPLETION |
+	       MAC_STATUS_LNKSTATE_CHANGED);
+	udelay(40);
+}
+
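+/* Program the CPMU EEE link-idle detection, mode and debounce timer
+ * registers from the current settings in tp->eee.
+ */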
+static void tg3_setup_eee(struct tg3 *tp)
+{
+	u32 val;
+
+	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
+
+	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
+
+	tw32_f(TG3_CPMU_EEE_CTRL,
+	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
+	      TG3_CPMU_EEEMD_LPI_IN_RX |
+	      TG3_CPMU_EEEMD_EEE_ENABLE;
+
+	if (tg3_asic_rev(tp) != ASIC_REV_5717)
+		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+	if (tg3_flag(tp, ENABLE_APE))
+		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
+
+	tw32_f(TG3_CPMU_EEE_DBTMR1,
+	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+	       (tp->eee.tx_lpi_timer & 0xffff));
+
+	tw32_f(TG3_CPMU_EEE_DBTMR2,
+	       TG3_CPMU_DBTMR2_APE_TX_2047US |
+	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
+{
+	bool current_link_up;
+	u32 bmsr, val;
+	u32 lcl_adv, rmt_adv;
+	u16 current_speed;
+	u8 current_duplex;
+	int i, err;
+
+	tg3_clear_mac_status(tp);
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
+
+	/* Some third-party PHYs need to be reset on link going
+	 * down.
+	 */
+	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
+	    tp->link_up) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    !(bmsr & BMSR_LSTATUS))
+			force_reset = true;
+	}
+	if (force_reset)
+		tg3_phy_reset(tp);
+
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+		    !tg3_flag(tp, INIT_COMPLETE))
+			bmsr = 0;
+
+		if (!(bmsr & BMSR_LSTATUS)) {
+			err = tg3_init_5401phy_dsp(tp);
+			if (err)
+				return err;
+
+			tg3_readphy(tp, MII_BMSR, &bmsr);
+			for (i = 0; i < 1000; i++) {
+				udelay(10);
+				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+				    (bmsr & BMSR_LSTATUS)) {
+					udelay(40);
+					break;
+				}
+			}
+
+			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+			    TG3_PHY_REV_BCM5401_B0 &&
+			    !(bmsr & BMSR_LSTATUS) &&
+			    tp->link_config.active_speed == SPEED_1000) {
+				err = tg3_phy_reset(tp);
+				if (!err)
+					err = tg3_init_5401phy_dsp(tp);
+				if (err)
+					return err;
+			}
+		}
+	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
+		/* 5701 {A0,B0} CRC bug workaround */
+		tg3_writephy(tp, 0x15, 0x0a75);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+	}
+
+	/* Clear pending interrupts... */
+	tg3_readphy(tp, MII_TG3_ISTAT, &val);
+	tg3_readphy(tp, MII_TG3_ISTAT, &val);
+
+	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
+		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
+		tg3_writephy(tp, MII_TG3_IMASK, ~0);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5701) {
+		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+		else
+			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
+	}
+
+	current_link_up = false;
+	current_speed = SPEED_UNKNOWN;
+	current_duplex = DUPLEX_UNKNOWN;
+	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+	tp->link_config.rmt_adv = 0;
+
+	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					  &val);
+		if (!err && !(val & (1 << 10))) {
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					     val | (1 << 10));
+			goto relink;
+		}
+	}
+
+	bmsr = 0;
+	for (i = 0; i < 100; i++) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    (bmsr & BMSR_LSTATUS))
+			break;
+		udelay(40);
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		u32 aux_stat, bmcr;
+
+		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+		for (i = 0; i < 2000; i++) {
+			udelay(10);
+			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+			    aux_stat)
+				break;
+		}
+
+		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+					     &current_speed,
+					     &current_duplex);
+
+		bmcr = 0;
+		for (i = 0; i < 200; i++) {
+			tg3_readphy(tp, MII_BMCR, &bmcr);
+			if (tg3_readphy(tp, MII_BMCR, &bmcr))
+				continue;
+			if (bmcr && bmcr != 0x7fff)
+				break;
+			udelay(10);
+		}
+
+		lcl_adv = 0;
+		rmt_adv = 0;
+
+		tp->link_config.active_speed = current_speed;
+		tp->link_config.active_duplex = current_duplex;
+
+		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
+
+			if ((bmcr & BMCR_ANENABLE) &&
+			    eee_config_ok &&
+			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+				current_link_up = true;
+
+			/* EEE settings changes take effect only after a phy
+			 * reset.  If we have skipped a reset due to Link Flap
+			 * Avoidance being enabled, do it now.
+			 */
+			if (!eee_config_ok &&
+			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+			    !force_reset) {
+				tg3_setup_eee(tp);
+				tg3_phy_reset(tp);
+			}
+		} else {
+			if (!(bmcr & BMCR_ANENABLE) &&
+			    tp->link_config.speed == current_speed &&
+			    tp->link_config.duplex == current_duplex) {
+				current_link_up = true;
+			}
+		}
+
+		if (current_link_up &&
+		    tp->link_config.active_duplex == DUPLEX_FULL) {
+			u32 reg, bit;
+
+			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+				reg = MII_TG3_FET_GEN_STAT;
+				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+			} else {
+				reg = MII_TG3_EXT_STAT;
+				bit = MII_TG3_EXT_STAT_MDIX;
+			}
+
+			if (!tg3_readphy(tp, reg, &val) && (val & bit))
+				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
+			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+		}
+	}
+
+relink:
+	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+		tg3_phy_copper_begin(tp);
+
+		if (tg3_flag(tp, ROBOSWITCH)) {
+			current_link_up = true;
+			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
+			current_speed = SPEED_1000;
+			current_duplex = DUPLEX_FULL;
+			tp->link_config.active_speed = current_speed;
+			tp->link_config.active_duplex = current_duplex;
+		}
+
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			current_link_up = true;
+	}
+
+	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+	if (current_link_up) {
+		if (tp->link_config.active_speed == SPEED_100 ||
+		    tp->link_config.active_speed == SPEED_10)
+			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+	else
+		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+	/* In order for the 5750 core in BCM4785 chip to work properly
+	 * in RGMII mode, the Led Control Register must be set up.
+	 */
+	if (tg3_flag(tp, RGMII_MODE)) {
+		u32 led_ctrl = tr32(MAC_LED_CTRL);
+		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+		if (tp->link_config.active_speed == SPEED_10)
+			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+		else if (tp->link_config.active_speed == SPEED_100)
+			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+				     LED_CTRL_100MBPS_ON);
+		else if (tp->link_config.active_speed == SPEED_1000)
+			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+				     LED_CTRL_1000MBPS_ON);
+
+		tw32(MAC_LED_CTRL, led_ctrl);
+		udelay(40);
+	}
+
+	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+	if (tp->link_config.active_duplex == DUPLEX_HALF)
+		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
+		if (current_link_up &&
+		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
+			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+		else
+			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+	}
+
+	/* ??? Without this setting Netgear GA302T PHY does not
+	 * ??? send/receive packets...
+	 */
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
+		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tg3_phy_eee_adjust(tp, current_link_up);
+
+	if (tg3_flag(tp, USE_LINKCHG_REG)) {
+		/* Polled via timer. */
+		tw32_f(MAC_EVENT, 0);
+	} else {
+		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+	}
+	udelay(40);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
+	    current_link_up &&
+	    tp->link_config.active_speed == SPEED_1000 &&
+	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
+		udelay(120);
+		tw32_f(MAC_STATUS,
+		     (MAC_STATUS_SYNC_CHANGED |
+		      MAC_STATUS_CFG_CHANGED));
+		udelay(40);
+		tg3_write_mem(tp,
+			      NIC_SRAM_FIRMWARE_MBOX,
+			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
+	}
+
+	/* Prevent send BD corruption. */
+	if (tg3_flag(tp, CLKREQ_BUG)) {
+		if (tp->link_config.active_speed == SPEED_100 ||
+		    tp->link_config.active_speed == SPEED_10)
+			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
+						   PCI_EXP_LNKCTL_CLKREQ_EN);
+		else
+			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
+						 PCI_EXP_LNKCTL_CLKREQ_EN);
+	}
+
+	tg3_test_and_report_link_chg(tp, current_link_up);
+
+	return 0;
+}
+
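+/* Software state machine for 1000BASE-X (IEEE 802.3 clause 37) link
+ * autonegotiation, driven from fiber_autoneg() when the MAC's hardware
+ * autoneg support is not used.
+ */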
+struct tg3_fiber_aneginfo {
+	int state;
+#define ANEG_STATE_UNKNOWN		0
+#define ANEG_STATE_AN_ENABLE		1
+#define ANEG_STATE_RESTART_INIT		2
+#define ANEG_STATE_RESTART		3
+#define ANEG_STATE_DISABLE_LINK_OK	4
+#define ANEG_STATE_ABILITY_DETECT_INIT	5
+#define ANEG_STATE_ABILITY_DETECT	6
+#define ANEG_STATE_ACK_DETECT_INIT	7
+#define ANEG_STATE_ACK_DETECT		8
+#define ANEG_STATE_COMPLETE_ACK_INIT	9
+#define ANEG_STATE_COMPLETE_ACK		10
+#define ANEG_STATE_IDLE_DETECT_INIT	11
+#define ANEG_STATE_IDLE_DETECT		12
+#define ANEG_STATE_LINK_OK		13
+#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
+#define ANEG_STATE_NEXT_PAGE_WAIT	15
+
+	u32 flags;
+#define MR_AN_ENABLE		0x00000001
+#define MR_RESTART_AN		0x00000002
+#define MR_AN_COMPLETE		0x00000004
+#define MR_PAGE_RX		0x00000008
+#define MR_NP_LOADED		0x00000010
+#define MR_TOGGLE_TX		0x00000020
+#define MR_LP_ADV_FULL_DUPLEX	0x00000040
+#define MR_LP_ADV_HALF_DUPLEX	0x00000080
+#define MR_LP_ADV_SYM_PAUSE	0x00000100
+#define MR_LP_ADV_ASYM_PAUSE	0x00000200
+#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
+#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
+#define MR_LP_ADV_NEXT_PAGE	0x00001000
+#define MR_TOGGLE_RX		0x00002000
+#define MR_NP_RX		0x00004000
+
+#define MR_LINK_OK		0x80000000
+
+	unsigned long link_time, cur_time;
+
+	u32 ability_match_cfg;
+	int ability_match_count;
+
+	char ability_match, idle_match, ack_match;
+
+	u32 txconfig, rxconfig;
+#define ANEG_CFG_NP		0x00000080
+#define ANEG_CFG_ACK		0x00000040
+#define ANEG_CFG_RF2		0x00000020
+#define ANEG_CFG_RF1		0x00000010
+#define ANEG_CFG_PS2		0x00000001
+#define ANEG_CFG_PS1		0x00008000
+#define ANEG_CFG_HD		0x00004000
+#define ANEG_CFG_FD		0x00002000
+#define ANEG_CFG_INVAL		0x00001f06
+
+};
+#define ANEG_OK		0
+#define ANEG_DONE	1
+#define ANEG_TIMER_ENAB	2
+#define ANEG_FAILED	-1
+
+#define ANEG_STATE_SETTLE_TIME	10000
+
+static int tg3_fiber_aneg_smachine(struct tg3 *tp,
+				   struct tg3_fiber_aneginfo *ap)
+{
+	u16 flowctrl;
+	unsigned long delta;
+	u32 rx_cfg_reg;
+	int ret;
+
+	if (ap->state == ANEG_STATE_UNKNOWN) {
+		ap->rxconfig = 0;
+		ap->link_time = 0;
+		ap->cur_time = 0;
+		ap->ability_match_cfg = 0;
+		ap->ability_match_count = 0;
+		ap->ability_match = 0;
+		ap->idle_match = 0;
+		ap->ack_match = 0;
+	}
+	ap->cur_time++;
+
+	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
+		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
+
+		if (rx_cfg_reg != ap->ability_match_cfg) {
+			ap->ability_match_cfg = rx_cfg_reg;
+			ap->ability_match = 0;
+			ap->ability_match_count = 0;
+		} else {
+			if (++ap->ability_match_count > 1) {
+				ap->ability_match = 1;
+				ap->ability_match_cfg = rx_cfg_reg;
+			}
+		}
+		if (rx_cfg_reg & ANEG_CFG_ACK)
+			ap->ack_match = 1;
+		else
+			ap->ack_match = 0;
+
+		ap->idle_match = 0;
+	} else {
+		ap->idle_match = 1;
+		ap->ability_match_cfg = 0;
+		ap->ability_match_count = 0;
+		ap->ability_match = 0;
+		ap->ack_match = 0;
+
+		rx_cfg_reg = 0;
+	}
+
+	ap->rxconfig = rx_cfg_reg;
+	ret = ANEG_OK;
+
+	switch (ap->state) {
+	case ANEG_STATE_UNKNOWN:
+		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
+			ap->state = ANEG_STATE_AN_ENABLE;
+
+		/* fallthru */
+	case ANEG_STATE_AN_ENABLE:
+		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
+		if (ap->flags & MR_AN_ENABLE) {
+			ap->link_time = 0;
+			ap->cur_time = 0;
+			ap->ability_match_cfg = 0;
+			ap->ability_match_count = 0;
+			ap->ability_match = 0;
+			ap->idle_match = 0;
+			ap->ack_match = 0;
+
+			ap->state = ANEG_STATE_RESTART_INIT;
+		} else {
+			ap->state = ANEG_STATE_DISABLE_LINK_OK;
+		}
+		break;
+
+	case ANEG_STATE_RESTART_INIT:
+		ap->link_time = ap->cur_time;
+		ap->flags &= ~(MR_NP_LOADED);
+		ap->txconfig = 0;
+		tw32(MAC_TX_AUTO_NEG, 0);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ret = ANEG_TIMER_ENAB;
+		ap->state = ANEG_STATE_RESTART;
+
+		/* fallthru */
+	case ANEG_STATE_RESTART:
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME)
+			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
+		else
+			ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_DISABLE_LINK_OK:
+		ret = ANEG_DONE;
+		break;
+
+	case ANEG_STATE_ABILITY_DETECT_INIT:
+		ap->flags &= ~(MR_TOGGLE_TX);
+		ap->txconfig = ANEG_CFG_FD;
+		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		if (flowctrl & ADVERTISE_1000XPAUSE)
+			ap->txconfig |= ANEG_CFG_PS1;
+		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+			ap->txconfig |= ANEG_CFG_PS2;
+		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_ABILITY_DETECT;
+		break;
+
+	case ANEG_STATE_ABILITY_DETECT:
+		if (ap->ability_match != 0 && ap->rxconfig != 0)
+			ap->state = ANEG_STATE_ACK_DETECT_INIT;
+		break;
+
+	case ANEG_STATE_ACK_DETECT_INIT:
+		ap->txconfig |= ANEG_CFG_ACK;
+		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_ACK_DETECT;
+
+		/* fallthru */
+	case ANEG_STATE_ACK_DETECT:
+		if (ap->ack_match != 0) {
+			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
+			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
+				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
+			} else {
+				ap->state = ANEG_STATE_AN_ENABLE;
+			}
+		} else if (ap->ability_match != 0 &&
+			   ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+		}
+		break;
+
+	case ANEG_STATE_COMPLETE_ACK_INIT:
+		if (ap->rxconfig & ANEG_CFG_INVAL) {
+			ret = ANEG_FAILED;
+			break;
+		}
+		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
+			       MR_LP_ADV_HALF_DUPLEX |
+			       MR_LP_ADV_SYM_PAUSE |
+			       MR_LP_ADV_ASYM_PAUSE |
+			       MR_LP_ADV_REMOTE_FAULT1 |
+			       MR_LP_ADV_REMOTE_FAULT2 |
+			       MR_LP_ADV_NEXT_PAGE |
+			       MR_TOGGLE_RX |
+			       MR_NP_RX);
+		if (ap->rxconfig & ANEG_CFG_FD)
+			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
+		if (ap->rxconfig & ANEG_CFG_HD)
+			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
+		if (ap->rxconfig & ANEG_CFG_PS1)
+			ap->flags |= MR_LP_ADV_SYM_PAUSE;
+		if (ap->rxconfig & ANEG_CFG_PS2)
+			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
+		if (ap->rxconfig & ANEG_CFG_RF1)
+			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
+		if (ap->rxconfig & ANEG_CFG_RF2)
+			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
+		if (ap->rxconfig & ANEG_CFG_NP)
+			ap->flags |= MR_LP_ADV_NEXT_PAGE;
+
+		ap->link_time = ap->cur_time;
+
+		ap->flags ^= (MR_TOGGLE_TX);
+		if (ap->rxconfig & 0x0008)
+			ap->flags |= MR_TOGGLE_RX;
+		if (ap->rxconfig & ANEG_CFG_NP)
+			ap->flags |= MR_NP_RX;
+		ap->flags |= MR_PAGE_RX;
+
+		ap->state = ANEG_STATE_COMPLETE_ACK;
+		ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_COMPLETE_ACK:
+		if (ap->ability_match != 0 &&
+		    ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+			break;
+		}
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME) {
+			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
+				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+			} else {
+				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
+				    !(ap->flags & MR_NP_RX)) {
+					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+				} else {
+					ret = ANEG_FAILED;
+				}
+			}
+		}
+		break;
+
+	case ANEG_STATE_IDLE_DETECT_INIT:
+		ap->link_time = ap->cur_time;
+		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_IDLE_DETECT;
+		ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_IDLE_DETECT:
+		if (ap->ability_match != 0 &&
+		    ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+			break;
+		}
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME) {
+			/* XXX another gem from the Broadcom driver :( */
+			ap->state = ANEG_STATE_LINK_OK;
+		}
+		break;
+
+	case ANEG_STATE_LINK_OK:
+		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
+		ret = ANEG_DONE;
+		break;
+
+	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
+		/* ??? unimplemented */
+		break;
+
+	case ANEG_STATE_NEXT_PAGE_WAIT:
+		/* ??? unimplemented */
+		break;
+
+	default:
+		ret = ANEG_FAILED;
+		break;
+	}
+
+	return ret;
+}
+
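+/* Run the software autoneg state machine, stepping it roughly once per
+ * microsecond for up to ~195 ms, and report whether negotiation completed.
+ */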
+static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
+{
+	int res = 0;
+	struct tg3_fiber_aneginfo aninfo;
+	int status = ANEG_FAILED;
+	unsigned int tick;
+	u32 tmp;
+
+	tw32_f(MAC_TX_AUTO_NEG, 0);
+
+	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+	udelay(40);
+
+	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+	udelay(40);
+
+	memset(&aninfo, 0, sizeof(aninfo));
+	aninfo.flags |= MR_AN_ENABLE;
+	aninfo.state = ANEG_STATE_UNKNOWN;
+	aninfo.cur_time = 0;
+	tick = 0;
+	while (++tick < 195000) {
+		status = tg3_fiber_aneg_smachine(tp, &aninfo);
+		if (status == ANEG_DONE || status == ANEG_FAILED)
+			break;
+
+		udelay(1);
+	}
+
+	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	*txflags = aninfo.txconfig;
+	*rxflags = aninfo.flags;
+
+	if (status == ANEG_DONE &&
+	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+			     MR_LP_ADV_FULL_DUPLEX)))
+		res = 1;
+
+	return res;
+}
+
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+	u32 mac_status = tr32(MAC_STATUS);
+	int i;
+
+	/* Reset when initializing for the first time or we have a link. */
+	if (tg3_flag(tp, INIT_COMPLETE) &&
+	    !(mac_status & MAC_STATUS_PCS_SYNCED))
+		return;
+
+	/* Set PLL lock range. */
+	tg3_writephy(tp, 0x16, 0x8007);
+
+	/* SW reset */
+	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+	/* Wait for reset to complete. */
+	/* XXX schedule_timeout() ... */
+	for (i = 0; i < 500; i++)
+		udelay(10);
+
+	/* Config mode; select PMA/Ch 1 regs. */
+	tg3_writephy(tp, 0x10, 0x8411);
+
+	/* Enable auto-lock and comdet, select txclk for tx. */
+	tg3_writephy(tp, 0x11, 0x0a10);
+
+	tg3_writephy(tp, 0x18, 0x00a0);
+	tg3_writephy(tp, 0x16, 0x41ff);
+
+	/* Assert and deassert POR. */
+	tg3_writephy(tp, 0x13, 0x0400);
+	udelay(40);
+	tg3_writephy(tp, 0x13, 0x0000);
+
+	tg3_writephy(tp, 0x11, 0x0a50);
+	udelay(40);
+	tg3_writephy(tp, 0x11, 0x0a10);
+
+	/* Wait for signal to stabilize */
+	/* XXX schedule_timeout() ... */
+	for (i = 0; i < 15000; i++)
+		udelay(10);
+
+	/* Deselect the channel register so we can read the PHYID
+	 * later.
+	 */
+	tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+	u16 flowctrl;
+	bool current_link_up;
+	u32 sg_dig_ctrl, sg_dig_status;
+	u32 serdes_cfg, expected_sg_dig_ctrl;
+	int workaround, port_a;
+
+	serdes_cfg = 0;
+	expected_sg_dig_ctrl = 0;
+	workaround = 0;
+	port_a = 1;
+	current_link_up = false;
+
+	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
+		workaround = 1;
+		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+			port_a = 0;
+
+		/* preserve bits 0-11,13,14 for signal pre-emphasis */
+		/* preserve bits 20-23 for voltage regulator */
+		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+	}
+
+	sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
+			if (workaround) {
+				u32 val = serdes_cfg;
+
+				if (port_a)
+					val |= 0xc010000;
+				else
+					val |= 0x4010000;
+				tw32_f(MAC_SERDES_CFG, val);
+			}
+
+			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+		}
+		if (mac_status & MAC_STATUS_PCS_SYNCED) {
+			tg3_setup_flow_control(tp, 0, 0);
+			current_link_up = true;
+		}
+		goto out;
+	}
+
+	/* Want auto-negotiation.  */
+	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
+
+	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+	if (flowctrl & ADVERTISE_1000XPAUSE)
+		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
+	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
+
+	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
+		    tp->serdes_counter &&
+		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
+				    MAC_STATUS_RCVD_CFG)) ==
+		     MAC_STATUS_PCS_SYNCED)) {
+			tp->serdes_counter--;
+			current_link_up = true;
+			goto out;
+		}
+restart_autoneg:
+		if (workaround)
+			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
+		udelay(5);
+		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+				 MAC_STATUS_SIGNAL_DET)) {
+		sg_dig_status = tr32(SG_DIG_STATUS);
+		mac_status = tr32(MAC_STATUS);
+
+		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
+		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
+			u32 local_adv = 0, remote_adv = 0;
+
+			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
+				local_adv |= ADVERTISE_1000XPAUSE;
+			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
+				local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
+				remote_adv |= LPA_1000XPAUSE;
+			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE_ASYM;
+
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
+			tg3_setup_flow_control(tp, local_adv, remote_adv);
+			current_link_up = true;
+			tp->serdes_counter = 0;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
+			if (tp->serdes_counter)
+				tp->serdes_counter--;
+			else {
+				if (workaround) {
+					u32 val = serdes_cfg;
+
+					if (port_a)
+						val |= 0xc010000;
+					else
+						val |= 0x4010000;
+
+					tw32_f(MAC_SERDES_CFG, val);
+				}
+
+				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+				udelay(40);
+
+				/* Link parallel detection - link is up
+				 * only if we have PCS_SYNC and not
+				 * receiving config code words.
+				 */
+				mac_status = tr32(MAC_STATUS);
+				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
+					tg3_setup_flow_control(tp, 0, 0);
+					current_link_up = true;
+					tp->phy_flags |=
+						TG3_PHYFLG_PARALLEL_DETECT;
+					tp->serdes_counter =
+						SERDES_PARALLEL_DET_TIMEOUT;
+				} else
+					goto restart_autoneg;
+			}
+		}
+	} else {
+		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+	}
+
+out:
+	return current_link_up;
+}
+
+static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+	bool current_link_up = false;
+
+	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
+		goto out;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+		u32 txflags, rxflags;
+		int i;
+
+		if (fiber_autoneg(tp, &txflags, &rxflags)) {
+			u32 local_adv = 0, remote_adv = 0;
+
+			if (txflags & ANEG_CFG_PS1)
+				local_adv |= ADVERTISE_1000XPAUSE;
+			if (txflags & ANEG_CFG_PS2)
+				local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+			if (rxflags & MR_LP_ADV_SYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE;
+			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE_ASYM;
+
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
+			tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+			current_link_up = true;
+		}
+		for (i = 0; i < 30; i++) {
+			udelay(20);
+			tw32_f(MAC_STATUS,
+			       (MAC_STATUS_SYNC_CHANGED |
+				MAC_STATUS_CFG_CHANGED));
+			udelay(40);
+			if ((tr32(MAC_STATUS) &
+			     (MAC_STATUS_SYNC_CHANGED |
+			      MAC_STATUS_CFG_CHANGED)) == 0)
+				break;
+		}
+
+		mac_status = tr32(MAC_STATUS);
+		if (!current_link_up &&
+		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
+		    !(mac_status & MAC_STATUS_RCVD_CFG))
+			current_link_up = true;
+	} else {
+		tg3_setup_flow_control(tp, 0, 0);
+
+		/* Forcing 1000FD link up. */
+		current_link_up = true;
+
+		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+		udelay(40);
+
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+	}
+
+out:
+	return current_link_up;
+}
+
+static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
+{
+	u32 orig_pause_cfg;
+	u16 orig_active_speed;
+	u8 orig_active_duplex;
+	u32 mac_status;
+	bool current_link_up;
+	int i;
+
+	orig_pause_cfg = tp->link_config.active_flowctrl;
+	orig_active_speed = tp->link_config.active_speed;
+	orig_active_duplex = tp->link_config.active_duplex;
+
+	if (!tg3_flag(tp, HW_AUTONEG) &&
+	    tp->link_up &&
+	    tg3_flag(tp, INIT_COMPLETE)) {
+		mac_status = tr32(MAC_STATUS);
+		mac_status &= (MAC_STATUS_PCS_SYNCED |
+			       MAC_STATUS_SIGNAL_DET |
+			       MAC_STATUS_CFG_CHANGED |
+			       MAC_STATUS_RCVD_CFG);
+		if (mac_status == (MAC_STATUS_PCS_SYNCED |
+				   MAC_STATUS_SIGNAL_DET)) {
+			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+					    MAC_STATUS_CFG_CHANGED));
+			return 0;
+		}
+	}
+
+	tw32_f(MAC_TX_AUTO_NEG, 0);
+
+	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	if (tp->phy_id == TG3_PHY_ID_BCM8002)
+		tg3_init_bcm8002(tp);
+
+	/* Enable link change event even when serdes polling.  */
+	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+	udelay(40);
+
+	current_link_up = false;
+	tp->link_config.rmt_adv = 0;
+	mac_status = tr32(MAC_STATUS);
+
+	if (tg3_flag(tp, HW_AUTONEG))
+		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+	else
+		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+
+	tp->napi[0].hw_status->status =
+		(SD_STATUS_UPDATED |
+		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
+
+	for (i = 0; i < 100; i++) {
+		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+				    MAC_STATUS_CFG_CHANGED));
+		udelay(5);
+		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+					 MAC_STATUS_CFG_CHANGED |
+					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
+			break;
+	}
+
+	mac_status = tr32(MAC_STATUS);
+	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+		current_link_up = false;
+		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+		    tp->serdes_counter == 0) {
+			tw32_f(MAC_MODE, (tp->mac_mode |
+					  MAC_MODE_SEND_CONFIGS));
+			udelay(1);
+			tw32_f(MAC_MODE, tp->mac_mode);
+		}
+	}
+
+	if (current_link_up) {
+		tp->link_config.active_speed = SPEED_1000;
+		tp->link_config.active_duplex = DUPLEX_FULL;
+		tw32(MAC_LED_CTRL, (tp->led_ctrl |
+				    LED_CTRL_LNKLED_OVERRIDE |
+				    LED_CTRL_1000MBPS_ON));
+	} else {
+		tp->link_config.active_speed = SPEED_UNKNOWN;
+		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
+		tw32(MAC_LED_CTRL, (tp->led_ctrl |
+				    LED_CTRL_LNKLED_OVERRIDE |
+				    LED_CTRL_TRAFFIC_OVERRIDE));
+	}
+
+	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
+		u32 now_pause_cfg = tp->link_config.active_flowctrl;
+		if (orig_pause_cfg != now_pause_cfg ||
+		    orig_active_speed != tp->link_config.active_speed ||
+		    orig_active_duplex != tp->link_config.active_duplex)
+			tg3_link_report(tp);
+	}
+
+	return 0;
+}
+
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
+{
+	int err = 0;
+	u32 bmsr, bmcr;
+	u16 current_speed = SPEED_UNKNOWN;
+	u8 current_duplex = DUPLEX_UNKNOWN;
+	bool current_link_up = false;
+	u32 local_adv, remote_adv, sgsr;
+
+	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
+	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
+	     (sgsr & SERDES_TG3_SGMII_MODE)) {
+
+		if (force_reset)
+			tg3_phy_reset(tp);
+
+		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+
+		if (!(sgsr & SERDES_TG3_LINK_UP)) {
+			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		} else {
+			current_link_up = true;
+			if (sgsr & SERDES_TG3_SPEED_1000) {
+				current_speed = SPEED_1000;
+				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+			} else if (sgsr & SERDES_TG3_SPEED_100) {
+				current_speed = SPEED_100;
+				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+			} else {
+				current_speed = SPEED_10;
+				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+			}
+
+			if (sgsr & SERDES_TG3_FULL_DUPLEX)
+				current_duplex = DUPLEX_FULL;
+			else
+				current_duplex = DUPLEX_HALF;
+		}
+
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		tg3_clear_mac_status(tp);
+
+		goto fiber_setup_done;
+	}
+
+	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tg3_clear_mac_status(tp);
+
+	if (force_reset)
+		tg3_phy_reset(tp);
+
+	tp->link_config.rmt_adv = 0;
+
+	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+			bmsr |= BMSR_LSTATUS;
+		else
+			bmsr &= ~BMSR_LSTATUS;
+	}
+
+	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
+
+	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
+	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+		/* do nothing, just check for link up at the end */
+	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+		u32 adv, newadv;
+
+		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+				 ADVERTISE_1000XPAUSE |
+				 ADVERTISE_1000XPSE_ASYM |
+				 ADVERTISE_SLCT);
+
+		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
+
+		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+			tg3_writephy(tp, MII_ADVERTISE, newadv);
+			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+			tg3_writephy(tp, MII_BMCR, bmcr);
+
+			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+			return err;
+		}
+	} else {
+		u32 new_bmcr;
+
+		bmcr &= ~BMCR_SPEED1000;
+		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
+
+		if (tp->link_config.duplex == DUPLEX_FULL)
+			new_bmcr |= BMCR_FULLDPLX;
+
+		if (new_bmcr != bmcr) {
+			/* BMCR_SPEED1000 is a reserved bit that needs
+			 * to be set on write.
+			 */
+			new_bmcr |= BMCR_SPEED1000;
+
+			/* Force a linkdown */
+			if (tp->link_up) {
+				u32 adv;
+
+				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+				adv &= ~(ADVERTISE_1000XFULL |
+					 ADVERTISE_1000XHALF |
+					 ADVERTISE_SLCT);
+				tg3_writephy(tp, MII_ADVERTISE, adv);
+				tg3_writephy(tp, MII_BMCR, bmcr |
+							   BMCR_ANRESTART |
+							   BMCR_ANENABLE);
+				udelay(10);
+				tg3_carrier_off(tp);
+			}
+			tg3_writephy(tp, MII_BMCR, new_bmcr);
+			bmcr = new_bmcr;
+			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+					bmsr |= BMSR_LSTATUS;
+				else
+					bmsr &= ~BMSR_LSTATUS;
+			}
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		current_speed = SPEED_1000;
+		current_link_up = true;
+		if (bmcr & BMCR_FULLDPLX)
+			current_duplex = DUPLEX_FULL;
+		else
+			current_duplex = DUPLEX_HALF;
+
+		local_adv = 0;
+		remote_adv = 0;
+
+		if (bmcr & BMCR_ANENABLE) {
+			u32 common;
+
+			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
+			common = local_adv & remote_adv;
+			if (common & (ADVERTISE_1000XHALF |
+				      ADVERTISE_1000XFULL)) {
+				if (common & ADVERTISE_1000XFULL)
+					current_duplex = DUPLEX_FULL;
+				else
+					current_duplex = DUPLEX_HALF;
+
+				tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+			} else if (!tg3_flag(tp, 5780_CLASS)) {
+				/* Link is up via parallel detect */
+			} else {
+				current_link_up = false;
+			}
+		}
+	}
+
+fiber_setup_done:
+	if (current_link_up && current_duplex == DUPLEX_FULL)
+		tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+	if (tp->link_config.active_duplex == DUPLEX_HALF)
+		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+
+	tp->link_config.active_speed = current_speed;
+	tp->link_config.active_duplex = current_duplex;
+
+	tg3_test_and_report_link_chg(tp, current_link_up);
+	return err;
+}
+
+static void tg3_serdes_parallel_detect(struct tg3 *tp)
+{
+	if (tp->serdes_counter) {
+		/* Give autoneg time to complete. */
+		tp->serdes_counter--;
+		return;
+	}
+
+	if (!tp->link_up &&
+	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+		u32 bmcr;
+
+		tg3_readphy(tp, MII_BMCR, &bmcr);
+		if (bmcr & BMCR_ANENABLE) {
+			u32 phy1, phy2;
+
+			/* Select shadow register 0x1f */
+			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
+
+			/* Select expansion interrupt status register */
+			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+					 MII_TG3_DSP_EXP1_INT_STAT);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+
+			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
+				/* We have signal detect and not receiving
+				 * config code words, link is up by parallel
+				 * detection.
+				 */
+
+				bmcr &= ~BMCR_ANENABLE;
+				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+				tg3_writephy(tp, MII_BMCR, bmcr);
+				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
+			}
+		}
+	} else if (tp->link_up &&
+		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+		u32 phy2;
+
+		/* Select expansion interrupt status register */
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+				 MII_TG3_DSP_EXP1_INT_STAT);
+		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+		if (phy2 & 0x20) {
+			u32 bmcr;
+
+			/* Config code words received, turn on autoneg. */
+			tg3_readphy(tp, MII_BMCR, &bmcr);
+			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
+
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+		}
+	}
+}
+
+static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
+{
+	u32 val;
+	int err;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		err = tg3_setup_fiber_phy(tp, force_reset);
+	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+		err = tg3_setup_fiber_mii_phy(tp, force_reset);
+	else
+		err = tg3_setup_copper_phy(tp, force_reset);
+
+	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
+		u32 scale;
+
+		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+			scale = 65;
+		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+			scale = 6;
+		else
+			scale = 12;
+
+		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+		tw32(GRC_MISC_CFG, val);
+	}
+
+	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+	      (6 << TX_LENGTHS_IPG_SHIFT);
+	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		val |= tr32(MAC_TX_LENGTHS) &
+		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
+			TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+	if (tp->link_config.active_speed == SPEED_1000 &&
+	    tp->link_config.active_duplex == DUPLEX_HALF)
+		tw32(MAC_TX_LENGTHS, val |
+		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
+	else
+		tw32(MAC_TX_LENGTHS, val |
+		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		if (tp->link_up) {
+			tw32(HOSTCC_STAT_COAL_TICKS,
+			     tp->coal.stats_block_coalesce_usecs);
+		} else {
+			tw32(HOSTCC_STAT_COAL_TICKS, 0);
+		}
+	}
+
+	if (tg3_flag(tp, ASPM_WORKAROUND)) {
+		val = tr32(PCIE_PWR_MGMT_THRESH);
+		if (!tp->link_up)
+			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
+			      tp->pwrmgmt_thresh;
+		else
+			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
+		tw32(PCIE_PWR_MGMT_THRESH, val);
+	}
+
+	return err;
+}
+
+/* tp->lock must be held */
+static u64 tg3_refclk_read(struct tg3 *tp)
+{
+	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+}
+
+/* tp->lock must be held */
+static void tg3_refclk_write(struct tg3 *tp, u64 newval)
+{
+	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
+	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
+	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
+	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
+}
+
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
+static inline void tg3_full_unlock(struct tg3 *tp);
+static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE;
+
+	if (tg3_flag(tp, PTP_CAPABLE)) {
+		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+	}
+
+	if (tp->ptp_clock)
+		info->phc_index = ptp_clock_index(tp->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+	return 0;
+}
+
+static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+	bool neg_adj = false;
+	u32 correction = 0;
+
+	if (ppb < 0) {
+		neg_adj = true;
+		ppb = -ppb;
+	}
+
+	/* Frequency adjustment is performed using hardware with a 24 bit
+	 * accumulator and a programmable correction value. On each clk, the
+	 * correction value gets added to the accumulator and when it
+	 * overflows, the time counter is incremented/decremented.
+	 *
+	 * So conversion from ppb to correction value is
+	 *		ppb * (1 << 24) / 1000000000
+	 */
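+	/* For example, ppb = 1000000 (i.e. 1000 ppm) gives
+	 * 1000000 * (1 << 24) / 1000000000 = 16777.
+	 */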
+	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
+		     TG3_EAV_REF_CLK_CORRECT_MASK;
+
+	tg3_full_lock(tp, 0);
+
+	if (correction)
+		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
+		     TG3_EAV_REF_CLK_CORRECT_EN |
+		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+	else
+		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
+
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+	tg3_full_lock(tp, 0);
+	tp->ptp_adjust += delta;
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	u64 ns;
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+	tg3_full_lock(tp, 0);
+	ns = tg3_refclk_read(tp);
+	ns += tp->ptp_adjust;
+	tg3_full_unlock(tp);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int tg3_ptp_settime(struct ptp_clock_info *ptp,
+			   const struct timespec64 *ts)
+{
+	u64 ns;
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+	ns = timespec64_to_ns(ts);
+
+	tg3_full_lock(tp, 0);
+	tg3_refclk_write(tp, ns);
+	tp->ptp_adjust = 0;
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static int tg3_ptp_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *rq, int on)
+{
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+	u32 clock_ctl;
+	int rval = 0;
+
+	switch (rq->type) {
+	case PTP_CLK_REQ_PEROUT:
+		if (rq->perout.index != 0)
+			return -EINVAL;
+
+		tg3_full_lock(tp, 0);
+		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+		if (on) {
+			u64 nsec;
+
+			nsec = rq->perout.start.sec * 1000000000ULL +
+			       rq->perout.start.nsec;
+
+			if (rq->perout.period.sec || rq->perout.period.nsec) {
+				netdev_warn(tp->dev,
+					    "Device supports only a one-shot timesync output, period must be 0\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+
+			if (nsec & (1ULL << 63)) {
+				netdev_warn(tp->dev,
+					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+
+			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+			tw32(TG3_EAV_WATCHDOG0_MSB,
+			     TG3_EAV_WATCHDOG0_EN |
+			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+			tw32(TG3_EAV_REF_CLCK_CTL,
+			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+		} else {
+			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+		}
+
+err_out:
+		tg3_full_unlock(tp);
+		return rval;
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info tg3_ptp_caps = {
+	.owner		= THIS_MODULE,
+	.name		= "tg3 clock",
+	.max_adj	= 250000000,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 1,
+	.n_pins		= 0,
+	.pps		= 0,
+	.adjfreq	= tg3_ptp_adjfreq,
+	.adjtime	= tg3_ptp_adjtime,
+	.gettime64	= tg3_ptp_gettime,
+	.settime64	= tg3_ptp_settime,
+	.enable		= tg3_ptp_enable,
+};
+
+static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
+				     struct skb_shared_hwtstamps *timestamp)
+{
+	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
+	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
+					   tp->ptp_adjust);
+}
+
+/* tp->lock must be held */
+static void tg3_ptp_init(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, PTP_CAPABLE))
+		return;
+
+	/* Initialize the hardware clock to the system time. */
+	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
+	tp->ptp_adjust = 0;
+	tp->ptp_info = tg3_ptp_caps;
+}
+
+/* tp->lock must be held */
+static void tg3_ptp_resume(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, PTP_CAPABLE))
+		return;
+
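+	/* Reload the hardware clock from system time, folding in any
+	 * outstanding adjtime offset.
+	 */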
+	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
+	tp->ptp_adjust = 0;
+}
+
+static void tg3_ptp_fini(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
+		return;
+
+	ptp_clock_unregister(tp->ptp_clock);
+	tp->ptp_clock = NULL;
+	tp->ptp_adjust = 0;
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+	return tp->irq_sync;
+}
+
+static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
+{
+	int i;
+
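+	/* Offset the destination by the register offset so the dump buffer
+	 * mirrors the layout of the register map.
+	 */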
+	dst = (u32 *)((u8 *)dst + off);
+	for (i = 0; i < len; i += sizeof(u32))
+		*dst++ = tr32(off + i);
+}
+
+static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
+{
+	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
+	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
+	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
+	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
+	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
+	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
+	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
+	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
+	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
+	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
+	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
+
+	if (tg3_flag(tp, SUPPORT_MSIX))
+		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
+
+	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
+	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
+	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
+		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
+		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
+	}
+
+	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
+	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
+	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
+	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
+
+	if (tg3_flag(tp, NVRAM))
+		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
+}
+
+static void tg3_dump_state(struct tg3 *tp)
+{
+	int i;
+	u32 *regs;
+
+	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+	if (!regs)
+		return;
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* Read up to but not including private PCI registers */
+		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
+			regs[i / sizeof(u32)] = tr32(i);
+	} else
+		tg3_dump_legacy_regs(tp, regs);
+
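+	/* Print the dump four words per line, skipping rows that are
+	 * entirely zero.
+	 */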
+	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
+		if (!regs[i + 0] && !regs[i + 1] &&
+		    !regs[i + 2] && !regs[i + 3])
+			continue;
+
+		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+			   i * 4,
+			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
+	}
+
+	kfree(regs);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		/* SW status block */
+		netdev_err(tp->dev,
+			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+			   i,
+			   tnapi->hw_status->status,
+			   tnapi->hw_status->status_tag,
+			   tnapi->hw_status->rx_jumbo_consumer,
+			   tnapi->hw_status->rx_consumer,
+			   tnapi->hw_status->rx_mini_consumer,
+			   tnapi->hw_status->idx[0].rx_producer,
+			   tnapi->hw_status->idx[0].tx_consumer);
+
+		netdev_err(tp->dev,
+		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
+			   i,
+			   tnapi->last_tag, tnapi->last_irq_tag,
+			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
+			   tnapi->rx_rcb_ptr,
+			   tnapi->prodring.rx_std_prod_idx,
+			   tnapi->prodring.rx_std_cons_idx,
+			   tnapi->prodring.rx_jmb_prod_idx,
+			   tnapi->prodring.rx_jmb_cons_idx);
+	}
+}
+
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
+	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+	netdev_warn(tp->dev,
+		    "The system may be re-ordering memory-mapped I/O "
+		    "cycles to the network device, attempting to recover. "
+		    "Please report the problem to the driver maintainer "
+		    "and include system chipset information.\n");
+
+	tg3_flag_set(tp, TX_RECOVERY_PENDING);
+}
+
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
+{
+	/* Tell compiler to fetch tx indices from memory. */
+	barrier();
+	return tnapi->tx_pending -
+	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
+}
+
+/* Tigon3 never reports partial packet sends.  So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+	u32 sw_idx = tnapi->tx_cons;
+	struct netdev_queue *txq;
+	int index = tnapi - tp->napi;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+
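+	/* With TSS enabled, TX rings start at napi[1], so convert the NAPI
+	 * index into a TX queue index.
+	 */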
+	if (tg3_flag(tp, ENABLE_TSS))
+		index--;
+
+	txq = netdev_get_tx_queue(tp->dev, index);
+
+	while (sw_idx != hw_idx) {
+		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		struct sk_buff *skb = ri->skb;
+		int i, tx_bug = 0;
+
+		if (unlikely(skb == NULL)) {
+			tg3_tx_recover(tp);
+			return;
+		}
+
+		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
+			struct skb_shared_hwtstamps timestamp;
+			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
+			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+
+			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+
+			skb_tstamp_tx(skb, &timestamp);
+		}
+
+		pci_unmap_single(tp->pdev,
+				 dma_unmap_addr(ri, mapping),
+				 skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+
+		ri->skb = NULL;
+
+		while (ri->fragmented) {
+			ri->fragmented = false;
+			sw_idx = NEXT_TX(sw_idx);
+			ri = &tnapi->tx_buffers[sw_idx];
+		}
+
+		sw_idx = NEXT_TX(sw_idx);
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			ri = &tnapi->tx_buffers[sw_idx];
+			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+				tx_bug = 1;
+
+			pci_unmap_page(tp->pdev,
+				       dma_unmap_addr(ri, mapping),
+				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+				       PCI_DMA_TODEVICE);
+
+			while (ri->fragmented) {
+				ri->fragmented = false;
+				sw_idx = NEXT_TX(sw_idx);
+				ri = &tnapi->tx_buffers[sw_idx];
+			}
+
+			sw_idx = NEXT_TX(sw_idx);
+		}
+
+		pkts_compl++;
+		bytes_compl += skb->len;
+
+		dev_kfree_skb_any(skb);
+
+		if (unlikely(tx_bug)) {
+			tg3_tx_recover(tp);
+			return;
+		}
+	}
+
+	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
+	tnapi->tx_cons = sw_idx;
+
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq) &&
+		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
+		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+}
+
+static void tg3_frag_free(bool is_frag, void *data)
+{
+	if (is_frag)
+		skb_free_frag(data);
+	else
+		kfree(data);
+}
+
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	if (!ri->data)
+		return;
+
+	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
+			 map_sz, PCI_DMA_FROMDEVICE);
+	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
+	ri->data = NULL;
+}
+
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses.  For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address).  Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
+ */
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+			     u32 opaque_key, u32 dest_idx_unmasked,
+			     unsigned int *frag_size)
+{
+	struct tg3_rx_buffer_desc *desc;
+	struct ring_info *map;
+	u8 *data;
+	dma_addr_t mapping;
+	int skb_size, data_size, dest_idx;
+
+	switch (opaque_key) {
+	case RXD_OPAQUE_RING_STD:
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+		desc = &tpr->rx_std[dest_idx];
+		map = &tpr->rx_std_buffers[dest_idx];
+		data_size = tp->rx_pkt_map_sz;
+		break;
+
+	case RXD_OPAQUE_RING_JUMBO:
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+		desc = &tpr->rx_jmb[dest_idx].std;
+		map = &tpr->rx_jmb_buffers[dest_idx];
+		data_size = TG3_RX_JMB_MAP_SZ;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Do not overwrite any of the map or rp information
+	 * until we are sure we can commit to a new buffer.
+	 *
+	 * Callers depend upon this behavior and assume that
+	 * we leave everything unchanged if we fail.
+	 */
+	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
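+	/* Buffers that fit within a page come from the page-fragment
+	 * allocator; larger (jumbo) buffers fall back to kmalloc().
+	 */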
+	if (skb_size <= PAGE_SIZE) {
+		data = netdev_alloc_frag(skb_size);
+		*frag_size = skb_size;
+	} else {
+		data = kmalloc(skb_size, GFP_ATOMIC);
+		*frag_size = 0;
+	}
+	if (!data)
+		return -ENOMEM;
+
+	mapping = pci_map_single(tp->pdev,
+				 data + TG3_RX_OFFSET(tp),
+				 data_size,
+				 PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
+		tg3_frag_free(skb_size <= PAGE_SIZE, data);
+		return -EIO;
+	}
+
+	map->data = data;
+	dma_unmap_addr_set(map, mapping, mapping);
+
+	desc->addr_hi = ((u64)mapping >> 32);
+	desc->addr_lo = ((u64)mapping & 0xffffffff);
+
+	return data_size;
+}
+
+/* We only need to move the address over because the other
+ * members of the RX descriptor are invariant.  See notes above
+ * tg3_alloc_rx_data for full details.
+ */
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+			   struct tg3_rx_prodring_set *dpr,
+			   u32 opaque_key, int src_idx,
+			   u32 dest_idx_unmasked)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
+	struct ring_info *src_map, *dest_map;
+	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
+	int dest_idx;
+
+	switch (opaque_key) {
+	case RXD_OPAQUE_RING_STD:
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+		dest_desc = &dpr->rx_std[dest_idx];
+		dest_map = &dpr->rx_std_buffers[dest_idx];
+		src_desc = &spr->rx_std[src_idx];
+		src_map = &spr->rx_std_buffers[src_idx];
+		break;
+
+	case RXD_OPAQUE_RING_JUMBO:
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+		dest_desc = &dpr->rx_jmb[dest_idx].std;
+		dest_map = &dpr->rx_jmb_buffers[dest_idx];
+		src_desc = &spr->rx_jmb[src_idx].std;
+		src_map = &spr->rx_jmb_buffers[src_idx];
+		break;
+
+	default:
+		return;
+	}
+
+	dest_map->data = src_map->data;
+	dma_unmap_addr_set(dest_map, mapping,
+			   dma_unmap_addr(src_map, mapping));
+	dest_desc->addr_hi = src_desc->addr_hi;
+	dest_desc->addr_lo = src_desc->addr_lo;
+
+	/* Ensure that the update to the skb happens after the physical
+	 * addresses have been transferred to the new BD location.
+	 */
+	smp_wmb();
+
+	src_map->data = NULL;
+}
+
+/* The RX ring scheme is composed of multiple rings which post fresh
+ * buffers to the chip, and one special ring the chip uses to report
+ * status back to the host.
+ *
+ * The special ring reports the status of received packets to the
+ * host.  The chip does not write into the original descriptor the
+ * RX buffer was obtained from.  The chip simply takes the original
+ * descriptor as provided by the host, updates the status and length
+ * field, then writes this into the next status ring entry.
+ *
+ * Each ring the host uses to post buffers to the chip is described
+ * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
+ * it is first placed into the on-chip RAM.  When the packet's length
+ * is known, the chip walks down the TG3_BDINFO entries to select the
+ * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
+ * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
+ *
+ * The "separate ring for rx status" scheme may sound queer, but it makes
+ * sense from a cache coherency perspective.  If only the host writes
+ * to the buffer post rings, and only the chip writes to the rx status
+ * rings, then cache lines never move beyond shared-modified state.
+ * If both the host and chip were to write into the same ring, cache line
+ * eviction could occur since both entities want it in an exclusive state.
+ */
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
+	u32 work_mask, rx_std_posted = 0;
+	u32 std_prod_idx, jmb_prod_idx;
+	u32 sw_idx = tnapi->rx_rcb_ptr;
+	u16 hw_idx;
+	int received;
+	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
+
+	hw_idx = *(tnapi->rx_rcb_prod_idx);
+	/*
+	 * We need to order the read of hw_idx and the read of
+	 * the opaque cookie.
+	 */
+	rmb();
+	work_mask = 0;
+	received = 0;
+	std_prod_idx = tpr->rx_std_prod_idx;
+	jmb_prod_idx = tpr->rx_jmb_prod_idx;
+	while (sw_idx != hw_idx && budget > 0) {
+		struct ring_info *ri;
+		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
+		unsigned int len;
+		struct sk_buff *skb;
+		dma_addr_t dma_addr;
+		u32 opaque_key, desc_idx, *post_ptr;
+		u8 *data;
+		u64 tstamp = 0;
+
+		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+		if (opaque_key == RXD_OPAQUE_RING_STD) {
+			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
+			dma_addr = dma_unmap_addr(ri, mapping);
+			data = ri->data;
+			post_ptr = &std_prod_idx;
+			rx_std_posted++;
+		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
+			dma_addr = dma_unmap_addr(ri, mapping);
+			data = ri->data;
+			post_ptr = &jmb_prod_idx;
+		} else
+			goto next_pkt_nopost;
+
+		work_mask |= opaque_key;
+
+		if (desc->err_vlan & RXD_ERR_MASK) {
+		drop_it:
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
+				       desc_idx, *post_ptr);
+		drop_it_no_recycle:
+			/* Other statistics kept track of by card. */
+			tp->rx_dropped++;
+			goto next_pkt;
+		}
+
+		prefetch(data + TG3_RX_OFFSET(tp));
+		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
+		      ETH_FCS_LEN;
+
+		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
+		     RXD_FLAG_PTPSTAT_PTPV1 ||
+		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
+		     RXD_FLAG_PTPSTAT_PTPV2) {
+			tstamp = tr32(TG3_RX_TSTAMP_LSB);
+			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
+		}
+
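+		/* Large packets take the zero-copy path: hand the buffer to
+		 * the stack via build_skb() and post a fresh replacement.
+		 * Small packets are copied so the original buffer can be
+		 * recycled back to the producer ring.
+		 */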
+		if (len > TG3_RX_COPY_THRESH(tp)) {
+			int skb_size;
+			unsigned int frag_size;
+
+			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
+						    *post_ptr, &frag_size);
+			if (skb_size < 0)
+				goto drop_it;
+
+			pci_unmap_single(tp->pdev, dma_addr, skb_size,
+					 PCI_DMA_FROMDEVICE);
+
+			/* Ensure that the update to the data happens
+			 * after the usage of the old DMA mapping.
+			 */
+			smp_wmb();
+
+			ri->data = NULL;
+
+			skb = build_skb(data, frag_size);
+			if (!skb) {
+				tg3_frag_free(frag_size != 0, data);
+				goto drop_it_no_recycle;
+			}
+			skb_reserve(skb, TG3_RX_OFFSET(tp));
+		} else {
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
+				       desc_idx, *post_ptr);
+
+			skb = netdev_alloc_skb(tp->dev,
+					       len + TG3_RAW_IP_ALIGN);
+			if (skb == NULL)
+				goto drop_it_no_recycle;
+
+			skb_reserve(skb, TG3_RAW_IP_ALIGN);
+			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			memcpy(skb->data,
+			       data + TG3_RX_OFFSET(tp),
+			       len);
+			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+		}
+
+		skb_put(skb, len);
+		if (tstamp)
+			tg3_hwclock_to_timestamp(tp, tstamp,
+						 skb_hwtstamps(skb));
+
+		if ((tp->dev->features & NETIF_F_RXCSUM) &&
+		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb_checksum_none_assert(skb);
+
+		skb->protocol = eth_type_trans(skb, tp->dev);
+
+		if (len > (tp->dev->mtu + ETH_HLEN) &&
+		    skb->protocol != htons(ETH_P_8021Q) &&
+		    skb->protocol != htons(ETH_P_8021AD)) {
+			dev_kfree_skb_any(skb);
+			goto drop_it_no_recycle;
+		}
+
+		if (desc->type_flags & RXD_FLAG_VLAN &&
+		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       desc->err_vlan & RXD_VLAN_MASK);
+
+		napi_gro_receive(&tnapi->napi, skb);
+
+		received++;
+		budget--;
+
+next_pkt:
+		(*post_ptr)++;
+
+		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+			work_mask &= ~RXD_OPAQUE_RING_STD;
+			rx_std_posted = 0;
+		}
+next_pkt_nopost:
+		sw_idx++;
+		sw_idx &= tp->rx_ret_ring_mask;
+
+		/* Refresh hw_idx to see if there is new work */
+		if (sw_idx == hw_idx) {
+			hw_idx = *(tnapi->rx_rcb_prod_idx);
+			rmb();
+		}
+	}
+
+	/* ACK the status ring. */
+	tnapi->rx_rcb_ptr = sw_idx;
+	tw32_rx_mbox(tnapi->consmbox, sw_idx);
+
+	/* Refill RX ring(s). */
+	if (!tg3_flag(tp, ENABLE_RSS)) {
+		/* Sync BD data before updating mailbox */
+		wmb();
+
+		if (work_mask & RXD_OPAQUE_RING_STD) {
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+		}
+		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+			tpr->rx_jmb_prod_idx = jmb_prod_idx &
+					       tp->rx_jmb_ring_mask;
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     tpr->rx_jmb_prod_idx);
+		}
+		mmiowb();
+	} else if (work_mask) {
+		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+		 * updated before the producer indices can be updated.
+		 */
+		smp_wmb();
+
+		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
+
+		if (tnapi != &tp->napi[1]) {
+			tp->rx_refill = true;
+			napi_schedule(&tp->napi[1].napi);
+		}
+	}
+
+	return received;
+}
+
+static void tg3_poll_link(struct tg3 *tp)
+{
+	/* handle link change and other phy events */
+	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
+		if (sblk->status & SD_STATUS_LINK_CHG) {
+			sblk->status = SD_STATUS_UPDATED |
+				       (sblk->status & ~SD_STATUS_LINK_CHG);
+			spin_lock(&tp->lock);
+			if (tg3_flag(tp, USE_PHYLIB)) {
+				tw32_f(MAC_STATUS,
+				     (MAC_STATUS_SYNC_CHANGED |
+				      MAC_STATUS_CFG_CHANGED |
+				      MAC_STATUS_MI_COMPLETION |
+				      MAC_STATUS_LNKSTATE_CHANGED));
+				udelay(40);
+			} else
+				tg3_setup_phy(tp, false);
+			spin_unlock(&tp->lock);
+		}
+	}
+}
+
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+				struct tg3_rx_prodring_set *dpr,
+				struct tg3_rx_prodring_set *spr)
+{
+	u32 si, di, cpycnt, src_prod_idx;
+	int i, err = 0;
+
+	while (1) {
+		src_prod_idx = spr->rx_std_prod_idx;
+
+		/* Make sure updates to the rx_std_buffers[] entries and the
+		 * standard producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_std_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_std_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+		else
+			cpycnt = tp->rx_std_ring_mask + 1 -
+				 spr->rx_std_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
+
+		si = spr->rx_std_cons_idx;
+		di = dpr->rx_std_prod_idx;
+
+		for (i = di; i < di + cpycnt; i++) {
+			if (dpr->rx_std_buffers[i].data) {
+				cpycnt = i - di;
+				err = -ENOSPC;
+				break;
+			}
+		}
+
+		if (!cpycnt)
+			break;
+
+		/* Ensure that updates to the rx_std_buffers ring and the
+		 * shadowed hardware producer ring from tg3_recycle_skb() are
+		 * ordered correctly WRT the skb check above.
+		 */
+		smp_rmb();
+
+		memcpy(&dpr->rx_std_buffers[di],
+		       &spr->rx_std_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_std[si];
+			dbd = &dpr->rx_std[di];
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+	}
+
+	while (1) {
+		src_prod_idx = spr->rx_jmb_prod_idx;
+
+		/* Make sure updates to the rx_jmb_buffers[] entries and
+		 * the jumbo producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_jmb_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_jmb_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+		else
+			cpycnt = tp->rx_jmb_ring_mask + 1 -
+				 spr->rx_jmb_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
+
+		si = spr->rx_jmb_cons_idx;
+		di = dpr->rx_jmb_prod_idx;
+
+		for (i = di; i < di + cpycnt; i++) {
+			if (dpr->rx_jmb_buffers[i].data) {
+				cpycnt = i - di;
+				err = -ENOSPC;
+				break;
+			}
+		}
+
+		if (!cpycnt)
+			break;
+
+		/* Ensure that updates to the rx_jmb_buffers ring and the
+		 * shadowed hardware producer ring from tg3_recycle_skb() are
+		 * ordered correctly WRT the skb check above.
+		 */
+		smp_rmb();
+
+		memcpy(&dpr->rx_jmb_buffers[di],
+		       &spr->rx_jmb_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_jmb[si].std;
+			dbd = &dpr->rx_jmb[di].std;
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+	}
+
+	return err;
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
+
+	/* run TX completion thread */
+	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+		tg3_tx(tnapi);
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			return work_done;
+	}
+
+	if (!tnapi->rx_rcb_prod_idx)
+		return work_done;
+
+	/* run RX thread, within the bounds set by NAPI.
+	 * All RX "locking" is done by ensuring outside
+	 * code synchronizes with tg3->napi.poll()
+	 */
+	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+		work_done += tg3_rx(tnapi, budget - work_done);
+
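+	/* With RSS, only napi[1] refills the hardware producer rings:
+	 * buffers recycled by every RX vector are moved into napi[0]'s
+	 * prodring and the producer mailboxes are updated here.
+	 */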
+	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
+		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
+		int i, err = 0;
+		u32 std_prod_idx = dpr->rx_std_prod_idx;
+		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
+
+		tp->rx_refill = false;
+		for (i = 1; i <= tp->rxq_cnt; i++)
+			err |= tg3_rx_prodring_xfer(tp, dpr,
+						    &tp->napi[i].prodring);
+
+		wmb();
+
+		if (std_prod_idx != dpr->rx_std_prod_idx)
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     dpr->rx_std_prod_idx);
+
+		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     dpr->rx_jmb_prod_idx);
+
+		mmiowb();
+
+		if (err)
+			tw32_f(HOSTCC_MODE, tp->coal_now);
+	}
+
+	return work_done;
+}
+
+static inline void tg3_reset_task_schedule(struct tg3 *tp)
+{
+	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+		schedule_work(&tp->reset_task);
+}
+
+static inline void tg3_reset_task_cancel(struct tg3 *tp)
+{
+	cancel_work_sync(&tp->reset_task);
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
+	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		/* tp->last_tag is used in tg3_int_reenable() below
+		 * to tell the hw how much work has been processed,
+		 * so we must read it before checking for more work.
+		 */
+		tnapi->last_tag = sblk->status_tag;
+		tnapi->last_irq_tag = tnapi->last_tag;
+		rmb();
+
+		/* check for RX/TX work to do */
+		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
+
+			/* This test here is not race free, but will reduce
+			 * the number of interrupts by looping again.
+			 */
+			if (tnapi == &tp->napi[1] && tp->rx_refill)
+				continue;
+
+			napi_complete_done(napi, work_done);
+			/* Reenable interrupts. */
+			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+
+			/* This test here is synchronized by napi_schedule()
+			 * and napi_complete() to close the race condition.
+			 */
+			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
+				tw32(HOSTCC_MODE, tp->coalesce_mode |
+						  HOSTCC_MODE_ENABLE |
+						  tnapi->coal_now);
+			}
+			mmiowb();
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	tg3_reset_task_schedule(tp);
+	return work_done;
+}
+
+static void tg3_process_error(struct tg3 *tp)
+{
+	u32 val;
+	bool real_error = false;
+
+	if (tg3_flag(tp, ERROR_PROCESSED))
+		return;
+
+	/* Check Flow Attention register */
+	val = tr32(HOSTCC_FLOW_ATTN);
+	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
+		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
+		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
+		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (!real_error)
+		return;
+
+	tg3_dump_state(tp);
+
+	tg3_flag_set(tp, ERROR_PROCESSED);
+	tg3_reset_task_schedule(tp);
+}
+
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		if (sblk->status & SD_STATUS_ERROR)
+			tg3_process_error(tp);
+
+		tg3_poll_link(tp);
+
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		if (tg3_flag(tp, TAGGED_STATUS)) {
+			/* tp->last_tag is used in tg3_int_reenable() below
+			 * to tell the hw how much work has been processed,
+			 * so we must read it before checking for more work.
+			 */
+			tnapi->last_tag = sblk->status_tag;
+			tnapi->last_irq_tag = tnapi->last_tag;
+			rmb();
+		} else
+			sblk->status &= ~SD_STATUS_UPDATED;
+
+		if (likely(!tg3_has_work(tnapi))) {
+			napi_complete_done(napi, work_done);
+			tg3_int_reenable(tnapi);
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	tg3_reset_task_schedule(tp);
+	return work_done;
+}
+
+static void tg3_napi_disable(struct tg3 *tp)
+{
+	int i;
+
+	for (i = tp->irq_cnt - 1; i >= 0; i--)
+		napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		napi_enable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_init(struct tg3 *tp)
+{
+	int i;
+
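+	/* The first vector runs tg3_poll(), which also handles link changes
+	 * and error events; the remaining MSI-X vectors use the lighter
+	 * tg3_poll_msix().
+	 */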
+	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+	for (i = 1; i < tp->irq_cnt; i++)
+		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+}
+
+static void tg3_napi_fini(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		netif_napi_del(&tp->napi[i].napi);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	tg3_napi_disable(tp);
+	netif_carrier_off(tp->dev);
+	netif_tx_disable(tp->dev);
+}
+
+/* tp->lock must be held */
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+	tg3_ptp_resume(tp);
+
+	/* NOTE: unconditional netif_tx_wake_all_queues is only
+	 * appropriate so long as all callers are assured to
+	 * have free tx slots (such as after tg3_init_hw)
+	 */
+	netif_tx_wake_all_queues(tp->dev);
+
+	if (tp->link_up)
+		netif_carrier_on(tp->dev);
+
+	tg3_napi_enable(tp);
+	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
+}
+
+static void tg3_irq_quiesce(struct tg3 *tp)
+	__releases(tp->lock)
+	__acquires(tp->lock)
+{
+	int i;
+
+	BUG_ON(tp->irq_sync);
+
+	tp->irq_sync = 1;
+	smp_mb();
+
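+	/* synchronize_irq() may sleep, so tp->lock must be dropped here. */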
+	spin_unlock_bh(&tp->lock);
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		synchronize_irq(tp->napi[i].irq_vec);
+
+	spin_lock_bh(&tp->lock);
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be synchronized
+ * with as well.  Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+	spin_lock_bh(&tp->lock);
+	if (irq_sync)
+		tg3_irq_quiesce(tp);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+	spin_unlock_bh(&tp->lock);
+}
+
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+
+	prefetch(tnapi->hw_status);
+	if (tnapi->rx_rcb)
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+	if (likely(!tg3_irq_sync(tp)))
+		napi_schedule(&tnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* MSI ISR - No need to check for interrupt sharing and no need to
+ * flush status block and interrupt mailbox. PCI ordering rules
+ * guarantee that MSI will arrive after the status block.
+ */
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+
+	prefetch(tnapi->hw_status);
+	if (tnapi->rx_rcb)
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 */
+	tw32_mailbox(tnapi->int_mbox, 0x00000001);
+	if (likely(!tg3_irq_sync(tp)))
+		napi_schedule(&tnapi->napi);
+
+	return IRQ_RETVAL(1);
+}
+
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int handled = 1;
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block that was posted prior to the
+	 * interrupt.  Reading the PCI State register will confirm whether
+	 * the interrupt is ours and will flush the status block.
+	 */
+	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
+		if (tg3_flag(tp, CHIP_RESETTING) ||
+		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+			handled = 0;
+			goto out;
+		}
+	}
+
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 *
+	 * Flush the mailbox to de-assert the IRQ immediately to prevent
+	 * spurious interrupts.  The flush impacts performance but
+	 * excessive spurious interrupts can be worse in some cases.
+	 */
+	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	if (tg3_irq_sync(tp))
+		goto out;
+	sblk->status &= ~SD_STATUS_UPDATED;
+	if (likely(tg3_has_work(tnapi))) {
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+		napi_schedule(&tnapi->napi);
+	} else {
+		/* No work, shared interrupt perhaps?  re-enable
+		 * interrupts, and flush that PCI write
+		 */
+		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			       0x00000000);
+	}
+out:
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int handled = 1;
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block that was posted prior to the
+	 * interrupt.  Reading the PCI State register will confirm whether
+	 * the interrupt is ours and will flush the status block.
+	 */
+	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
+		if (tg3_flag(tp, CHIP_RESETTING) ||
+		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+			handled = 0;
+			goto out;
+		}
+	}
+
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 *
+	 * Flush the mailbox to de-assert the IRQ immediately to prevent
+	 * spurious interrupts.  The flush impacts performance but
+	 * excessive spurious interrupts can be worse in some cases.
+	 */
+	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+
+	/*
+	 * In a shared interrupt configuration, sometimes other devices'
+	 * interrupts will scream.  We record the current status tag here
+	 * so that the above check can report that the screaming interrupts
+	 * are unhandled.  Eventually they will be silenced.
+	 */
+	tnapi->last_irq_tag = sblk->status_tag;
+
+	if (tg3_irq_sync(tp))
+		goto out;
+
+	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+	napi_schedule(&tnapi->napi);
+
+out:
+	return IRQ_RETVAL(handled);
+}
+
+/* ISR for interrupt test */
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+		tg3_disable_ints(tp);
+		return IRQ_RETVAL(1);
+	}
+	return IRQ_RETVAL(0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tg3_poll_controller(struct net_device *dev)
+{
+	int i;
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_irq_sync(tp))
+		return;
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+}
+#endif
+
+static void tg3_tx_timeout(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (netif_msg_tx_err(tp)) {
+		netdev_err(dev, "transmit timed out, resetting\n");
+		tg3_dump_state(tp);
+	}
+
+	tg3_reset_task_schedule(tp);
+}
+
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+	u32 base = (u32) mapping & 0xffffffff;
+
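+	/* Unsigned wraparound of the low 32 bits means the buffer (plus
+	 * 8 bytes of slack) straddles a 4GB boundary.
+	 */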
+	return base + len + 8 < base;
+}
+
+/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
+ * of any 4GB boundaries: 4G, 8G, etc
+ */
+static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					   u32 len, u32 mss)
+{
+	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
+		u32 base = (u32) mapping & 0xffffffff;
+
+		return ((base + len + (mss & 0x3fff)) < base);
+	}
+	return 0;
+}
+
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					  int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+	if (tg3_flag(tp, 40BIT_DMA_BUG))
+		return ((u64) mapping + len) > DMA_BIT_MASK(40);
+	return 0;
+#else
+	return 0;
+#endif
+}
+
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+				 dma_addr_t mapping, u32 len, u32 flags,
+				 u32 mss, u32 vlan)
+{
+	txbd->addr_hi = ((u64) mapping >> 32);
+	txbd->addr_lo = ((u64) mapping & 0xffffffff);
+	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+			    dma_addr_t map, u32 len, u32 flags,
+			    u32 mss, u32 vlan)
+{
+	struct tg3 *tp = tnapi->tp;
+	bool hwbug = false;
+
+	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+		hwbug = true;
+
+	if (tg3_4g_overflow_test(map, len))
+		hwbug = true;
+
+	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
+		hwbug = true;
+
+	if (tg3_40bit_overflow_test(tp, map, len))
+		hwbug = true;
+
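+	/* Mappings longer than the controller's DMA limit are split across
+	 * multiple BDs; intermediate BDs are written without TXD_FLAG_END.
+	 */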
+	if (tp->dma_limit) {
+		u32 prvidx = *entry;
+		u32 tmp_flag = flags & ~TXD_FLAG_END;
+		while (len > tp->dma_limit && *budget) {
+			u32 frag_len = tp->dma_limit;
+			len -= tp->dma_limit;
+
+			/* Avoid the 8-byte DMA problem */
+			if (len <= 8) {
+				len += tp->dma_limit / 2;
+				frag_len = tp->dma_limit / 2;
+			}
+
+			tnapi->tx_buffers[*entry].fragmented = true;
+
+			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+				      frag_len, tmp_flag, mss, vlan);
+			*budget -= 1;
+			prvidx = *entry;
+			*entry = NEXT_TX(*entry);
+
+			map += frag_len;
+		}
+
+		if (len) {
+			if (*budget) {
+				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+					      len, flags, mss, vlan);
+				*budget -= 1;
+				*entry = NEXT_TX(*entry);
+			} else {
+				hwbug = true;
+				tnapi->tx_buffers[prvidx].fragmented = false;
+			}
+		}
+	} else {
+		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+			      len, flags, mss, vlan);
+		*entry = NEXT_TX(*entry);
+	}
+
+	return hwbug;
+}
+
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
+{
+	int i;
+	struct sk_buff *skb;
+	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+	skb = txb->skb;
+	txb->skb = NULL;
+
+	pci_unmap_single(tnapi->tp->pdev,
+			 dma_unmap_addr(txb, mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+
+	while (txb->fragmented) {
+		txb->fragmented = false;
+		entry = NEXT_TX(entry);
+		txb = &tnapi->tx_buffers[entry];
+	}
+
+	for (i = 0; i <= last; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		entry = NEXT_TX(entry);
+		txb = &tnapi->tx_buffers[entry];
+
+		pci_unmap_page(tnapi->tp->pdev,
+			       dma_unmap_addr(txb, mapping),
+			       skb_frag_size(frag), PCI_DMA_TODEVICE);
+
+		while (txb->fragmented) {
+			txb->fragmented = false;
+			entry = NEXT_TX(entry);
+			txb = &tnapi->tx_buffers[entry];
+		}
+	}
+}
+
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+				       struct sk_buff **pskb,
+				       u32 *entry, u32 *budget,
+				       u32 base_flags, u32 mss, u32 vlan)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct sk_buff *new_skb, *skb = *pskb;
+	dma_addr_t new_addr = 0;
+	int ret = 0;
+
+	if (tg3_asic_rev(tp) != ASIC_REV_5701)
+		new_skb = skb_copy(skb, GFP_ATOMIC);
+	else {
+		int more_headroom = 4 - ((unsigned long)skb->data & 3);
+
+		new_skb = skb_copy_expand(skb,
+					  skb_headroom(skb) + more_headroom,
+					  skb_tailroom(skb), GFP_ATOMIC);
+	}
+
+	if (!new_skb) {
+		ret = -1;
+	} else {
+		/* New SKB is guaranteed to be linear. */
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			dev_kfree_skb_any(new_skb);
+			ret = -1;
+		} else {
+			u32 save_entry = *entry;
+
+			base_flags |= TXD_FLAG_END;
+
+			tnapi->tx_buffers[*entry].skb = new_skb;
+			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
+					   mapping, new_addr);
+
+			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+					    new_skb->len, base_flags,
+					    mss, vlan)) {
+				tg3_tx_skb_unmap(tnapi, save_entry, -1);
+				dev_kfree_skb_any(new_skb);
+				ret = -1;
+			}
+		}
+	}
+
+	dev_kfree_skb_any(skb);
+	*pskb = new_skb;
+	return ret;
+}
+
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+	/* Check if we will never have enough descriptors,
+	 * as gso_segs can exceed the current ring size
+	 */
+	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Use GSO to work around TSO packets that hit the HW bug conditions
+ * indicated in tg3_tx_frag_set()
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+		       struct netdev_queue *txq, struct sk_buff *skb)
+{
+	struct sk_buff *segs, *nskb;
+	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+
+	/* Estimate the number of fragments in the worst case */
+	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
+			return NETDEV_TX_BUSY;
+
+		netif_tx_wake_queue(txq);
+	}
+
+	segs = skb_gso_segment(skb, tp->dev->features &
+				    ~(NETIF_F_TSO | NETIF_F_TSO6));
+	if (IS_ERR(segs) || !segs)
+		goto tg3_tso_bug_end;
+
+	do {
+		nskb = segs;
+		segs = segs->next;
+		nskb->next = NULL;
+		tg3_start_xmit(nskb, tp->dev);
+	} while (segs);
+
+tg3_tso_bug_end:
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for all devices */
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 len, entry, base_flags, mss, vlan = 0;
+	u32 budget;
+	int i = -1, would_hit_hwbug;
+	dma_addr_t mapping;
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
+	unsigned int last;
+	struct iphdr *iph = NULL;
+	struct tcphdr *tcph = NULL;
+	__sum16 tcp_csum = 0, ip_csum = 0;
+	__be16 ip_tot_len = 0;
+
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tg3_flag(tp, ENABLE_TSS))
+		tnapi++;
+
+	budget = tg3_tx_avail(tnapi);
+
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->napi.poll inside of a software
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
+	 */
+	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+
+			/* This is a hard error, log it. */
+			netdev_err(dev,
+				   "BUG! Tx Ring full when queue awake!\n");
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	entry = tnapi->tx_prod;
+	base_flags = 0;
+
+	mss = skb_shinfo(skb)->gso_size;
+	if (mss) {
+		u32 tcp_opt_len, hdr_len;
+
+		if (skb_cow_head(skb, 0))
+			goto drop;
+
+		iph = ip_hdr(skb);
+		tcp_opt_len = tcp_optlen(skb);
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+
+		/* HW/FW can not correctly segment packets that have been
+		 * vlan encapsulated.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q) ||
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (tg3_tso_bug_gso_check(tnapi, skb))
+				return tg3_tso_bug(tp, tnapi, txq, skb);
+			goto drop;
+		}
+
+		if (!skb_is_gso_v6(skb)) {
+			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+			    tg3_flag(tp, TSO_BUG)) {
+				if (tg3_tso_bug_gso_check(tnapi, skb))
+					return tg3_tso_bug(tp, tnapi, txq, skb);
+				goto drop;
+			}
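+			/* Save the original header fields so they can be
+			 * restored if we end up falling back to GSO below.
+			 */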
+			ip_csum = iph->check;
+			ip_tot_len = iph->tot_len;
+			iph->check = 0;
+			iph->tot_len = htons(mss + hdr_len);
+		}
+
+		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+			       TXD_FLAG_CPU_POST_DMA);
+
+		tcph = tcp_hdr(skb);
+		tcp_csum = tcph->check;
+
+		if (tg3_flag(tp, HW_TSO_1) ||
+		    tg3_flag(tp, HW_TSO_2) ||
+		    tg3_flag(tp, HW_TSO_3)) {
+			tcph->check = 0;
+			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+		} else {
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 0, IPPROTO_TCP, 0);
+		}
+
+		if (tg3_flag(tp, HW_TSO_3)) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tg3_flag(tp, HW_TSO_2))
+			mss |= hdr_len << 9;
+		else if (tg3_flag(tp, HW_TSO_1) ||
+			 tg3_asic_rev(tp) == ASIC_REV_5705) {
+			if (tcp_opt_len || iph->ihl > 5) {
+				int tsflags;
+
+				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+				mss |= (tsflags << 11);
+			}
+		} else {
+			if (tcp_opt_len || iph->ihl > 5) {
+				int tsflags;
+
+				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+				base_flags |= tsflags << 12;
+			}
+		}
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		/* HW/FW can not correctly checksum packets that have been
+		 * vlan encapsulated.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q) ||
+		    skb->protocol == htons(ETH_P_8021AD)) {
+			if (skb_checksum_help(skb))
+				goto drop;
+		} else  {
+			base_flags |= TXD_FLAG_TCPUDP_CSUM;
+		}
+	}
+
+	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
+		base_flags |= TXD_FLAG_JMB_PKT;
+
+	if (skb_vlan_tag_present(skb)) {
+		base_flags |= TXD_FLAG_VLAN;
+		vlan = skb_vlan_tag_get(skb);
+	}
+
+	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
+	    tg3_flag(tp, TX_TSTAMP_EN)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		base_flags |= TXD_FLAG_HWTSTAMP;
+	}
+
+	len = skb_headlen(skb);
+
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping))
+		goto drop;
+
+
+	tnapi->tx_buffers[entry].skb = skb;
+	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+
+	would_hit_hwbug = 0;
+
+	if (tg3_flag(tp, 5701_DMA_BUG))
+		would_hit_hwbug = 1;
+
+	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+			    mss, vlan)) {
+		would_hit_hwbug = 1;
+	} else if (skb_shinfo(skb)->nr_frags > 0) {
+		u32 tmp_mss = mss;
+
+		if (!tg3_flag(tp, HW_TSO_1) &&
+		    !tg3_flag(tp, HW_TSO_2) &&
+		    !tg3_flag(tp, HW_TSO_3))
+			tmp_mss = 0;
+
+		/* Now loop through additional data
+		 * fragments, and queue them.
+		 */
+		last = skb_shinfo(skb)->nr_frags - 1;
+		for (i = 0; i <= last; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			len = skb_frag_size(frag);
+			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
+						   len, DMA_TO_DEVICE);
+
+			tnapi->tx_buffers[entry].skb = NULL;
+			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
+			if (dma_mapping_error(&tp->pdev->dev, mapping))
+				goto dma_error;
+
+			if (!budget ||
+			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+					    len, base_flags |
+					    ((i == last) ? TXD_FLAG_END : 0),
+					    tmp_mss, vlan)) {
+				would_hit_hwbug = 1;
+				break;
+			}
+		}
+	}
+
+	if (would_hit_hwbug) {
+		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
+			/* If it's a TSO packet, do GSO instead of
+			 * allocating and copying to a large linear SKB
+			 */
+			if (ip_tot_len) {
+				iph->check = ip_csum;
+				iph->tot_len = ip_tot_len;
+			}
+			tcph->check = tcp_csum;
+			return tg3_tso_bug(tp, tnapi, txq, skb);
+		}
+
+		/* If the workaround fails due to memory/mapping
+		 * failure, silently drop this packet.
+		 */
+		entry = tnapi->tx_prod;
+		budget = tg3_tx_avail(tnapi);
+		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
+						base_flags, mss, vlan))
+			goto drop_nofree;
+	}
+
+	skb_tx_timestamp(skb);
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/* Sync BD data before updating mailbox */
+	wmb();
+
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+			netif_tx_wake_queue(txq);
+	}
+
+	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+		/* Packets are ready, update Tx producer idx on card. */
+		tw32_tx_mbox(tnapi->prodmbox, entry);
+		mmiowb();
+	}
+
+	return NETDEV_TX_OK;
+
+dma_error:
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
+	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+drop:
+	dev_kfree_skb_any(skb);
+drop_nofree:
+	tp->tx_dropped++;
+	return NETDEV_TX_OK;
+}
+
+static void tg3_mac_loopback(struct tg3 *tp, bool enable)
+{
+	if (enable) {
+		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
+				  MAC_MODE_PORT_MODE_MASK);
+
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+
+		if (!tg3_flag(tp, 5705_PLUS))
+			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	} else {
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+
+		if (tg3_flag(tp, 5705_PLUS) ||
+		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
+		    tg3_asic_rev(tp) == ASIC_REV_5700)
+			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+	}
+
+	tw32(MAC_MODE, tp->mac_mode);
+	udelay(40);
+}
+
+static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
+{
+	u32 val, bmcr, mac_mode, ptest = 0;
+
+	tg3_phy_toggle_apd(tp, false);
+	tg3_phy_toggle_automdix(tp, false);
+
+	if (extlpbk && tg3_phy_set_extloopbk(tp))
+		return -EIO;
+
+	bmcr = BMCR_FULLDPLX;
+	switch (speed) {
+	case SPEED_10:
+		break;
+	case SPEED_100:
+		bmcr |= BMCR_SPEED100;
+		break;
+	case SPEED_1000:
+	default:
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			speed = SPEED_100;
+			bmcr |= BMCR_SPEED100;
+		} else {
+			speed = SPEED_1000;
+			bmcr |= BMCR_SPEED1000;
+		}
+	}
+
+	if (extlpbk) {
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+			tg3_readphy(tp, MII_CTRL1000, &val);
+			val |= CTL1000_AS_MASTER |
+			       CTL1000_ENABLE_MASTER;
+			tg3_writephy(tp, MII_CTRL1000, val);
+		} else {
+			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
+				MII_TG3_FET_PTEST_TRIM_2;
+			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
+		}
+	} else
+		bmcr |= BMCR_LOOPBACK;
+
+	tg3_writephy(tp, MII_BMCR, bmcr);
+
+	/* The write needs to be flushed for the FETs */
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+		tg3_readphy(tp, MII_BMCR, &bmcr);
+
+	udelay(40);
+
+	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+	    tg3_asic_rev(tp) == ASIC_REV_5785) {
+		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
+			     MII_TG3_FET_PTEST_FRC_TX_LINK |
+			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
+
+		/* The write needs to be flushed for the AC131 */
+		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
+	}
+
+	/* Reset to prevent losing 1st rx packet intermittently */
+	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+	    tg3_flag(tp, 5780_CLASS)) {
+		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+		udelay(10);
+		tw32_f(MAC_RX_MODE, tp->rx_mode);
+	}
+
+	mac_mode = tp->mac_mode &
+		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+	if (speed == SPEED_1000)
+		mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	else
+		mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
+		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+
+		if (masked_phy_id == TG3_PHY_ID_BCM5401)
+			mac_mode &= ~MAC_MODE_LINK_POLARITY;
+		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
+			mac_mode |= MAC_MODE_LINK_POLARITY;
+
+		tg3_writephy(tp, MII_TG3_EXT_CTRL,
+			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+	}
+
+	tw32(MAC_MODE, mac_mode);
+	udelay(40);
+
+	return 0;
+}
+
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		spin_lock_bh(&tp->lock);
+		tg3_mac_loopback(tp, true);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		spin_lock_bh(&tp->lock);
+		tg3_mac_loopback(tp, false);
+		/* Force link status check */
+		tg3_setup_phy(tp, true);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+		features &= ~NETIF_F_ALL_TSO;
+
+	return features;
+}
+
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
+{
+	netdev_features_t changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
+static void tg3_rx_prodring_free(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	int i;
+
+	if (tpr != &tp->napi[0].prodring) {
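+		/* Per-vector staging rings only hold buffers between their
+		 * consumer and producer indices, so free just that span.
+		 */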
+		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+		     i = (i + 1) & tp->rx_std_ring_mask)
+			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
+					tp->rx_pkt_map_sz);
+
+		if (tg3_flag(tp, JUMBO_CAPABLE)) {
+			for (i = tpr->rx_jmb_cons_idx;
+			     i != tpr->rx_jmb_prod_idx;
+			     i = (i + 1) & tp->rx_jmb_ring_mask) {
+				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
+						TG3_RX_JMB_MAP_SZ);
+			}
+		}
+
+		return;
+	}
+
+	for (i = 0; i <= tp->rx_std_ring_mask; i++)
+		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
+				tp->rx_pkt_map_sz);
+
+	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
+			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
+					TG3_RX_JMB_MAP_SZ);
+	}
+}
+
+/* Initialize rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	u32 i, rx_pkt_dma_sz;
+
+	tpr->rx_std_cons_idx = 0;
+	tpr->rx_std_prod_idx = 0;
+	tpr->rx_jmb_cons_idx = 0;
+	tpr->rx_jmb_prod_idx = 0;
+
+	if (tpr != &tp->napi[0].prodring) {
+		memset(&tpr->rx_std_buffers[0], 0,
+		       TG3_RX_STD_BUFF_RING_SIZE(tp));
+		if (tpr->rx_jmb_buffers)
+			memset(&tpr->rx_jmb_buffers[0], 0,
+			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
+		goto done;
+	}
+
+	/* Zero out all descriptors. */
+	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
+
+	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
+	if (tg3_flag(tp, 5780_CLASS) &&
+	    tp->dev->mtu > ETH_DATA_LEN)
+		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
+
+	/* Initialize invariants of the rings; we only set this
+	 * stuff once.  This works because the card does not
+	 * write into the rx buffer posting rings.
+	 */
+	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
+		struct tg3_rx_buffer_desc *rxd;
+
+		rxd = &tpr->rx_std[i];
+		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
+		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
+		rxd->opaque = (RXD_OPAQUE_RING_STD |
+			       (i << RXD_OPAQUE_INDEX_SHIFT));
+	}
+
+	/* Now allocate fresh SKBs for each rx ring. */
+	for (i = 0; i < tp->rx_pending; i++) {
+		unsigned int frag_size;
+
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
+				      &frag_size) < 0) {
+			netdev_warn(tp->dev,
+				    "Using a smaller RX standard ring. Only "
+				    "%d out of %d buffers were allocated "
+				    "successfully\n", i, tp->rx_pending);
+			if (i == 0)
+				goto initfail;
+			tp->rx_pending = i;
+			break;
+		}
+	}
+
+	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+		goto done;
+
+	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
+
+	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
+		goto done;
+
+	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
+		struct tg3_rx_buffer_desc *rxd;
+
+		rxd = &tpr->rx_jmb[i].std;
+		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+				  RXD_FLAG_JUMBO;
+		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+		       (i << RXD_OPAQUE_INDEX_SHIFT));
+	}
+
+	for (i = 0; i < tp->rx_jumbo_pending; i++) {
+		unsigned int frag_size;
+
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
+				      &frag_size) < 0) {
+			netdev_warn(tp->dev,
+				    "Using a smaller RX jumbo ring. Only %d "
+				    "out of %d buffers were allocated "
+				    "successfully\n", i, tp->rx_jumbo_pending);
+			if (i == 0)
+				goto initfail;
+			tp->rx_jumbo_pending = i;
+			break;
+		}
+	}
+
+done:
+	return 0;
+
+initfail:
+	tg3_rx_prodring_free(tp, tpr);
+	return -ENOMEM;
+}
+
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	kfree(tpr->rx_std_buffers);
+	tpr->rx_std_buffers = NULL;
+	kfree(tpr->rx_jmb_buffers);
+	tpr->rx_jmb_buffers = NULL;
+	if (tpr->rx_std) {
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+				  tpr->rx_std, tpr->rx_std_mapping);
+		tpr->rx_std = NULL;
+	}
+	if (tpr->rx_jmb) {
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+				  tpr->rx_jmb, tpr->rx_jmb_mapping);
+		tpr->rx_jmb = NULL;
+	}
+}
+
+static int tg3_rx_prodring_init(struct tg3 *tp,
+				struct tg3_rx_prodring_set *tpr)
+{
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+				      GFP_KERNEL);
+	if (!tpr->rx_std_buffers)
+		return -ENOMEM;
+
+	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+					 TG3_RX_STD_RING_BYTES(tp),
+					 &tpr->rx_std_mapping,
+					 GFP_KERNEL);
+	if (!tpr->rx_std)
+		goto err_out;
+
+	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
+					      GFP_KERNEL);
+		if (!tpr->rx_jmb_buffers)
+			goto err_out;
+
+		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+						 TG3_RX_JMB_RING_BYTES(tp),
+						 &tpr->rx_jmb_mapping,
+						 GFP_KERNEL);
+		if (!tpr->rx_jmb)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_rx_prodring_fini(tp, tpr);
+	return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+	int i, j;
+
+	for (j = 0; j < tp->irq_cnt; j++) {
+		struct tg3_napi *tnapi = &tp->napi[j];
+
+		tg3_rx_prodring_free(tp, &tnapi->prodring);
+
+		if (!tnapi->tx_buffers)
+			continue;
+
+		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
+
+			if (!skb)
+				continue;
+
+			tg3_tx_skb_unmap(tnapi, i,
+					 skb_shinfo(skb)->nr_frags - 1);
+
+			dev_kfree_skb_any(skb);
+		}
+		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
+	}
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+	int i;
+
+	/* Free up all the SKBs. */
+	tg3_free_rings(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->last_tag = 0;
+		tnapi->last_irq_tag = 0;
+		tnapi->hw_status->status = 0;
+		tnapi->hw_status->status_tag = 0;
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+		tnapi->tx_prod = 0;
+		tnapi->tx_cons = 0;
+		if (tnapi->tx_ring)
+			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+		tnapi->rx_rcb_ptr = 0;
+		if (tnapi->rx_rcb)
+			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+		if (tnapi->prodring.rx_std &&
+		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+			tg3_free_rings(tp);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void tg3_mem_tx_release(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_max; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tnapi->tx_ring) {
+			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+				tnapi->tx_ring, tnapi->tx_desc_mapping);
+			tnapi->tx_ring = NULL;
+		}
+
+		kfree(tnapi->tx_buffers);
+		tnapi->tx_buffers = NULL;
+	}
+}
+
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+	int i;
+	struct tg3_napi *tnapi = &tp->napi[0];
+
+	/* If multivector TSS is enabled, vector 0 does not handle
+	 * tx interrupts.  Don't allocate any resources for it.
+	 */
+	if (tg3_flag(tp, ENABLE_TSS))
+		tnapi++;
+
+	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+					    TG3_TX_RING_SIZE, GFP_KERNEL);
+		if (!tnapi->tx_buffers)
+			goto err_out;
+
+		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+						    TG3_TX_RING_BYTES,
+						    &tnapi->tx_desc_mapping,
+						    GFP_KERNEL);
+		if (!tnapi->tx_ring)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_mem_tx_release(tp);
+	return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_max; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
+		if (!tnapi->rx_rcb)
+			continue;
+
+		dma_free_coherent(&tp->pdev->dev,
+				  TG3_RX_RCB_RING_BYTES(tp),
+				  tnapi->rx_rcb,
+				  tnapi->rx_rcb_mapping);
+		tnapi->rx_rcb = NULL;
+	}
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+	unsigned int i, limit;
+
+	limit = tp->rxq_cnt;
+
+	/* If RSS is enabled, we need a (dummy) producer ring
+	 * set on vector zero.  This is the true hw prodring.
+	 */
+	if (tg3_flag(tp, ENABLE_RSS))
+		limit++;
+
+	for (i = 0; i < limit; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+			goto err_out;
+
+		/* If multivector RSS is enabled, vector 0
+		 * does not handle rx or tx interrupts.
+		 * Don't allocate any resources for it.
+		 */
+		if (!i && tg3_flag(tp, ENABLE_RSS))
+			continue;
+
+		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
+						    TG3_RX_RCB_RING_BYTES(tp),
+						    &tnapi->rx_rcb_mapping,
+						    GFP_KERNEL);
+		if (!tnapi->rx_rcb)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_mem_rx_release(tp);
+	return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tnapi->hw_status) {
+			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+					  tnapi->hw_status,
+					  tnapi->status_mapping);
+			tnapi->hw_status = NULL;
+		}
+	}
+
+	tg3_mem_rx_release(tp);
+	tg3_mem_tx_release(tp);
+
+	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+	tg3_full_lock(tp, 0);
+	if (tp->hw_stats) {
+		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+				  tp->hw_stats, tp->stats_mapping);
+		tp->hw_stats = NULL;
+	}
+	tg3_full_unlock(tp);
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
+	int i;
+
+	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
+					   sizeof(struct tg3_hw_stats),
+					   &tp->stats_mapping, GFP_KERNEL);
+	if (!tp->hw_stats)
+		goto err_out;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		struct tg3_hw_status *sblk;
+
+		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
+						       TG3_HW_STATUS_SIZE,
+						       &tnapi->status_mapping,
+						       GFP_KERNEL);
+		if (!tnapi->hw_status)
+			goto err_out;
+
+		sblk = tnapi->hw_status;
+
+		if (tg3_flag(tp, ENABLE_RSS)) {
+			u16 *prodptr = NULL;
+
+			/*
+			 * When RSS is enabled, the status block format changes
+			 * slightly.  The "rx_jumbo_consumer", "reserved",
+			 * and "rx_mini_consumer" members get mapped to the
+			 * other three rx return ring producer indexes.
+			 */
+			switch (i) {
+			case 1:
+				prodptr = &sblk->idx[0].rx_producer;
+				break;
+			case 2:
+				prodptr = &sblk->rx_jumbo_consumer;
+				break;
+			case 3:
+				prodptr = &sblk->reserved;
+				break;
+			case 4:
+				prodptr = &sblk->rx_mini_consumer;
+				break;
+			}
+			tnapi->rx_rcb_prod_idx = prodptr;
+		} else {
+			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+		}
+	}
+
+	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	tg3_free_consistent(tp);
+	return -ENOMEM;
+}
+
+#define MAX_WAIT_CNT 1000
+
+/* To stop a block, clear the enable bit and poll till it
+ * clears.  tp->lock is held.
+ */
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
+{
+	unsigned int i;
+	u32 val;
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		switch (ofs) {
+		case RCVLSC_MODE:
+		case DMAC_MODE:
+		case MBFREE_MODE:
+		case BUFMGR_MODE:
+		case MEMARB_MODE:
+			/* We can't enable/disable these bits of the
+			 * 5705/5750, just say success.
+			 */
+			return 0;
+
+		default:
+			break;
+		}
+	}
+
+	val = tr32(ofs);
+	val &= ~enable_bit;
+	tw32_f(ofs, val);
+
+	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		if (pci_channel_offline(tp->pdev)) {
+			dev_err(&tp->pdev->dev,
+				"tg3_stop_block device offline, "
+				"ofs=%lx enable_bit=%x\n",
+				ofs, enable_bit);
+			return -ENODEV;
+		}
+
+		udelay(100);
+		val = tr32(ofs);
+		if ((val & enable_bit) == 0)
+			break;
+	}
+
+	if (i == MAX_WAIT_CNT && !silent) {
+		dev_err(&tp->pdev->dev,
+			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+			ofs, enable_bit);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_abort_hw(struct tg3 *tp, bool silent)
+{
+	int i, err;
+
+	tg3_disable_ints(tp);
+
+	if (pci_channel_offline(tp->pdev)) {
+		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
+		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+		err = -ENODEV;
+		goto err_no_dev;
+	}
+
+	tp->rx_mode &= ~RX_MODE_ENABLE;
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
+
+	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
+
+	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tp->tx_mode &= ~TX_MODE_ENABLE;
+	tw32_f(MAC_TX_MODE, tp->tx_mode);
+
+	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		udelay(100);
+		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
+			break;
+	}
+	if (i >= MAX_WAIT_CNT) {
+		dev_err(&tp->pdev->dev,
+			"%s timed out, TX_MODE_ENABLE will not clear "
+			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
+		err |= -ENODEV;
+	}
+
+	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
+
+	tw32(FTQ_RESET, 0xffffffff);
+	tw32(FTQ_RESET, 0x00000000);
+
+	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
+
+err_no_dev:
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		if (tnapi->hw_status)
+			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+	}
+
+	return err;
+}
+
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+	u32 val;
+
+	/* Re-enable indirect register accesses. */
+	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			       tp->misc_host_ctrl);
+
+	/* Set MAX PCI retry to zero. */
+	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
+	    tg3_flag(tp, PCIX_MODE))
+		val |= PCISTATE_RETRY_SAME_DMA;
+	/* Allow reads and writes to the APE register and memory space. */
+	if (tg3_flag(tp, ENABLE_APE))
+		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
+	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
+
+	if (!tg3_flag(tp, PCI_EXPRESS)) {
+		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+				      tp->pci_cacheline_sz);
+		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+				      tp->pci_lat_timer);
+	}
+
+	/* Make sure PCI-X relaxed ordering bit is clear. */
+	if (tg3_flag(tp, PCIX_MODE)) {
+		u16 pcix_cmd;
+
+		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				     &pcix_cmd);
+		pcix_cmd &= ~PCI_X_CMD_ERO;
+		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				      pcix_cmd);
+	}
+
+	if (tg3_flag(tp, 5780_CLASS)) {
+
+		/* Chip reset on 5780 will reset the MSI enable bit,
+		 * so we need to restore it.
+		 */
+		if (tg3_flag(tp, USING_MSI)) {
+			u16 ctrl;
+
+			pci_read_config_word(tp->pdev,
+					     tp->msi_cap + PCI_MSI_FLAGS,
+					     &ctrl);
+			pci_write_config_word(tp->pdev,
+					      tp->msi_cap + PCI_MSI_FLAGS,
+					      ctrl | PCI_MSI_FLAGS_ENABLE);
+			val = tr32(MSGINT_MODE);
+			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+		}
+	}
+}
+
+static void tg3_override_clk(struct tg3 *tp)
+{
+	u32 val;
+
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5717:
+		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+		     TG3_CPMU_MAC_ORIDE_ENABLE);
+		break;
+
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+		break;
+
+	default:
+		return;
+	}
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+	u32 val;
+
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5717:
+		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+		break;
+
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		val = tr32(TG3_CPMU_CLCK_ORIDE);
+		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+		break;
+
+	default:
+		return;
+	}
+}
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+	__releases(tp->lock)
+	__acquires(tp->lock)
+{
+	u32 val;
+	void (*write_op)(struct tg3 *, u32, u32);
+	int i, err;
+
+	if (!pci_device_is_present(tp->pdev))
+		return -ENODEV;
+
+	tg3_nvram_lock(tp);
+
+	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
+	/* No matching tg3_nvram_unlock() after this because
+	 * chip reset below will undo the nvram lock.
+	 */
+	tp->nvram_lock_cnt = 0;
+
+	/* GRC_MISC_CFG core clock reset will clear the memory
+	 * enable bit in PCI register 4 and the MSI enable bit
+	 * on some chips, so we save relevant registers here.
+	 */
+	tg3_save_pci_state(tp);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 5755_PLUS))
+		tw32(GRC_FASTBOOT_PC, 0);
+
+	/*
+	 * We must avoid the readl() that normally takes place.
+	 * It locks machines, causes machine checks, and other
+	 * fun things.  So, temporarily disable the 5701
+	 * hardware workaround, while we do the reset.
+	 */
+	write_op = tp->write32;
+	if (write_op == tg3_write_flush_reg32)
+		tp->write32 = tg3_write32;
+
+	/* Prevent the irq handler from reading or writing PCI registers
+	 * during chip reset when the memory enable bit in the PCI command
+	 * register may be cleared.  The chip does not generate interrupt
+	 * at this time, but the irq handler may still be called due to irq
+	 * sharing or irqpoll.
+	 */
+	tg3_flag_set(tp, CHIP_RESETTING);
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		if (tnapi->hw_status) {
+			tnapi->hw_status->status = 0;
+			tnapi->hw_status->status_tag = 0;
+		}
+		tnapi->last_tag = 0;
+		tnapi->last_irq_tag = 0;
+	}
+	smp_mb();
+
+	tg3_full_unlock(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		synchronize_irq(tp->napi[i].irq_vec);
+
+	tg3_full_lock(tp, 0);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+	}
+
+	/* do the reset */
+	val = GRC_MISC_CFG_CORECLK_RESET;
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* Force PCIe 1.0a mode */
+		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
+		    !tg3_flag(tp, 57765_PLUS) &&
+		    tr32(TG3_PCIE_PHY_TSTCTL) ==
+		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
+		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
+			tw32(GRC_MISC_CFG, (1 << 29));
+			val |= (1 << 29);
+		}
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
+		tw32(GRC_VCPU_EXT_CTRL,
+		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
+	}
+
+	/* Set the clock to the highest frequency to avoid timeouts. With link
+	 * aware mode, the clock speed could be slow and bootcode does not
+	 * complete within the expected time. Override the clock to allow the
+	 * bootcode to finish sooner and then restore it.
+	 */
+	tg3_override_clk(tp);
+
+	/* Manage gphy power for all CPMU absent PCIe devices. */
+	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
+		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
+	tw32(GRC_MISC_CFG, val);
+
+	/* restore 5701 hardware bug workaround write method */
+	tp->write32 = write_op;
+
+	/* Unfortunately, we have to delay before the PCI read back.
+	 * Some 575X chips will not even respond to a PCI cfg access
+	 * when the reset command is given to the chip.
+	 *
+	 * How do these hardware designers expect things to work
+	 * properly if the PCI write is posted for a long period
+	 * of time?  It is always necessary to have some method by
+	 * which a register read back can occur to push out the
+	 * write that performs the reset.
+	 *
+	 * For most tg3 variants the trick below has worked.
+	 * Ho hum...
+	 */
+	udelay(120);
+
+	/* Flush PCI posted writes.  The normal MMIO registers
+	 * are inaccessible at this time so this is the only
+	 * way to do this reliably (actually, this is no longer
+	 * the case, see above).  I tried to use indirect
+	 * register read/write but this upset some 5701 variants.
+	 */
+	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+	udelay(120);
+
+	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
+		u16 val16;
+
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
+			int j;
+			u32 cfg_val;
+
+			/* Wait for link training to complete.  */
+			for (j = 0; j < 5000; j++)
+				udelay(100);
+
+			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+			pci_write_config_dword(tp->pdev, 0xc4,
+					       cfg_val | (1 << 15));
+		}
+
+		/* Clear the "no snoop" and "relaxed ordering" bits. */
+		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
+		/*
+		 * Older PCIe devices only support the 128 byte
+		 * MPS setting.  Enforce the restriction.
+		 */
+		if (!tg3_flag(tp, CPMU_PRESENT))
+			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
+		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
+
+		/* Clear error status */
+		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
+				      PCI_EXP_DEVSTA_CED |
+				      PCI_EXP_DEVSTA_NFED |
+				      PCI_EXP_DEVSTA_FED |
+				      PCI_EXP_DEVSTA_URD);
+	}
+
+	tg3_restore_pci_state(tp);
+
+	tg3_flag_clear(tp, CHIP_RESETTING);
+	tg3_flag_clear(tp, ERROR_PROCESSED);
+
+	val = 0;
+	if (tg3_flag(tp, 5780_CLASS))
+		val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
+		tg3_stop_fw(tp);
+		tw32(0x5000, 0x400);
+	}
+
+	if (tg3_flag(tp, IS_SSB_CORE)) {
+		/*
+		 * BCM4785: In order to avoid repercussions from using
+		 * potentially defective internal ROM, stop the Rx RISC CPU,
+		 * which is not required.
+		 */
+		tg3_stop_fw(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+	}
+
+	err = tg3_poll_fw(tp);
+	if (err)
+		return err;
+
+	tw32(GRC_MODE, tp->grc_mode);
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
+		val = tr32(0xc4);
+
+		tw32(0xc4, val | (1 << 15));
+	}
+
+	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
+	    tg3_asic_rev(tp) == ASIC_REV_5705) {
+		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
+			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
+		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
+		val = tp->mac_mode;
+	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
+		val = tp->mac_mode;
+	} else
+		val = 0;
+
+	tw32_f(MAC_MODE, val);
+	udelay(40);
+
+	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
+	tg3_mdio_start(tp);
+
+	if (tg3_flag(tp, PCI_EXPRESS) &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
+	    !tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(0x7c00);
+
+		tw32(0x7c00, val | (1 << 25));
+	}
+
+	tg3_restore_clk(tp);
+
+	/* Reprobe ASF enable state.  */
+	tg3_flag_clear(tp, ENABLE_ASF);
+	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
+	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
+	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+		u32 nic_cfg;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+			tg3_flag_set(tp, ENABLE_ASF);
+			tp->last_event_jiffies = jiffies;
+			if (tg3_flag(tp, 5750_PLUS))
+				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
+			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
+				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
+			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
+				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
+		}
+	}
+
+	return 0;
+}
+
+static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
+static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
+
+/* tp->lock is held. */
+static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+{
+	int err;
+
+	tg3_stop_fw(tp);
+
+	tg3_write_sig_pre_reset(tp, kind);
+
+	tg3_abort_hw(tp, silent);
+	err = tg3_chip_reset(tp);
+
+	__tg3_set_mac_addr(tp, false);
+
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
+
+	if (tp->hw_stats) {
+		/* Save the stats across chip resets... */
+		tg3_get_nstats(tp, &tp->net_stats_prev);
+		tg3_get_estats(tp, &tp->estats_prev);
+
+		/* And make sure the next sample is new data */
+		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+	}
+
+	return err;
+}
+
+static int tg3_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0;
+	bool skip_mac_1 = false;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (!netif_running(dev))
+		return 0;
+
+	if (tg3_flag(tp, ENABLE_ASF)) {
+		u32 addr0_high, addr0_low, addr1_high, addr1_low;
+
+		addr0_high = tr32(MAC_ADDR_0_HIGH);
+		addr0_low = tr32(MAC_ADDR_0_LOW);
+		addr1_high = tr32(MAC_ADDR_1_HIGH);
+		addr1_low = tr32(MAC_ADDR_1_LOW);
+
+		/* Skip MAC addr 1 if ASF is using it. */
+		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+		    !(addr1_high == 0 && addr1_low == 0))
+			skip_mac_1 = true;
+	}
+	spin_lock_bh(&tp->lock);
+	__tg3_set_mac_addr(tp, skip_mac_1);
+	__tg3_set_rx_mode(dev);
+	spin_unlock_bh(&tp->lock);
+
+	return err;
+}
+
+/* tp->lock is held. */
+static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
+			   dma_addr_t mapping, u32 maxlen_flags,
+			   u32 nic_addr)
+{
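+	/* A TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit
+	 * host DMA address (split into high and low words), a
+	 * maxlen/flags word and, on pre-5705 devices, the NIC-internal
+	 * address of the descriptors.
+	 */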
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
+		      ((u64) mapping >> 32));
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
+		      ((u64) mapping & 0xffffffff));
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
+		       maxlen_flags);
+
+	if (!tg3_flag(tp, 5705_PLUS))
+		tg3_write_mem(tp,
+			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
+			      nic_addr);
+}
+
+static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	int i = 0;
+
+	if (!tg3_flag(tp, ENABLE_TSS)) {
+		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	} else {
+		tw32(HOSTCC_TXCOL_TICKS, 0);
+		tw32(HOSTCC_TXMAX_FRAMES, 0);
+		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
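+		/* With TSS each tx queue gets its own set of coalescing
+		 * registers, spaced 0x18 bytes apart per vector.
+		 */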
+		for (; i < tp->txq_cnt; i++) {
+			u32 reg;
+
+			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_coalesce_usecs);
+			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames);
+			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames_irq);
+		}
+	}
+
+	for (; i < tp->irq_max - 1; i++) {
+		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+	}
+}
+
+static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	int i = 0;
+	u32 limit = tp->rxq_cnt;
+
+	if (!tg3_flag(tp, ENABLE_RSS)) {
+		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+		limit--;
+	} else {
+		tw32(HOSTCC_RXCOL_TICKS, 0);
+		tw32(HOSTCC_RXMAX_FRAMES, 0);
+		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
+	}
+
+	for (; i < limit; i++) {
+		u32 reg;
+
+		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_coalesce_usecs);
+		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_max_coalesced_frames);
+		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_max_coalesced_frames_irq);
+	}
+
+	for (; i < tp->irq_max - 1; i++) {
+		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+	}
+}
+
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tg3_coal_tx_init(tp, ec);
+	tg3_coal_rx_init(tp, ec);
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
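+		/* Suspend periodic statistics block updates while the
+		 * link is down.
+		 */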
+		if (!tp->link_up)
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_tx_rcbs_disable(struct tg3 *tp)
+{
+	u32 txrcb, limit;
+
+	/* Disable all transmit rings but the first. */
+	if (!tg3_flag(tp, 5705_PLUS))
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+	else if (tg3_flag(tp, 5717_PLUS))
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
+	else if (tg3_flag(tp, 57765_CLASS) ||
+		 tg3_asic_rev(tp) == ASIC_REV_5762)
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
+	else
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+
+	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
+		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
+			      BDINFO_FLAGS_DISABLED);
+}
+
+/* tp->lock is held. */
+static void tg3_tx_rcbs_init(struct tg3 *tp)
+{
+	int i = 0;
+	u32 txrcb = NIC_SRAM_SEND_RCB;
+
+	if (tg3_flag(tp, ENABLE_TSS))
+		i++;
+
+	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (!tnapi->tx_ring)
+			continue;
+
+		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
+			       NIC_SRAM_TX_BUFFER_DESC);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
+{
+	u32 rxrcb, limit;
+
+	/* Disable all receive return rings but the first. */
+	if (tg3_flag(tp, 5717_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+	else if (!tg3_flag(tp, 5705_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
+		 tg3_flag(tp, 57765_CLASS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+	else
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+			      BDINFO_FLAGS_DISABLED);
+}
+
+/* tp->lock is held. */
+static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
+{
+	int i = 0;
+	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+	if (tg3_flag(tp, ENABLE_RSS))
+		i++;
+
+	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (!tnapi->rx_rcb)
+			continue;
+
+		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+			       (tp->rx_ret_ring_mask + 1) <<
+				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+	int i;
+	u32 stblk;
+	struct tg3_napi *tnapi = &tp->napi[0];
+
+	tg3_tx_rcbs_disable(tp);
+
+	tg3_rx_ret_rcbs_disable(tp);
+
+	/* Disable interrupts */
+	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+	tp->napi[0].chk_msi_cnt = 0;
+	tp->napi[0].last_rx_cons = 0;
+	tp->napi[0].last_tx_cons = 0;
+
+	/* Zero mailbox registers. */
+	if (tg3_flag(tp, SUPPORT_MSIX)) {
+		for (i = 1; i < tp->irq_max; i++) {
+			tp->napi[i].tx_prod = 0;
+			tp->napi[i].tx_cons = 0;
+			if (tg3_flag(tp, ENABLE_TSS))
+				tw32_mailbox(tp->napi[i].prodmbox, 0);
+			tw32_rx_mbox(tp->napi[i].consmbox, 0);
+			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+			tp->napi[i].chk_msi_cnt = 0;
+			tp->napi[i].last_rx_cons = 0;
+			tp->napi[i].last_tx_cons = 0;
+		}
+		if (!tg3_flag(tp, ENABLE_TSS))
+			tw32_mailbox(tp->napi[0].prodmbox, 0);
+	} else {
+		tp->napi[0].tx_prod = 0;
+		tp->napi[0].tx_cons = 0;
+		tw32_mailbox(tp->napi[0].prodmbox, 0);
+		tw32_rx_mbox(tp->napi[0].consmbox, 0);
+	}
+
+	/* Make sure the NIC-based send BD rings are disabled. */
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+		for (i = 0; i < 16; i++)
+			tw32_tx_mbox(mbox + i * 8, 0);
+	}
+
+	/* Clear status block in ram. */
+	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+	/* Set status block DMA address */
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+	     ((u64) tnapi->status_mapping >> 32));
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+	     ((u64) tnapi->status_mapping & 0xffffffff));
+
+	stblk = HOSTCC_STATBLCK_RING1;
+
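+	/* Additional vectors get their status block addresses programmed
+	 * at consecutive 8-byte slots starting at HOSTCC_STATBLCK_RING1.
+	 */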
+	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+		u64 mapping = (u64)tnapi->status_mapping;
+		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+		stblk += 8;
+
+		/* Clear status block in ram. */
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+	}
+
+	tg3_tx_rcbs_init(tp);
+	tg3_rx_ret_rcbs_init(tp);
+}
+
+static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+{
+	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
+
+	if (!tg3_flag(tp, 5750_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS) ||
+	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 57765_PLUS))
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
+	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+		 tg3_asic_rev(tp) == ASIC_REV_5787)
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
+	else
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
+
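+	/* Set the standard ring replenish threshold to the smaller of
+	 * half the on-chip BD cache (capped at rx_std_max_post) and
+	 * one-eighth of the host ring.
+	 */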
+	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
+	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
+
+	val = min(nic_rep_thresh, host_rep_thresh);
+	tw32(RCVBDI_STD_THRESH, val);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
+
+	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+		return;
+
+	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
+
+	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
+
+	val = min(bdcache_maxcnt / 2, host_rep_thresh);
+	tw32(RCVBDI_JUMBO_THRESH, val);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+}
+
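+/* Bit-at-a-time CRC-32 using the reflected Ethernet polynomial
+ * 0xedb88320; the result is used to hash multicast addresses into
+ * the MAC hash registers.
+ */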
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+	u32 reg;
+	u32 tmp;
+	int j, k;
+
+	reg = 0xffffffff;
+
+	for (j = 0; j < len; j++) {
+		reg ^= buf[j];
+
+		for (k = 0; k < 8; k++) {
+			tmp = reg & 0x01;
+
+			reg >>= 1;
+
+			if (tmp)
+				reg ^= 0xedb88320;
+		}
+	}
+
+	return ~reg;
+}
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+	/* accept or reject all multicast frames */
+	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 rx_mode;
+
+	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+				  RX_MODE_KEEP_VLAN_TAG);
+
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
+	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+	 * flag clear.
+	 */
+	if (!tg3_flag(tp, ENABLE_ASF))
+		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Promiscuous mode. */
+		rx_mode |= RX_MODE_PROMISC;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* Accept all multicast. */
+		tg3_set_multi(tp, 1);
+	} else if (netdev_mc_empty(dev)) {
+		/* Reject all multicast. */
+		tg3_set_multi(tp, 0);
+	} else {
+		/* Accept one or more multicast(s). */
+		struct netdev_hw_addr *ha;
+		u32 mc_filter[4] = { 0, };
+		u32 regidx;
+		u32 bit;
+		u32 crc;
+
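+		/* The low 7 bits of the un-inverted CRC select the hash
+		 * bit: bits 6:5 pick one of the four hash registers,
+		 * bits 4:0 pick the bit within that register.
+		 */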
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = calc_crc(ha->addr, ETH_ALEN);
+			bit = ~crc & 0x7f;
+			regidx = (bit & 0x60) >> 5;
+			bit &= 0x1f;
+			mc_filter[regidx] |= (1 << bit);
+		}
+
+		tw32(MAC_HASH_REG_0, mc_filter[0]);
+		tw32(MAC_HASH_REG_1, mc_filter[1]);
+		tw32(MAC_HASH_REG_2, mc_filter[2]);
+		tw32(MAC_HASH_REG_3, mc_filter[3]);
+	}
+
+	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+		rx_mode |= RX_MODE_PROMISC;
+	} else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries into the MAC addr filter list */
+		int i = 0;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			__tg3_set_one_mac_addr(tp, ha->addr,
+					       i + TG3_UCAST_ADDR_IDX(tp));
+			i++;
+		}
+	}
+
+	if (rx_mode != tp->rx_mode) {
+		tp->rx_mode = rx_mode;
+		tw32_f(MAC_RX_MODE, rx_mode);
+		udelay(10);
+	}
+}
+
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
+{
+	int i;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+	int i;
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return;
+
+	if (tp->rxq_cnt == 1) {
+		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+		return;
+	}
+
+	/* Validate table against current IRQ count */
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
+			break;
+	}
+
+	if (i != TG3_RSS_INDIR_TBL_SIZE)
+		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+	int i = 0;
+	u32 reg = MAC_RSS_INDIR_TBL_0;
+
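+	/* Each 32-bit indirection register packs eight 4-bit queue
+	 * indices, first entry in the most significant nibble.
+	 */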
+	while (i < TG3_RSS_INDIR_TBL_SIZE) {
+		u32 val = tp->rss_ind_tbl[i];
+		i++;
+		for (; i % 8; i++) {
+			val <<= 4;
+			val |= tp->rss_ind_tbl[i];
+		}
+		tw32(reg, val);
+		reg += 4;
+	}
+}
+
+static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
+{
+	if (tg3_asic_rev(tp) == ASIC_REV_5719)
+		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
+	else
+		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
+}
+
+/* tp->lock is held. */
+static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
+{
+	u32 val, rdmac_mode;
+	int i, err, limit;
+	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+	tg3_disable_ints(tp);
+
+	tg3_stop_fw(tp);
+
+	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
+
+	if (tg3_flag(tp, INIT_COMPLETE))
+		tg3_abort_hw(tp, 1);
+
+	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
+		tg3_phy_pull_config(tp);
+		tg3_eee_pull_config(tp, NULL);
+		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+	}
+
+	/* Enable MAC control of LPI */
+	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+		tg3_setup_eee(tp);
+
+	if (reset_phy)
+		tg3_phy_reset(tp);
+
+	err = tg3_chip_reset(tp);
+	if (err)
+		return err;
+
+	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
+
+	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
+		val = tr32(TG3_CPMU_CTRL);
+		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
+		tw32(TG3_CPMU_CTRL, val);
+
+		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+		val |= CPMU_LSPD_10MB_MACCLK_6_25;
+		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+
+		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+		val |= CPMU_LNK_AWARE_MACCLK_6_25;
+		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+
+		val = tr32(TG3_CPMU_HST_ACC);
+		val &= ~CPMU_HST_ACC_MACCLK_MASK;
+		val |= CPMU_HST_ACC_MACCLK_6_25;
+		tw32(TG3_CPMU_HST_ACC, val);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
+		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
+		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
+		       PCIE_PWR_MGMT_L1_THRESH_4MS;
+		tw32(PCIE_PWR_MGMT_THRESH, val);
+
+		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
+		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
+
+		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
+
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+	}
+
+	if (tg3_flag(tp, L1PLLPD_EN)) {
+		u32 grc_mode = tr32(GRC_MODE);
+
+		/* Access the lower 1K of PL PCIE block registers. */
+		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+		tw32(GRC_MODE, grc_mode);
+	}
+
+	if (tg3_flag(tp, 57765_CLASS)) {
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
+			u32 grc_mode = tr32(GRC_MODE);
+
+			/* Access the lower 1K of PL PCIE block registers. */
+			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+			val = tr32(TG3_PCIE_TLDLPL_PORT +
+				   TG3_PCIE_PL_LO_PHYCTL5);
+			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+			tw32(GRC_MODE, grc_mode);
+		}
+
+		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+			u32 grc_mode;
+
+			/* Fix transmit hangs */
+			val = tr32(TG3_CPMU_PADRNG_CTL);
+			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+			tw32(TG3_CPMU_PADRNG_CTL, val);
+
+			grc_mode = tr32(GRC_MODE);
+
+			/* Access the lower 1K of DL PCIE block registers. */
+			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
+
+			val = tr32(TG3_PCIE_TLDLPL_PORT +
+				   TG3_PCIE_DL_LO_FTSMAX);
+			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
+			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
+			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
+
+			tw32(GRC_MODE, grc_mode);
+		}
+
+		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+		val |= CPMU_LSPD_10MB_MACCLK_6_25;
+		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+	}
+
+	/* This works around an issue with Athlon chipsets on
+	 * B3 tigon3 silicon.  This bit has no effect on any
+	 * other revision.  But do not set this on PCI Express
+	 * chips and don't even touch the clocks if the CPMU is present.
+	 */
+	if (!tg3_flag(tp, CPMU_PRESENT)) {
+		if (!tg3_flag(tp, PCI_EXPRESS))
+			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+	}
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
+	    tg3_flag(tp, PCIX_MODE)) {
+		val = tr32(TG3PCI_PCISTATE);
+		val |= PCISTATE_RETRY_SAME_DMA;
+		tw32(TG3PCI_PCISTATE, val);
+	}
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		/* Allow reads and writes to the
+		 * APE register and memory space.
+		 */
+		val = tr32(TG3PCI_PCISTATE);
+		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
+		tw32(TG3PCI_PCISTATE, val);
+	}
+
+	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
+		/* Enable some hw fixes.  */
+		val = tr32(TG3PCI_MSI_DATA);
+		val |= (1 << 26) | (1 << 28) | (1 << 29);
+		tw32(TG3PCI_MSI_DATA, val);
+	}
+
+	/* Descriptor ring init may make accesses to the
+	 * NIC SRAM area to setup the TX descriptors, so we
+	 * can only do this after the hardware has been
+	 * successfully reset.
+	 */
+	err = tg3_init_rings(tp);
+	if (err)
+		return err;
+
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(TG3PCI_DMA_RW_CTRL) &
+		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+		if (!tg3_flag(tp, 57765_CLASS) &&
+		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
+		    tg3_asic_rev(tp) != ASIC_REV_5762)
+			val |= DMA_RWCTRL_TAGGED_STAT_WA;
+		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+		   tg3_asic_rev(tp) != ASIC_REV_5761) {
+		/* This value is determined during the probe time DMA
+		 * engine test, tg3_test_dma.
+		 */
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
+
+	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+			  GRC_MODE_4X_NIC_SEND_RINGS |
+			  GRC_MODE_NO_TX_PHDR_CSUM |
+			  GRC_MODE_NO_RX_PHDR_CSUM);
+	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+	/* Pseudo-header checksum is done by hardware logic and not
+	 * the offload processors, so make the chip do the pseudo-
+	 * header checksums on receive.  For transmit it is more
+	 * convenient to do the pseudo-header checksum in software
+	 * as Linux does that on transmit for us in all cases.
+	 */
+	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
+	if (tp->rxptpctl)
+		tw32(TG3_RX_PTP_CTL,
+		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+
+	if (tg3_flag(tp, PTP_CAPABLE))
+		val |= GRC_MODE_TIME_SYNC_ENABLE;
+
+	tw32(GRC_MODE, tp->grc_mode | val);
+
+	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
+	val = tr32(GRC_MISC_CFG);
+	val &= ~0xff;
+	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+	tw32(GRC_MISC_CFG, val);
+
+	/* Initialize MBUF/DESC pool. */
+	if (tg3_flag(tp, 5750_PLUS)) {
+		/* Do nothing.  */
+	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
+		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+		if (tg3_asic_rev(tp) == ASIC_REV_5704)
+			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+		else
+			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+	} else if (tg3_flag(tp, TSO_CAPABLE)) {
+		int fw_len;
+
+		fw_len = tp->fw_len;
+		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+		tw32(BUFMGR_MB_POOL_ADDR,
+		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+		tw32(BUFMGR_MB_POOL_SIZE,
+		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+	}
+
+	if (tp->dev->mtu <= ETH_DATA_LEN) {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER,
+		     tp->bufmgr_config.mbuf_read_dma_low_water);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER,
+		     tp->bufmgr_config.mbuf_mac_rx_low_water);
+		tw32(BUFMGR_MB_HIGH_WATER,
+		     tp->bufmgr_config.mbuf_high_water);
+	} else {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER,
+		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER,
+		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+		tw32(BUFMGR_MB_HIGH_WATER,
+		     tp->bufmgr_config.mbuf_high_water_jumbo);
+	}
+	tw32(BUFMGR_DMA_LOW_WATER,
+	     tp->bufmgr_config.dma_low_water);
+	tw32(BUFMGR_DMA_HIGH_WATER,
+	     tp->bufmgr_config.dma_high_water);
+
+	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+	if (tg3_asic_rev(tp) == ASIC_REV_5719)
+		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
+		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
+	tw32(BUFMGR_MODE, val);
+	for (i = 0; i < 2000; i++) {
+		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+			break;
+		udelay(10);
+	}
+	if (i >= 2000) {
+		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
+		return -ENODEV;
+	}
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
+		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
+
+	tg3_setup_rxbd_thresholds(tp);
+
+	/* Initialize TG3_BDINFO's at:
+	 *  RCVDBDI_STD_BD:	standard eth size rx ring
+	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
+	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
+	 *
+	 * like so:
+	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
+	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
+	 *                              ring attribute flags
+	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
+	 *
+	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
+	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
+	 *
+	 * The size of each ring is fixed in the firmware, but the location is
+	 * configurable.
+	 */
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+	     ((u64) tpr->rx_std_mapping >> 32));
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+	     ((u64) tpr->rx_std_mapping & 0xffffffff));
+	if (!tg3_flag(tp, 5717_PLUS))
+		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+		     NIC_SRAM_RX_BUFFER_DESC);
+
+	/* Disable the mini ring */
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
+		     BDINFO_FLAGS_DISABLED);
+
+	/* Program the jumbo buffer descriptor ring control
+	 * blocks on those devices that have them.
+	 */
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
+
+		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+			     ((u64) tpr->rx_jmb_mapping >> 32));
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
+			val = TG3_RX_JMB_RING_SIZE(tp) <<
+			      BDINFO_FLAGS_MAXLEN_SHIFT;
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+			     val | BDINFO_FLAGS_USE_EXT_RECV);
+			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
+			    tg3_flag(tp, 57765_CLASS) ||
+			    tg3_asic_rev(tp) == ASIC_REV_5762)
+				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+		} else {
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+			     BDINFO_FLAGS_DISABLED);
+		}
+
+		if (tg3_flag(tp, 57765_PLUS)) {
+			val = TG3_RX_STD_RING_SIZE(tp);
+			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
+			val |= (TG3_RX_STD_DMA_SZ << 2);
+		} else
+			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
+	} else
+		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
+
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
+
+	tpr->rx_std_prod_idx = tp->rx_pending;
+	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
+
+	tpr->rx_jmb_prod_idx =
+		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
+	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
+
+	tg3_rings_reset(tp);
+
+	/* Initialize MAC address and backoff seed. */
+	__tg3_set_mac_addr(tp, false);
+
+	/* MTU + ethernet header + FCS + optional VLAN tag */
+	tw32(MAC_RX_MTU_SIZE,
+	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
+	/* The slot time is changed by tg3_setup_phy if we
+	 * run at gigabit with half duplex.
+	 */
+	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+	      (6 << TX_LENGTHS_IPG_SHIFT) |
+	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		val |= tr32(MAC_TX_LENGTHS) &
+		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
+			TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+	tw32(MAC_TX_LENGTHS, val);
+
+	/* Receive rules. */
+	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
+	tw32(RCVLPC_CONFIG, 0x0181);
+
+	/* Calculate RDMAC_MODE setting early; we need it to determine
+	 * the RCVLPC_STATS_ENABLE mask.
+	 */
+	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
+		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
+		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+		      RDMAC_MODE_LNGREAD_ENAB);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717)
+		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780)
+		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
+			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
+			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    tg3_asic_rev(tp) == ASIC_REV_5705) {
+			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+			   !tg3_flag(tp, IS_5788)) {
+			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+		}
+	}
+
+	if (tg3_flag(tp, PCI_EXPRESS))
+		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+		tp->dma_limit = 0;
+		if (tp->dev->mtu <= ETH_DATA_LEN) {
+			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+		}
+	}
+
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3))
+		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
+
+	if (tg3_flag(tp, 57765_PLUS) ||
+	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780)
+		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_PLUS)) {
+		u32 tgtreg;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5762)
+			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
+		else
+			tgtreg = TG3_RDMA_RSRVCTRL_REG;
+
+		val = tr32(tgtreg);
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5762) {
+			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
+		}
+		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762) {
+		u32 tgtreg;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5762)
+			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
+		else
+			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
+
+		val = tr32(tgtreg);
+		tw32(tgtreg, val |
+		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
+		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
+	}
+
+	/* Receive/send statistics. */
+	if (tg3_flag(tp, 5750_PLUS)) {
+		val = tr32(RCVLPC_STATS_ENABLE);
+		val &= ~RCVLPC_STATSENAB_DACK_FIX;
+		tw32(RCVLPC_STATS_ENABLE, val);
+	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+		   tg3_flag(tp, TSO_CAPABLE)) {
+		val = tr32(RCVLPC_STATS_ENABLE);
+		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+		tw32(RCVLPC_STATS_ENABLE, val);
+	} else {
+		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
+	}
+	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
+	tw32(SNDDATAI_STATSENAB, 0xffffff);
+	tw32(SNDDATAI_STATSCTRL,
+	     (SNDDATAI_SCTRL_ENABLE |
+	      SNDDATAI_SCTRL_FASTUPD));
+
+	/* Setup host coalescing engine. */
+	tw32(HOSTCC_MODE, 0);
+	for (i = 0; i < 2000; i++) {
+		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
+			break;
+		udelay(10);
+	}
+
+	__tg3_set_coalesce(tp, &tp->coal);
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		/* Status/statistics block address.  See tg3_timer,
+		 * the tg3_periodic_fetch_stats call there, and
+		 * tg3_get_stats to see how this works for 5705/5750 chips.
+		 */
+		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+		     ((u64) tp->stats_mapping >> 32));
+		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+		     ((u64) tp->stats_mapping & 0xffffffff));
+		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+
+		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+
+		/* Clear statistics and status block memory areas */
+		for (i = NIC_SRAM_STATS_BLK;
+		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+		     i += sizeof(u32)) {
+			tg3_write_mem(tp, i, 0);
+			udelay(40);
+		}
+	}
+
+	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
+
+	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
+	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
+
+	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		/* reset to prevent losing 1st rx packet intermittently */
+		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+		udelay(10);
+	}
+
+	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
+			MAC_MODE_FHDE_ENABLE;
+	if (tg3_flag(tp, ENABLE_APE))
+		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+	if (!tg3_flag(tp, 5705_PLUS) &&
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    tg3_asic_rev(tp) != ASIC_REV_5700)
+		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+	udelay(40);
+
+	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+	 * If TG3_FLAG_IS_NIC is zero, we should read the
+	 * register to preserve the GPIO settings for LOMs. The GPIOs,
+	 * whether used as inputs or outputs, are set by boot code after
+	 * reset.
+	 */
+	if (!tg3_flag(tp, IS_NIC)) {
+		u32 gpio_mask;
+
+		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5752)
+			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+				     GRC_LCLCTRL_GPIO_OUTPUT3;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5755)
+			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+		tp->grc_local_ctrl &= ~gpio_mask;
+		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+
+		/* GPIO1 must be driven high for eeprom write protect */
+		if (tg3_flag(tp, EEPROM_WRITE_PROT))
+			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+					       GRC_LCLCTRL_GPIO_OUTPUT1);
+	}
+	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+	udelay(100);
+
+	if (tg3_flag(tp, USING_MSIX)) {
+		val = tr32(MSGINT_MODE);
+		val |= MSGINT_MODE_ENABLE;
+		if (tp->irq_cnt > 1)
+			val |= MSGINT_MODE_MULTIVEC_EN;
+		if (!tg3_flag(tp, 1SHOT_MSI))
+			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
+		tw32(MSGINT_MODE, val);
+	}
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
+		udelay(40);
+	}
+
+	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
+	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
+	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
+	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
+	       WDMAC_MODE_LNGREAD_ENAB);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
+			/* nothing */
+		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+			   !tg3_flag(tp, IS_5788)) {
+			val |= WDMAC_MODE_RX_ACCEL;
+		}
+	}
+
+	/* Enable host coalescing bug fix */
+	if (tg3_flag(tp, 5755_PLUS))
+		val |= WDMAC_MODE_STATUS_TAG_FIX;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5785)
+		val |= WDMAC_MODE_BURST_ALL_DATA;
+
+	tw32_f(WDMAC_MODE, val);
+	udelay(40);
+
+	if (tg3_flag(tp, PCIX_MODE)) {
+		u16 pcix_cmd;
+
+		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				     &pcix_cmd);
+		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
+			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
+			pcix_cmd |= PCI_X_CMD_READ_2K;
+		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
+			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
+			pcix_cmd |= PCI_X_CMD_READ_2K;
+		}
+		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				      pcix_cmd);
+	}
+
+	tw32_f(RDMAC_MODE, rdmac_mode);
+	udelay(40);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720) {
+		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
+			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
+				break;
+		}
+		if (i < TG3_NUM_RDMA_CHANNELS) {
+			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+			val |= tg3_lso_rd_dma_workaround_bit(tp);
+			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
+			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
+		}
+	}
+
+	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5761)
+		tw32(SNDDATAC_MODE,
+		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
+	else
+		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
+	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
+	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
+	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
+	if (tg3_flag(tp, LRG_PROD_RING_CAP))
+		val |= RCVDBDI_MODE_LRG_RING_SZ;
+	tw32(RCVDBDI_MODE, val);
+	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3))
+		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+	if (tg3_flag(tp, ENABLE_TSS))
+		val |= SNDBDI_MODE_MULTI_TXQ_EN;
+	tw32(SNDBDI_MODE, val);
+	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+		err = tg3_load_5701_a0_firmware_fix(tp);
+		if (err)
+			return err;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+		/* Ignore any errors for the firmware download. If download
+		 * fails, the device will operate with EEE disabled.
+		 */
+		tg3_load_57766_firmware(tp);
+	}
+
+	if (tg3_flag(tp, TSO_CAPABLE)) {
+		err = tg3_load_tso_firmware(tp);
+		if (err)
+			return err;
+	}
+
+	tp->tx_mode = TX_MODE_ENABLE;
+
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    tg3_asic_rev(tp) == ASIC_REV_5906)
+		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762) {
+		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
+		tp->tx_mode &= ~val;
+		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
+	}
+
+	tw32_f(MAC_TX_MODE, tp->tx_mode);
+	udelay(100);
+
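+	/* If RSS is enabled, program the hash indirection table and load a
+	 * freshly generated 40-byte hash key (ten 32-bit words) into the
+	 * MAC_RSS_HASH_KEY registers.
+	 */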
+	if (tg3_flag(tp, ENABLE_RSS)) {
+		u32 rss_key[10];
+
+		tg3_rss_write_indir_tbl(tp);
+
+		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
+
+		for (i = 0; i < 10; i++)
+			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
+	}
+
+	tp->rx_mode = RX_MODE_ENABLE;
+	if (tg3_flag(tp, 5755_PLUS))
+		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5762)
+		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
+	if (tg3_flag(tp, ENABLE_RSS))
+		tp->rx_mode |= RX_MODE_RSS_ENABLE |
+			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
+			       RX_MODE_RSS_IPV6_HASH_EN |
+			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
+			       RX_MODE_RSS_IPV4_HASH_EN |
+			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+		udelay(10);
+	}
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+			/* Set drive transmission level to 1.2V, but only
+			 * if the signal pre-emphasis bit is not set.
+			 */
+			val = tr32(MAC_SERDES_CFG);
+			val &= 0xfffff000;
+			val |= 0x880;
+			tw32(MAC_SERDES_CFG, val);
+		}
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
+			tw32(MAC_SERDES_CFG, 0x616000);
+	}
+
+	/* Prevent chip from dropping frames when flow control
+	 * is enabled.
+	 */
+	if (tg3_flag(tp, 57765_CLASS))
+		val = 1;
+	else
+		val = 2;
+	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
+	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+		/* Use hardware link auto-negotiation */
+		tg3_flag_set(tp, HW_AUTONEG);
+	}
+
+	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+	    tg3_asic_rev(tp) == ASIC_REV_5714) {
+		u32 tmp;
+
+		tmp = tr32(SERDES_RX_CTRL);
+		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+	}
+
+	if (!tg3_flag(tp, USE_PHYLIB)) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+
+		err = tg3_setup_phy(tp, false);
+		if (err)
+			return err;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+			u32 tmp;
+
+			/* Clear CRC stats. */
+			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
+				tg3_writephy(tp, MII_TG3_TEST1,
+					     tmp | MII_TG3_TEST1_CRC_EN);
+				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
+			}
+		}
+	}
+
+	__tg3_set_rx_mode(tp->dev);
+
+	/* Initialize receive rules. */
+	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
+
+	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
+		limit = 8;
+	else
+		limit = 16;
+	if (tg3_flag(tp, ENABLE_ASF))
+		limit -= 4;
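+	/* Zero out the unused receive rules.  The cases below fall through
+	 * intentionally so that every rule from limit - 1 down to rule 4 is
+	 * cleared; rules 0 and 1 were programmed above, and rules 2 and 3
+	 * are left alone.
+	 */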
+	switch (limit) {
+	case 16:
+		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
+	case 15:
+		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
+	case 14:
+		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
+	case 13:
+		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
+	case 12:
+		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
+	case 11:
+		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
+	case 10:
+		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
+	case 9:
+		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
+	case 8:
+		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
+	case 7:
+		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
+	case 6:
+		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
+	case 5:
+		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
+	case 4:
+		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
+	case 3:
+		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
+	case 2:
+	case 1:
+
+	default:
+		break;
+	}
+
+	if (tg3_flag(tp, ENABLE_APE))
+		/* Write our heartbeat update interval to APE. */
+		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
+				APE_HOST_HEARTBEAT_INT_DISABLE);
+
+	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
+
+	return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing.  Invoked with tp->lock held.
+ */
+static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
+{
+	/* Chip may have been just powered on. If so, the boot code may still
+	 * be running initialization. Wait for it to finish to avoid races in
+	 * accessing the hardware.
+	 */
+	tg3_enable_register_access(tp);
+	tg3_poll_fw(tp);
+
+	tg3_switch_clocks(tp);
+
+	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	return tg3_reset_hw(tp, reset_phy);
+}
+
+static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
+{
+	int i;
+
+	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
+		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
+
+		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
+		off += len;
+
+		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
+		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
+			memset(ocir, 0, TG3_OCIR_LEN);
+	}
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t tg3_show_temp(struct device *dev,
+			     struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct tg3 *tp = dev_get_drvdata(dev);
+	u32 temperature;
+
+	spin_lock_bh(&tp->lock);
+	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+				sizeof(temperature));
+	spin_unlock_bh(&tp->lock);
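+	/* hwmon reports temperatures in millidegrees Celsius, hence the
+	 * conversion of the degree value read from the APE scratchpad.
+	 */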
+	return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+			  TG3_TEMP_SENSOR_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+			  TG3_TEMP_CAUTION_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+			  TG3_TEMP_MAX_OFFSET);
+
+static struct attribute *tg3_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(tg3);
+
+static void tg3_hwmon_close(struct tg3 *tp)
+{
+	if (tp->hwmon_dev) {
+		hwmon_device_unregister(tp->hwmon_dev);
+		tp->hwmon_dev = NULL;
+	}
+}
+
+static void tg3_hwmon_open(struct tg3 *tp)
+{
+	int i;
+	u32 size = 0;
+	struct pci_dev *pdev = tp->pdev;
+	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
+
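+	/* Only register a hwmon device if the APE scratchpad holds at least
+	 * one active sensor record with data.
+	 */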
+	tg3_sd_scan_scratchpad(tp, ocirs);
+
+	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
+		if (!ocirs[i].src_data_length)
+			continue;
+
+		size += ocirs[i].src_hdr_length;
+		size += ocirs[i].src_data_length;
+	}
+
+	if (!size)
+		return;
+
+	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+							  tp, tg3_groups);
+	if (IS_ERR(tp->hwmon_dev)) {
+		tp->hwmon_dev = NULL;
+		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
+	}
+}
+
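+/* Fold a 32-bit hardware counter register into a 64-bit (high/low) software
+ * statistic: the low word accumulates the register value and the high word
+ * is bumped whenever the low-word addition wraps.
+ */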
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do {	u32 __val = tr32(REG); \
+	(PSTAT)->low += __val; \
+	if ((PSTAT)->low < __val) \
+		(PSTAT)->high += 1; \
+} while (0)
+
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+	struct tg3_hw_stats *sp = tp->hw_stats;
+
+	if (!tp->link_up)
+		return;
+
+	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
+		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
+		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
+		u32 val;
+
+		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
+		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
+		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
+	}
+
+	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
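+	/* Most chips expose a discard counter directly.  On the chips
+	 * excluded below, a discard is instead signalled by the HOSTCC
+	 * mbuf low-watermark attention bit, which is cleared and counted
+	 * here and mirrored into mbuf_lwm_thresh_hit.
+	 */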
+	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
+		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+	} else {
+		u32 val = tr32(HOSTCC_FLOW_ATTN);
+		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
+		if (val) {
+			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
+			sp->rx_discards.low += val;
+			if (sp->rx_discards.low < val)
+				sp->rx_discards.high += 1;
+		}
+		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
+	}
+	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
+}
+
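+/* Detect MSI interrupts the chip failed to deliver: if a NAPI context still
+ * has work pending but its consumer indices have not moved since the previous
+ * timer tick, give it one more tick of grace and then invoke the MSI handler
+ * by hand.
+ */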
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+	u32 i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tg3_has_work(tnapi)) {
+			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+			    tnapi->last_tx_cons == tnapi->tx_cons) {
+				if (tnapi->chk_msi_cnt < 1) {
+					tnapi->chk_msi_cnt++;
+					return;
+				}
+				tg3_msi(0, tnapi);
+			}
+		}
+		tnapi->chk_msi_cnt = 0;
+		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+		tnapi->last_tx_cons = tnapi->tx_cons;
+	}
+}
+
+static void tg3_timer(unsigned long __opaque)
+{
+	struct tg3 *tp = (struct tg3 *) __opaque;
+
+	spin_lock(&tp->lock);
+
+	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+		spin_unlock(&tp->lock);
+		goto restart_timer;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_flag(tp, 57765_CLASS))
+		tg3_chk_missed_msi(tp);
+
+	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+		/* BCM4785: Flush posted writes from GbE to host memory. */
+		tr32(HOSTCC_MODE);
+	}
+
+	if (!tg3_flag(tp, TAGGED_STATUS)) {
+		/* All of this garbage is because, when using non-tagged
+		 * IRQ status, the mailbox/status_block protocol the chip
+		 * uses with the CPU is race prone.
+		 */
+		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
+		}
+
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			spin_unlock(&tp->lock);
+			tg3_reset_task_schedule(tp);
+			goto restart_timer;
+		}
+	}
+
+	/* This part only runs once per second. */
+	if (!--tp->timer_counter) {
+		if (tg3_flag(tp, 5705_PLUS))
+			tg3_periodic_fetch_stats(tp);
+
+		if (tp->setlpicnt && !--tp->setlpicnt)
+			tg3_phy_eee_enable(tp);
+
+		if (tg3_flag(tp, USE_LINKCHG_REG)) {
+			u32 mac_stat;
+			int phy_event;
+
+			mac_stat = tr32(MAC_STATUS);
+
+			phy_event = 0;
+			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
+				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
+					phy_event = 1;
+			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
+				phy_event = 1;
+
+			if (phy_event)
+				tg3_setup_phy(tp, false);
+		} else if (tg3_flag(tp, POLL_SERDES)) {
+			u32 mac_stat = tr32(MAC_STATUS);
+			int need_setup = 0;
+
+			if (tp->link_up &&
+			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
+				need_setup = 1;
+			}
+			if (!tp->link_up &&
+			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
+					 MAC_STATUS_SIGNAL_DET))) {
+				need_setup = 1;
+			}
+			if (need_setup) {
+				if (!tp->serdes_counter) {
+					tw32_f(MAC_MODE,
+					     (tp->mac_mode &
+					      ~MAC_MODE_PORT_MODE_MASK));
+					udelay(40);
+					tw32_f(MAC_MODE, tp->mac_mode);
+					udelay(40);
+				}
+				tg3_setup_phy(tp, false);
+			}
+		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+			   tg3_flag(tp, 5780_CLASS)) {
+			tg3_serdes_parallel_detect(tp);
+		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+			u32 cpmu = tr32(TG3_CPMU_STATUS);
+			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+					 TG3_CPMU_STATUS_LINK_MASK);
+
+			if (link_up != tp->link_up)
+				tg3_setup_phy(tp, false);
+		}
+
+		tp->timer_counter = tp->timer_multiplier;
+	}
+
+	/* Heartbeat is only sent once every 2 seconds.
+	 *
+	 * The heartbeat is to tell the ASF firmware that the host
+	 * driver is still alive.  In the event that the OS crashes,
+	 * ASF needs to reset the hardware to free up the FIFO space
+	 * that may be filled with rx packets destined for the host.
+	 * If the FIFO is full, ASF will no longer function properly.
+	 *
+	 * Unintended resets have been reported on real-time kernels
+	 * where the timer doesn't run on time.  Netpoll will also have
+	 * the same problem.
+	 *
+	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
+	 * to check the ring condition when the heartbeat is expiring
+	 * before doing the reset.  This will prevent most unintended
+	 * resets.
+	 */
+	if (!--tp->asf_counter) {
+		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+			tg3_wait_for_event_ack(tp);
+
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+				      FWCMD_NICDRV_ALIVE3);
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+				      TG3_FW_UPDATE_TIMEOUT_SEC);
+
+			tg3_generate_fw_event(tp);
+		}
+		tp->asf_counter = tp->asf_multiplier;
+	}
+
+	spin_unlock(&tp->lock);
+
+restart_timer:
+	tp->timer.expires = jiffies + tp->timer_offset;
+	add_timer(&tp->timer);
+}
+
+static void tg3_timer_init(struct tg3 *tp)
+{
+	if (tg3_flag(tp, TAGGED_STATUS) &&
+	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
+	    !tg3_flag(tp, 57765_CLASS))
+		tp->timer_offset = HZ;
+	else
+		tp->timer_offset = HZ / 10;
+
+	BUG_ON(tp->timer_offset > HZ);
+
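+	/* The timer fires every timer_offset jiffies, so HZ / timer_offset
+	 * expirations make up one second.  timer_counter is reloaded from
+	 * timer_multiplier to run the once-per-second work in tg3_timer(),
+	 * and asf_multiplier scales the same tick to the
+	 * TG3_FW_UPDATE_FREQ_SEC heartbeat interval.
+	 */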
+	tp->timer_multiplier = (HZ / tp->timer_offset);
+	tp->asf_multiplier = (HZ / tp->timer_offset) *
+			     TG3_FW_UPDATE_FREQ_SEC;
+
+	init_timer(&tp->timer);
+	tp->timer.data = (unsigned long) tp;
+	tp->timer.function = tg3_timer;
+}
+
+static void tg3_timer_start(struct tg3 *tp)
+{
+	tp->asf_counter   = tp->asf_multiplier;
+	tp->timer_counter = tp->timer_multiplier;
+
+	tp->timer.expires = jiffies + tp->timer_offset;
+	add_timer(&tp->timer);
+}
+
+static void tg3_timer_stop(struct tg3 *tp)
+{
+	del_timer_sync(&tp->timer);
+}
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
+	__releases(tp->lock)
+	__acquires(tp->lock)
+{
+	int err;
+
+	err = tg3_init_hw(tp, reset_phy);
+	if (err) {
+		netdev_err(tp->dev,
+			   "Failed to re-initialize device, aborting\n");
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_full_unlock(tp);
+		tg3_timer_stop(tp);
+		tp->irq_sync = 0;
+		tg3_napi_enable(tp);
+		dev_close(tp->dev);
+		tg3_full_lock(tp, 0);
+	}
+	return err;
+}
+
+static void tg3_reset_task(struct work_struct *work)
+{
+	struct tg3 *tp = container_of(work, struct tg3, reset_task);
+	int err;
+
+	rtnl_lock();
+	tg3_full_lock(tp, 0);
+
+	if (!netif_running(tp->dev)) {
+		tg3_flag_clear(tp, RESET_TASK_PENDING);
+		tg3_full_unlock(tp);
+		rtnl_unlock();
+		return;
+	}
+
+	tg3_full_unlock(tp);
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	tg3_full_lock(tp, 1);
+
+	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		tp->write32_rx_mbox = tg3_write_flush_reg32;
+		tg3_flag_set(tp, MBOX_WRITE_REORDER);
+		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+	}
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+	err = tg3_init_hw(tp, true);
+	if (err)
+		goto out;
+
+	tg3_netif_start(tp);
+
+out:
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
+	rtnl_unlock();
+}
+
+static int tg3_request_irq(struct tg3 *tp, int irq_num)
+{
+	irq_handler_t fn;
+	unsigned long flags;
+	char *name;
+	struct tg3_napi *tnapi = &tp->napi[irq_num];
+
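+	/* With multiple vectors, build a per-vector IRQ name that encodes
+	 * which ring types it services ("<dev>-txrx-<n>", "<dev>-tx-<n>" or
+	 * "<dev>-rx-<n>"); a single vector simply reuses the netdev name.
+	 */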
+	if (tp->irq_cnt == 1)
+		name = tp->dev->name;
+	else {
+		name = &tnapi->irq_lbl[0];
+		if (tnapi->tx_buffers && tnapi->rx_rcb)
+			snprintf(name, IFNAMSIZ,
+				 "%s-txrx-%d", tp->dev->name, irq_num);
+		else if (tnapi->tx_buffers)
+			snprintf(name, IFNAMSIZ,
+				 "%s-tx-%d", tp->dev->name, irq_num);
+		else if (tnapi->rx_rcb)
+			snprintf(name, IFNAMSIZ,
+				 "%s-rx-%d", tp->dev->name, irq_num);
+		else
+			snprintf(name, IFNAMSIZ,
+				 "%s-%d", tp->dev->name, irq_num);
+		name[IFNAMSIZ-1] = 0;
+	}
+
+	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+		fn = tg3_msi;
+		if (tg3_flag(tp, 1SHOT_MSI))
+			fn = tg3_msi_1shot;
+		flags = 0;
+	} else {
+		fn = tg3_interrupt;
+		if (tg3_flag(tp, TAGGED_STATUS))
+			fn = tg3_interrupt_tagged;
+		flags = IRQF_SHARED;
+	}
+
+	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
+}
+
+static int tg3_test_interrupt(struct tg3 *tp)
+{
+	struct tg3_napi *tnapi = &tp->napi[0];
+	struct net_device *dev = tp->dev;
+	int err, i, intr_ok = 0;
+	u32 val;
+
+	if (!netif_running(dev))
+		return -ENODEV;
+
+	tg3_disable_ints(tp);
+
+	free_irq(tnapi->irq_vec, tnapi);
+
+	/*
+	 * Turn off MSI one shot mode.  Otherwise this test has no
+	 * observable way to know whether the interrupt was delivered.
+	 */
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
+		tw32(MSGINT_MODE, val);
+	}
+
+	err = request_irq(tnapi->irq_vec, tg3_test_isr,
+			  IRQF_SHARED, dev->name, tnapi);
+	if (err)
+		return err;
+
+	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       tnapi->coal_now);
+
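+	/* Poll for up to ~50 ms for evidence that the test interrupt fired:
+	 * either the interrupt mailbox has been written or MISC_HOST_CTRL
+	 * shows that the PCI interrupt has been masked.
+	 */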
+	for (i = 0; i < 5; i++) {
+		u32 int_mbox, misc_host_ctrl;
+
+		int_mbox = tr32_mailbox(tnapi->int_mbox);
+		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+
+		if ((int_mbox != 0) ||
+		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
+			intr_ok = 1;
+			break;
+		}
+
+		if (tg3_flag(tp, 57765_PLUS) &&
+		    tnapi->hw_status->status_tag != tnapi->last_tag)
+			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+		msleep(10);
+	}
+
+	tg3_disable_ints(tp);
+
+	free_irq(tnapi->irq_vec, tnapi);
+
+	err = tg3_request_irq(tp, 0);
+
+	if (err)
+		return err;
+
+	if (intr_ok) {
+		/* Reenable MSI one shot mode. */
+		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
+			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
+			tw32(MSGINT_MODE, val);
+		}
+		return 0;
+	}
+
+	return -EIO;
+}
+
+/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
+ * mode is successfully restored.
+ */
+static int tg3_test_msi(struct tg3 *tp)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!tg3_flag(tp, USING_MSI))
+		return 0;
+
+	/* Turn off SERR reporting in case MSI terminates with Master
+	 * Abort.
+	 */
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+	pci_write_config_word(tp->pdev, PCI_COMMAND,
+			      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = tg3_test_interrupt(tp);
+
+	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+	if (!err)
+		return 0;
+
+	/* other failures */
+	if (err != -EIO)
+		return err;
+
+	/* MSI test failed, go back to INTx mode */
+	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+		    "to INTx mode. Please report this failure to the PCI "
+		    "maintainer and include system chipset information\n");
+
+	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+	pci_disable_msi(tp->pdev);
+
+	tg3_flag_clear(tp, USING_MSI);
+	tp->napi[0].irq_vec = tp->pdev->irq;
+
+	err = tg3_request_irq(tp, 0);
+	if (err)
+		return err;
+
+	/* Need to reset the chip because the MSI cycle may have terminated
+	 * with Master Abort.
+	 */
+	tg3_full_lock(tp, 1);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	err = tg3_init_hw(tp, true);
+
+	tg3_full_unlock(tp);
+
+	if (err)
+		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+	return err;
+}
+
+static int tg3_request_firmware(struct tg3 *tp)
+{
+	const struct tg3_firmware_hdr *fw_hdr;
+
+	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
+		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+			   tp->fw_needed);
+		return -ENOENT;
+	}
+
+	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	 * start address and _full_ length including BSS sections
+	 * (which must be longer than the actual data, of course).
+	 */
+
+	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
+	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
+		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+			   tp->fw_len, tp->fw_needed);
+		release_firmware(tp->fw);
+		tp->fw = NULL;
+		return -EINVAL;
+	}
+
+	/* We no longer need firmware; we have it. */
+	tp->fw_needed = NULL;
+	return 0;
+}
+
+static u32 tg3_irq_count(struct tg3 *tp)
+{
+	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
+
+	if (irq_cnt > 1) {
+		/* We want as many rx rings enabled as there are cpus.
+		 * In multiqueue MSI-X mode, the first MSI-X vector
+		 * only deals with link interrupts, etc, so we add
+		 * one to the number of vectors we are requesting.
+		 */
+		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
+	}
+
+	return irq_cnt;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+	int i, rc;
+	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
+
+	tp->txq_cnt = tp->txq_req;
+	tp->rxq_cnt = tp->rxq_req;
+	if (!tp->rxq_cnt)
+		tp->rxq_cnt = netif_get_num_default_rss_queues();
+	if (tp->rxq_cnt > tp->rxq_max)
+		tp->rxq_cnt = tp->rxq_max;
+
+	/* Disable multiple TX rings by default.  Simple round-robin hardware
+	 * scheduling of the TX rings can cause starvation of rings with
+	 * small packets when other rings have TSO or jumbo packets.
+	 */
+	if (!tp->txq_req)
+		tp->txq_cnt = 1;
+
+	tp->irq_cnt = tg3_irq_count(tp);
+
+	for (i = 0; i < tp->irq_max; i++) {
+		msix_ent[i].entry  = i;
+		msix_ent[i].vector = 0;
+	}
+
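+	/* Ask for up to irq_cnt vectors but accept as few as one.  If the
+	 * allocation comes back short, shrink the RX queue count to the
+	 * granted vectors minus one (vector 0 handles link and other
+	 * miscellaneous interrupts) and cap the TX queue count to match.
+	 */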
+	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
+	if (rc < 0) {
+		return false;
+	} else if (rc < tp->irq_cnt) {
+		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+			      tp->irq_cnt, rc);
+		tp->irq_cnt = rc;
+		tp->rxq_cnt = max(rc - 1, 1);
+		if (tp->txq_cnt)
+			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
+	}
+
+	for (i = 0; i < tp->irq_max; i++)
+		tp->napi[i].irq_vec = msix_ent[i].vector;
+
+	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
+		pci_disable_msix(tp->pdev);
+		return false;
+	}
+
+	if (tp->irq_cnt == 1)
+		return true;
+
+	tg3_flag_set(tp, ENABLE_RSS);
+
+	if (tp->txq_cnt > 1)
+		tg3_flag_set(tp, ENABLE_TSS);
+
+	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
+
+	return true;
+}
+
+static void tg3_ints_init(struct tg3 *tp)
+{
+	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+	    !tg3_flag(tp, TAGGED_STATUS)) {
+		/* All MSI supporting chips should support tagged
+		 * status.  If this one does not, warn and fall back
+		 * to the legacy INTx configuration.
+		 */
+		netdev_warn(tp->dev,
+			    "MSI without TAGGED_STATUS? Not using MSI\n");
+		goto defcfg;
+	}
+
+	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
+		tg3_flag_set(tp, USING_MSIX);
+	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
+		tg3_flag_set(tp, USING_MSI);
+
+	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+		u32 msi_mode = tr32(MSGINT_MODE);
+		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
+			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+		if (!tg3_flag(tp, 1SHOT_MSI))
+			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
+		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+	}
+defcfg:
+	if (!tg3_flag(tp, USING_MSIX)) {
+		tp->irq_cnt = 1;
+		tp->napi[0].irq_vec = tp->pdev->irq;
+	}
+
+	if (tp->irq_cnt == 1) {
+		tp->txq_cnt = 1;
+		tp->rxq_cnt = 1;
+		netif_set_real_num_tx_queues(tp->dev, 1);
+		netif_set_real_num_rx_queues(tp->dev, 1);
+	}
+}
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+	if (tg3_flag(tp, USING_MSIX))
+		pci_disable_msix(tp->pdev);
+	else if (tg3_flag(tp, USING_MSI))
+		pci_disable_msi(tp->pdev);
+	tg3_flag_clear(tp, USING_MSI);
+	tg3_flag_clear(tp, USING_MSIX);
+	tg3_flag_clear(tp, ENABLE_RSS);
+	tg3_flag_clear(tp, ENABLE_TSS);
+}
+
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
+		     bool init)
+{
+	struct net_device *dev = tp->dev;
+	int i, err;
+
+	/*
+	 * Set up interrupts first so we know how
+	 * many NAPI resources to allocate.
+	 */
+	tg3_ints_init(tp);
+
+	tg3_rss_check_indir_tbl(tp);
+
+	/* The placement of this call is tied
+	 * to the setup and use of Host TX descriptors.
+	 */
+	err = tg3_alloc_consistent(tp);
+	if (err)
+		goto out_ints_fini;
+
+	tg3_napi_init(tp);
+
+	tg3_napi_enable(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		err = tg3_request_irq(tp, i);
+		if (err) {
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
+				free_irq(tnapi->irq_vec, tnapi);
+			}
+			goto out_napi_fini;
+		}
+	}
+
+	tg3_full_lock(tp, 0);
+
+	if (init)
+		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
+	err = tg3_init_hw(tp, reset_phy);
+	if (err) {
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_free_rings(tp);
+	}
+
+	tg3_full_unlock(tp);
+
+	if (err)
+		goto out_free_irq;
+
+	if (test_irq && tg3_flag(tp, USING_MSI)) {
+		err = tg3_test_msi(tp);
+
+		if (err) {
+			tg3_full_lock(tp, 0);
+			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+			tg3_free_rings(tp);
+			tg3_full_unlock(tp);
+
+			goto out_napi_fini;
+		}
+
+		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+			u32 val = tr32(PCIE_TRANSACTION_CFG);
+
+			tw32(PCIE_TRANSACTION_CFG,
+			     val | PCIE_TRANS_CFG_1SHOT_MSI);
+		}
+	}
+
+	tg3_phy_start(tp);
+
+	tg3_hwmon_open(tp);
+
+	tg3_full_lock(tp, 0);
+
+	tg3_timer_start(tp);
+	tg3_flag_set(tp, INIT_COMPLETE);
+	tg3_enable_ints(tp);
+
+	tg3_ptp_resume(tp);
+
+	tg3_full_unlock(tp);
+
+	netif_tx_start_all_queues(dev);
+
+	/*
+	 * Reset the loopback feature if it was turned on while the device was
+	 * down, to make sure that it is installed properly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		tg3_set_loopback(dev, dev->features);
+
+	return 0;
+
+out_free_irq:
+	for (i = tp->irq_cnt - 1; i >= 0; i--) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		free_irq(tnapi->irq_vec, tnapi);
+	}
+
+out_napi_fini:
+	tg3_napi_disable(tp);
+	tg3_napi_fini(tp);
+	tg3_free_consistent(tp);
+
+out_ints_fini:
+	tg3_ints_fini(tp);
+
+	return err;
+}
+
+static void tg3_stop(struct tg3 *tp)
+{
+	int i;
+
+	tg3_reset_task_cancel(tp);
+	tg3_netif_stop(tp);
+
+	tg3_timer_stop(tp);
+
+	tg3_hwmon_close(tp);
+
+	tg3_phy_stop(tp);
+
+	tg3_full_lock(tp, 1);
+
+	tg3_disable_ints(tp);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	tg3_free_rings(tp);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+
+	tg3_full_unlock(tp);
+
+	for (i = tp->irq_cnt - 1; i >= 0; i--) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		free_irq(tnapi->irq_vec, tnapi);
+	}
+
+	tg3_ints_fini(tp);
+
+	tg3_napi_fini(tp);
+
+	tg3_free_consistent(tp);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (tp->pcierr_recovery) {
+		netdev_err(dev, "Failed to open device. PCI error recovery "
+			   "in progress\n");
+		return -EAGAIN;
+	}
+
+	if (tp->fw_needed) {
+		err = tg3_request_firmware(tp);
+		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+			if (err) {
+				netdev_warn(tp->dev, "EEE capability disabled\n");
+				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+				netdev_warn(tp->dev, "EEE capability restored\n");
+				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+			}
+		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+			if (err)
+				return err;
+		} else if (err) {
+			netdev_warn(tp->dev, "TSO capability disabled\n");
+			tg3_flag_clear(tp, TSO_CAPABLE);
+		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
+			netdev_notice(tp->dev, "TSO capability restored\n");
+			tg3_flag_set(tp, TSO_CAPABLE);
+		}
+	}
+
+	tg3_carrier_off(tp);
+
+	err = tg3_power_up(tp);
+	if (err)
+		return err;
+
+	tg3_full_lock(tp, 0);
+
+	tg3_disable_ints(tp);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+
+	tg3_full_unlock(tp);
+
+	err = tg3_start(tp,
+			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
+			true, true);
+	if (err) {
+		tg3_frob_aux_power(tp, false);
+		pci_set_power_state(tp->pdev, PCI_D3hot);
+	}
+
+	return err;
+}
+
+static int tg3_close(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tp->pcierr_recovery) {
+		netdev_err(dev, "Failed to close device. PCI error recovery "
+			   "in progress\n");
+		return -EAGAIN;
+	}
+
+	tg3_stop(tp);
+
+	/* Clear stats across close / open calls */
+	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
+
+	if (pci_device_is_present(tp->pdev)) {
+		tg3_power_down_prepare(tp);
+
+		tg3_carrier_off(tp);
+	}
+	return 0;
+}
+
+static inline u64 get_stat64(tg3_stat64_t *val)
+{
+	return ((u64)val->high << 32) | ((u64)val->low);
+}
+
+static u64 tg3_calc_crc_errors(struct tg3 *tp)
+{
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
+		u32 val;
+
+		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
+			tg3_writephy(tp, MII_TG3_TEST1,
+				     val | MII_TG3_TEST1_CRC_EN);
+			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
+		} else
+			val = 0;
+
+		tp->phy_crc_errors += val;
+
+		return tp->phy_crc_errors;
+	}
+
+	return get_stat64(&hw_stats->rx_fcs_errors);
+}
+
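+/* Each ethtool statistic is the previously saved snapshot in tp->estats_prev
+ * plus the live hardware counter.
+ */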
+#define ESTAT_ADD(member) \
+	estats->member =	old_estats->member + \
+				get_stat64(&hw_stats->member)
+
+static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
+{
+	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	ESTAT_ADD(rx_octets);
+	ESTAT_ADD(rx_fragments);
+	ESTAT_ADD(rx_ucast_packets);
+	ESTAT_ADD(rx_mcast_packets);
+	ESTAT_ADD(rx_bcast_packets);
+	ESTAT_ADD(rx_fcs_errors);
+	ESTAT_ADD(rx_align_errors);
+	ESTAT_ADD(rx_xon_pause_rcvd);
+	ESTAT_ADD(rx_xoff_pause_rcvd);
+	ESTAT_ADD(rx_mac_ctrl_rcvd);
+	ESTAT_ADD(rx_xoff_entered);
+	ESTAT_ADD(rx_frame_too_long_errors);
+	ESTAT_ADD(rx_jabbers);
+	ESTAT_ADD(rx_undersize_packets);
+	ESTAT_ADD(rx_in_length_errors);
+	ESTAT_ADD(rx_out_length_errors);
+	ESTAT_ADD(rx_64_or_less_octet_packets);
+	ESTAT_ADD(rx_65_to_127_octet_packets);
+	ESTAT_ADD(rx_128_to_255_octet_packets);
+	ESTAT_ADD(rx_256_to_511_octet_packets);
+	ESTAT_ADD(rx_512_to_1023_octet_packets);
+	ESTAT_ADD(rx_1024_to_1522_octet_packets);
+	ESTAT_ADD(rx_1523_to_2047_octet_packets);
+	ESTAT_ADD(rx_2048_to_4095_octet_packets);
+	ESTAT_ADD(rx_4096_to_8191_octet_packets);
+	ESTAT_ADD(rx_8192_to_9022_octet_packets);
+
+	ESTAT_ADD(tx_octets);
+	ESTAT_ADD(tx_collisions);
+	ESTAT_ADD(tx_xon_sent);
+	ESTAT_ADD(tx_xoff_sent);
+	ESTAT_ADD(tx_flow_control);
+	ESTAT_ADD(tx_mac_errors);
+	ESTAT_ADD(tx_single_collisions);
+	ESTAT_ADD(tx_mult_collisions);
+	ESTAT_ADD(tx_deferred);
+	ESTAT_ADD(tx_excessive_collisions);
+	ESTAT_ADD(tx_late_collisions);
+	ESTAT_ADD(tx_collide_2times);
+	ESTAT_ADD(tx_collide_3times);
+	ESTAT_ADD(tx_collide_4times);
+	ESTAT_ADD(tx_collide_5times);
+	ESTAT_ADD(tx_collide_6times);
+	ESTAT_ADD(tx_collide_7times);
+	ESTAT_ADD(tx_collide_8times);
+	ESTAT_ADD(tx_collide_9times);
+	ESTAT_ADD(tx_collide_10times);
+	ESTAT_ADD(tx_collide_11times);
+	ESTAT_ADD(tx_collide_12times);
+	ESTAT_ADD(tx_collide_13times);
+	ESTAT_ADD(tx_collide_14times);
+	ESTAT_ADD(tx_collide_15times);
+	ESTAT_ADD(tx_ucast_packets);
+	ESTAT_ADD(tx_mcast_packets);
+	ESTAT_ADD(tx_bcast_packets);
+	ESTAT_ADD(tx_carrier_sense_errors);
+	ESTAT_ADD(tx_discards);
+	ESTAT_ADD(tx_errors);
+
+	ESTAT_ADD(dma_writeq_full);
+	ESTAT_ADD(dma_write_prioq_full);
+	ESTAT_ADD(rxbds_empty);
+	ESTAT_ADD(rx_discards);
+	ESTAT_ADD(rx_errors);
+	ESTAT_ADD(rx_threshold_hit);
+
+	ESTAT_ADD(dma_readq_full);
+	ESTAT_ADD(dma_read_prioq_full);
+	ESTAT_ADD(tx_comp_queue_full);
+
+	ESTAT_ADD(ring_set_send_prod_index);
+	ESTAT_ADD(ring_status_update);
+	ESTAT_ADD(nic_irqs);
+	ESTAT_ADD(nic_avoided_irqs);
+	ESTAT_ADD(nic_tx_threshold_hit);
+
+	ESTAT_ADD(mbuf_lwm_thresh_hit);
+}
+
+static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+{
+	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	stats->rx_packets = old_stats->rx_packets +
+		get_stat64(&hw_stats->rx_ucast_packets) +
+		get_stat64(&hw_stats->rx_mcast_packets) +
+		get_stat64(&hw_stats->rx_bcast_packets);
+
+	stats->tx_packets = old_stats->tx_packets +
+		get_stat64(&hw_stats->tx_ucast_packets) +
+		get_stat64(&hw_stats->tx_mcast_packets) +
+		get_stat64(&hw_stats->tx_bcast_packets);
+
+	stats->rx_bytes = old_stats->rx_bytes +
+		get_stat64(&hw_stats->rx_octets);
+	stats->tx_bytes = old_stats->tx_bytes +
+		get_stat64(&hw_stats->tx_octets);
+
+	stats->rx_errors = old_stats->rx_errors +
+		get_stat64(&hw_stats->rx_errors);
+	stats->tx_errors = old_stats->tx_errors +
+		get_stat64(&hw_stats->tx_errors) +
+		get_stat64(&hw_stats->tx_mac_errors) +
+		get_stat64(&hw_stats->tx_carrier_sense_errors) +
+		get_stat64(&hw_stats->tx_discards);
+
+	stats->multicast = old_stats->multicast +
+		get_stat64(&hw_stats->rx_mcast_packets);
+	stats->collisions = old_stats->collisions +
+		get_stat64(&hw_stats->tx_collisions);
+
+	stats->rx_length_errors = old_stats->rx_length_errors +
+		get_stat64(&hw_stats->rx_frame_too_long_errors) +
+		get_stat64(&hw_stats->rx_undersize_packets);
+
+	stats->rx_frame_errors = old_stats->rx_frame_errors +
+		get_stat64(&hw_stats->rx_align_errors);
+	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+		get_stat64(&hw_stats->tx_discards);
+	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+		get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+	stats->rx_crc_errors = old_stats->rx_crc_errors +
+		tg3_calc_crc_errors(tp);
+
+	stats->rx_missed_errors = old_stats->rx_missed_errors +
+		get_stat64(&hw_stats->rx_discards);
+
+	stats->rx_dropped = tp->rx_dropped;
+	stats->tx_dropped = tp->tx_dropped;
+}
+
+static int tg3_get_regs_len(struct net_device *dev)
+{
+	return TG3_REG_BLK_SIZE;
+}
+
+static void tg3_get_regs(struct net_device *dev,
+		struct ethtool_regs *regs, void *_p)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	regs->version = 0;
+
+	memset(_p, 0, TG3_REG_BLK_SIZE);
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		return;
+
+	tg3_full_lock(tp, 0);
+
+	tg3_dump_legacy_regs(tp, (u32 *)_p);
+
+	tg3_full_unlock(tp);
+}
+
+static int tg3_get_eeprom_len(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	return tp->nvram_size;
+}
+
+static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int ret, cpmu_restore = 0;
+	u8  *pd;
+	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
+	__be32 val;
+
+	if (tg3_flag(tp, NO_NVRAM))
+		return -EINVAL;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	eeprom->magic = TG3_EEPROM_MAGIC;
+
+	/* Override clock, link aware and link idle modes */
+	if (tg3_flag(tp, CPMU_PRESENT)) {
+		cpmu_val = tr32(TG3_CPMU_CTRL);
+		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
+				CPMU_CTRL_LINK_IDLE_MODE)) {
+			tw32(TG3_CPMU_CTRL, cpmu_val &
+					    ~(CPMU_CTRL_LINK_AWARE_MODE |
+					     CPMU_CTRL_LINK_IDLE_MODE));
+			cpmu_restore = 1;
+		}
+	}
+	tg3_override_clk(tp);
+
+	if (offset & 3) {
+		/* adjustments to start on required 4 byte boundary */
+		b_offset = offset & 3;
+		b_count = 4 - b_offset;
+		if (b_count > len) {
+			/* i.e. offset=1 len=2 */
+			b_count = len;
+		}
+		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
+		if (ret)
+			goto eeprom_done;
+		memcpy(data, ((char *)&val) + b_offset, b_count);
+		len -= b_count;
+		offset += b_count;
+		eeprom->len += b_count;
+	}
+
+	/* read bytes up to the last 4 byte boundary */
+	pd = &data[eeprom->len];
+	for (i = 0; i < (len - (len & 3)); i += 4) {
+		ret = tg3_nvram_read_be32(tp, offset + i, &val);
+		if (ret) {
+			if (i)
+				i -= 4;
+			eeprom->len += i;
+			goto eeprom_done;
+		}
+		memcpy(pd + i, &val, 4);
+		if (need_resched()) {
+			if (signal_pending(current)) {
+				eeprom->len += i;
+				ret = -EINTR;
+				goto eeprom_done;
+			}
+			cond_resched();
+		}
+	}
+	eeprom->len += i;
+
+	if (len & 3) {
+		/* read last bytes not ending on 4 byte boundary */
+		pd = &data[eeprom->len];
+		b_count = len & 3;
+		b_offset = offset + len - b_count;
+		ret = tg3_nvram_read_be32(tp, b_offset, &val);
+		if (ret)
+			goto eeprom_done;
+		memcpy(pd, &val, b_count);
+		eeprom->len += b_count;
+	}
+	ret = 0;
+
+eeprom_done:
+	/* Restore clock, link aware and link idle modes */
+	tg3_restore_clk(tp);
+	if (cpmu_restore)
+		tw32(TG3_CPMU_CTRL, cpmu_val);
+
+	return ret;
+}
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int ret;
+	u32 offset, len, b_offset, odd_len;
+	u8 *buf;
+	__be32 start = 0, end;
+
+	if (tg3_flag(tp, NO_NVRAM) ||
+	    eeprom->magic != TG3_EEPROM_MAGIC)
+		return -EINVAL;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+
+	if ((b_offset = (offset & 3))) {
+		/* adjustments to start on required 4 byte boundary */
+		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
+		if (ret)
+			return ret;
+		len += b_offset;
+		offset &= ~3;
+		if (len < 4)
+			len = 4;
+	}
+
+	odd_len = 0;
+	if (len & 3) {
+		/* adjustments to end on required 4 byte boundary */
+		odd_len = 1;
+		len = (len + 3) & ~3;
+		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
+		if (ret)
+			return ret;
+	}
+
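+	/* If either end of the request was unaligned, stage the write in a
+	 * scratch buffer: the preserved boundary words read above go at the
+	 * front and/or back, with the caller's data copied in between.
+	 */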
+	buf = data;
+	if (b_offset || odd_len) {
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		if (b_offset)
+			memcpy(buf, &start, 4);
+		if (odd_len)
+			memcpy(buf+len-4, &end, 4);
+		memcpy(buf + b_offset, data, eeprom->len);
+	}
+
+	ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+	if (buf != data)
+		kfree(buf);
+
+	return ret;
+}
+
+static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+		return phy_ethtool_gset(phydev, cmd);
+	}
+
+	cmd->supported = (SUPPORTED_Autoneg);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+		cmd->supported |= (SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+		cmd->supported |= (SUPPORTED_100baseT_Half |
+				  SUPPORTED_100baseT_Full |
+				  SUPPORTED_10baseT_Half |
+				  SUPPORTED_10baseT_Full |
+				  SUPPORTED_TP);
+		cmd->port = PORT_TP;
+	} else {
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->port = PORT_FIBRE;
+	}
+
+	cmd->advertising = tp->link_config.advertising;
+	if (tg3_flag(tp, PAUSE_AUTONEG)) {
+		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+				cmd->advertising |= ADVERTISED_Pause;
+			} else {
+				cmd->advertising |= ADVERTISED_Pause |
+						    ADVERTISED_Asym_Pause;
+			}
+		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+			cmd->advertising |= ADVERTISED_Asym_Pause;
+		}
+	}
+	if (netif_running(dev) && tp->link_up) {
+		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
+		cmd->duplex = tp->link_config.active_duplex;
+		cmd->lp_advertising = tp->link_config.rmt_adv;
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+				cmd->eth_tp_mdix = ETH_TP_MDI_X;
+			else
+				cmd->eth_tp_mdix = ETH_TP_MDI;
+		}
+	} else {
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+	}
+	cmd->phy_address = tp->phy_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->autoneg = tp->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 speed = ethtool_cmd_speed(cmd);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+		return phy_ethtool_sset(phydev, cmd);
+	}
+
+	if (cmd->autoneg != AUTONEG_ENABLE &&
+	    cmd->autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_DISABLE &&
+	    cmd->duplex != DUPLEX_FULL &&
+	    cmd->duplex != DUPLEX_HALF)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		u32 mask = ADVERTISED_Autoneg |
+			   ADVERTISED_Pause |
+			   ADVERTISED_Asym_Pause;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+			mask |= ADVERTISED_1000baseT_Half |
+				ADVERTISED_1000baseT_Full;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+			mask |= ADVERTISED_100baseT_Half |
+				ADVERTISED_100baseT_Full |
+				ADVERTISED_10baseT_Half |
+				ADVERTISED_10baseT_Full |
+				ADVERTISED_TP;
+		else
+			mask |= ADVERTISED_FIBRE;
+
+		if (cmd->advertising & ~mask)
+			return -EINVAL;
+
+		mask &= (ADVERTISED_1000baseT_Half |
+			 ADVERTISED_1000baseT_Full |
+			 ADVERTISED_100baseT_Half |
+			 ADVERTISED_100baseT_Full |
+			 ADVERTISED_10baseT_Half |
+			 ADVERTISED_10baseT_Full);
+
+		cmd->advertising &= mask;
+	} else {
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
+			if (speed != SPEED_1000)
+				return -EINVAL;
+
+			if (cmd->duplex != DUPLEX_FULL)
+				return -EINVAL;
+		} else {
+			if (speed != SPEED_100 &&
+			    speed != SPEED_10)
+				return -EINVAL;
+		}
+	}
+
+	tg3_full_lock(tp, 0);
+
+	tp->link_config.autoneg = cmd->autoneg;
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		tp->link_config.advertising = (cmd->advertising |
+					      ADVERTISED_Autoneg);
+		tp->link_config.speed = SPEED_UNKNOWN;
+		tp->link_config.duplex = DUPLEX_UNKNOWN;
+	} else {
+		tp->link_config.advertising = 0;
+		tp->link_config.speed = speed;
+		tp->link_config.duplex = cmd->duplex;
+	}
+
+	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
+	tg3_warn_mgmt_link_flap(tp);
+
+	if (netif_running(dev))
+		tg3_setup_phy(tp, true);
+
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+}
+
+static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+	wol->wolopts = 0;
+	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
+		wol->wolopts = WAKE_MAGIC;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct device *dp = &tp->pdev->dev;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+	if ((wol->wolopts & WAKE_MAGIC) &&
+	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
+		return -EINVAL;
+
+	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
+	if (device_may_wakeup(dp))
+		tg3_flag_set(tp, WOL_ENABLE);
+	else
+		tg3_flag_clear(tp, WOL_ENABLE);
+
+	return 0;
+}
+
+static u32 tg3_get_msglevel(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	return tp->msg_enable;
+}
+
+static void tg3_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	tp->msg_enable = value;
+}
+
+static int tg3_nway_reset(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int r;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		return -EINVAL;
+
+	tg3_warn_mgmt_link_flap(tp);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
+	} else {
+		u32 bmcr;
+
+		spin_lock_bh(&tp->lock);
+		r = -EINVAL;
+		tg3_readphy(tp, MII_BMCR, &bmcr);
+		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+		    ((bmcr & BMCR_ANENABLE) ||
+		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
+			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+						   BMCR_ANENABLE);
+			r = 0;
+		}
+		spin_unlock_bh(&tp->lock);
+	}
+
+	return r;
+}
+
+static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	ering->rx_max_pending = tp->rx_std_ring_mask;
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
+	else
+		ering->rx_jumbo_max_pending = 0;
+
+	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
+
+	ering->rx_pending = tp->rx_pending;
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+	else
+		ering->rx_jumbo_pending = 0;
+
+	ering->tx_pending = tp->napi[0].tx_pending;
+}
+
+static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int i, irq_sync = 0, err = 0;
+
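+	/* Reject ring sizes outside the hardware limits.  The TX ring must
+	 * also leave room for a maximally fragmented skb, and three times
+	 * that on chips needing the TSO bug workaround.
+	 */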
+	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
+	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
+	    (tg3_flag(tp, TSO_BUG) &&
+	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
+		return -EINVAL;
+
+	if (netif_running(dev)) {
+		tg3_phy_stop(tp);
+		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
+
+	tg3_full_lock(tp, irq_sync);
+
+	tp->rx_pending = ering->rx_pending;
+
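+	/* Some chips can post at most 64 standard RX BDs; clamp accordingly. */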
+	if (tg3_flag(tp, MAX_RXPEND_64) &&
+	    tp->rx_pending > 63)
+		tp->rx_pending = 63;
+
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+	for (i = 0; i < tp->irq_max; i++)
+		tp->napi[i].tx_pending = ering->tx_pending;
+
+	if (netif_running(dev)) {
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		err = tg3_restart_hw(tp, false);
+		if (!err)
+			tg3_netif_start(tp);
+	}
+
+	tg3_full_unlock(tp);
+
+	if (irq_sync && !err)
+		tg3_phy_start(tp);
+
+	return err;
+}
+
+static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
+
+	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
+		epause->rx_pause = 1;
+	else
+		epause->rx_pause = 0;
+
+	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
+		epause->tx_pause = 1;
+	else
+		epause->tx_pause = 0;
+}
+
+static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err = 0;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE)
+		tg3_warn_mgmt_link_flap(tp);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		u32 newadv;
+		struct phy_device *phydev;
+
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+
+		if (!(phydev->supported & SUPPORTED_Pause) ||
+		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+		     (epause->rx_pause != epause->tx_pause)))
+			return -EINVAL;
+
+		tp->link_config.flowctrl = 0;
+		if (epause->rx_pause) {
+			tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+			if (epause->tx_pause) {
+				tp->link_config.flowctrl |= FLOW_CTRL_TX;
+				newadv = ADVERTISED_Pause;
+			} else
+				newadv = ADVERTISED_Pause |
+					 ADVERTISED_Asym_Pause;
+		} else if (epause->tx_pause) {
+			tp->link_config.flowctrl |= FLOW_CTRL_TX;
+			newadv = ADVERTISED_Asym_Pause;
+		} else
+			newadv = 0;
+
+		if (epause->autoneg)
+			tg3_flag_set(tp, PAUSE_AUTONEG);
+		else
+			tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+			u32 oldadv = phydev->advertising &
+				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+			if (oldadv != newadv) {
+				phydev->advertising &=
+					~(ADVERTISED_Pause |
+					  ADVERTISED_Asym_Pause);
+				phydev->advertising |= newadv;
+				if (phydev->autoneg) {
+					/*
+					 * Always renegotiate the link to
+					 * inform our link partner of our
+					 * flow control settings, even if the
+					 * flow control is forced.  Let
+					 * tg3_adjust_link() do the final
+					 * flow control setup.
+					 */
+					return phy_start_aneg(phydev);
+				}
+			}
+
+			if (!epause->autoneg)
+				tg3_setup_flow_control(tp, 0, 0);
+		} else {
+			tp->link_config.advertising &=
+					~(ADVERTISED_Pause |
+					  ADVERTISED_Asym_Pause);
+			tp->link_config.advertising |= newadv;
+		}
+	} else {
+		int irq_sync = 0;
+
+		if (netif_running(dev)) {
+			tg3_netif_stop(tp);
+			irq_sync = 1;
+		}
+
+		tg3_full_lock(tp, irq_sync);
+
+		if (epause->autoneg)
+			tg3_flag_set(tp, PAUSE_AUTONEG);
+		else
+			tg3_flag_clear(tp, PAUSE_AUTONEG);
+		if (epause->rx_pause)
+			tp->link_config.flowctrl |= FLOW_CTRL_RX;
+		else
+			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
+		if (epause->tx_pause)
+			tp->link_config.flowctrl |= FLOW_CTRL_TX;
+		else
+			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
+
+		if (netif_running(dev)) {
+			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+			err = tg3_restart_hw(tp, false);
+			if (!err)
+				tg3_netif_start(tp);
+		}
+
+		tg3_full_unlock(tp);
+	}
+
+	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
+	return err;
+}
+
+static int tg3_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return TG3_NUM_TEST;
+	case ETH_SS_STATS:
+		return TG3_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			 u32 *rules __always_unused)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return -EOPNOTSUPP;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (netif_running(tp->dev))
+			info->data = tp->rxq_cnt;
+		else {
+			info->data = num_online_cpus();
+			if (info->data > TG3_RSS_MAX_NUM_QS)
+				info->data = TG3_RSS_MAX_NUM_QS;
+		}
+
+		/* The first interrupt vector only
+		 * handles link interrupts.
+		 */
+		info->data -= 1;
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+	u32 size = 0;
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, SUPPORT_MSIX))
+		size = TG3_RSS_INDIR_TBL_SIZE;
+
+	return size;
+}
+
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (!indir)
+		return 0;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		indir[i] = tp->rss_ind_tbl[i];
+
+	return 0;
+}
+
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+			const u8 hfunc)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	size_t i;
+
+	/* We require at least one supported parameter to be changed and no
+	 * change in any of the unsupported parameters
+	 */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+
+	if (!indir)
+		return 0;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		tp->rss_ind_tbl[i] = indir[i];
+
+	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+		return 0;
+
+	/* It is legal to write the indirection
+	 * table while the device is running.
+	 */
+	tg3_full_lock(tp, 0);
+	tg3_rss_write_indir_tbl(tp);
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static void tg3_get_channels(struct net_device *dev,
+			     struct ethtool_channels *channel)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 deflt_qs = netif_get_num_default_rss_queues();
+
+	channel->max_rx = tp->rxq_max;
+	channel->max_tx = tp->txq_max;
+
+	if (netif_running(dev)) {
+		channel->rx_count = tp->rxq_cnt;
+		channel->tx_count = tp->txq_cnt;
+	} else {
+		if (tp->rxq_req)
+			channel->rx_count = tp->rxq_req;
+		else
+			channel->rx_count = min(deflt_qs, tp->rxq_max);
+
+		if (tp->txq_req)
+			channel->tx_count = tp->txq_req;
+		else
+			channel->tx_count = min(deflt_qs, tp->txq_max);
+	}
+}
+
+static int tg3_set_channels(struct net_device *dev,
+			    struct ethtool_channels *channel)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return -EOPNOTSUPP;
+
+	if (channel->rx_count > tp->rxq_max ||
+	    channel->tx_count > tp->txq_max)
+		return -EINVAL;
+
+	tp->rxq_req = channel->rx_count;
+	tp->txq_req = channel->tx_count;
+
+	if (!netif_running(dev))
+		return 0;
+
+	tg3_stop(tp);
+
+	tg3_carrier_off(tp);
+
+	tg3_start(tp, true, false, false);
+
+	return 0;
+}
+
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
+	default:
+		WARN_ON(1);	/* we need a WARN() */
+		break;
+	}
+}
+
+static int tg3_set_phys_id(struct net_device *dev,
+			    enum ethtool_phys_id_state state)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!netif_running(tp->dev))
+		return -EAGAIN;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+		     LED_CTRL_1000MBPS_ON |
+		     LED_CTRL_100MBPS_ON |
+		     LED_CTRL_10MBPS_ON |
+		     LED_CTRL_TRAFFIC_OVERRIDE |
+		     LED_CTRL_TRAFFIC_BLINK |
+		     LED_CTRL_TRAFFIC_LED);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+		     LED_CTRL_TRAFFIC_OVERRIDE);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		tw32(MAC_LED_CTRL, tp->led_ctrl);
+		break;
+	}
+
+	return 0;
+}
+
+static void tg3_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *estats, u64 *tmp_stats)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tp->hw_stats)
+		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
+	else
+		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
+}
+
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+{
+	int i;
+	__be32 *buf;
+	u32 offset = 0, len = 0;
+	u32 magic, val;
+
+	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
+		return NULL;
+
+	if (magic == TG3_EEPROM_MAGIC) {
+		for (offset = TG3_NVM_DIR_START;
+		     offset < TG3_NVM_DIR_END;
+		     offset += TG3_NVM_DIRENT_SIZE) {
+			if (tg3_nvram_read(tp, offset, &val))
+				return NULL;
+
+			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
+			    TG3_NVM_DIRTYPE_EXTVPD)
+				break;
+		}
+
+		if (offset != TG3_NVM_DIR_END) {
+			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
+			if (tg3_nvram_read(tp, offset + 4, &offset))
+				return NULL;
+
+			offset = tg3_nvram_logical_addr(tp, offset);
+		}
+	}
+
+	if (!offset || !len) {
+		offset = TG3_NVM_VPD_OFF;
+		len = TG3_NVM_VPD_LEN;
+	}
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (buf == NULL)
+		return NULL;
+
+	if (magic == TG3_EEPROM_MAGIC) {
+		for (i = 0; i < len; i += 4) {
+			/* The data is in little-endian format in NVRAM.
+			 * Use the big-endian read routines to preserve
+			 * the byte order as it exists in NVRAM.
+			 */
+			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
+				goto error;
+		}
+	} else {
+		u8 *ptr;
+		ssize_t cnt;
+		unsigned int pos = 0;
+
+		ptr = (u8 *)&buf[0];
+		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
+			cnt = pci_read_vpd(tp->pdev, pos,
+					   len - pos, ptr);
+			if (cnt == -ETIMEDOUT || cnt == -EINTR)
+				cnt = 0;
+			else if (cnt < 0)
+				goto error;
+		}
+		if (pos != len)
+			goto error;
+	}
+
+	*vpdlen = len;
+
+	return buf;
+
+error:
+	kfree(buf);
+	return NULL;
+}
+
+#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
+#define NVRAM_SELFBOOT_HW_SIZE 0x20
+#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+	u32 csum, magic, len;
+	__be32 *buf;
+	int i, j, k, err = 0, size;
+
+	if (tg3_flag(tp, NO_NVRAM))
+		return 0;
+
+	if (tg3_nvram_read(tp, 0, &magic) != 0)
+		return -EIO;
+
+	if (magic == TG3_EEPROM_MAGIC)
+		size = NVRAM_TEST_SIZE;
+	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
+		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+		    TG3_EEPROM_SB_FORMAT_1) {
+			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+			case TG3_EEPROM_SB_REVISION_0:
+				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_2:
+				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_3:
+				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_4:
+				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_5:
+				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_6:
+				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+				break;
+			default:
+				return -EIO;
+			}
+		} else
+			return 0;
+	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+		size = NVRAM_SELFBOOT_HW_SIZE;
+	else
+		return -EIO;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	err = -EIO;
+	for (i = 0, j = 0; i < size; i += 4, j++) {
+		err = tg3_nvram_read_be32(tp, i, &buf[j]);
+		if (err)
+			break;
+	}
+	if (i < size)
+		goto out;
+
+	/* Selfboot format */
+	magic = be32_to_cpu(buf[0]);
+	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
+	    TG3_EEPROM_MAGIC_FW) {
+		u8 *buf8 = (u8 *) buf, csum8 = 0;
+
+		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
+		    TG3_EEPROM_SB_REVISION_2) {
+			/* For rev 2, the csum doesn't include the MBA. */
+			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+				csum8 += buf8[i];
+			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+				csum8 += buf8[i];
+		} else {
+			for (i = 0; i < size; i++)
+				csum8 += buf8[i];
+		}
+
+		if (csum8 == 0) {
+			err = 0;
+			goto out;
+		}
+
+		err = -EIO;
+		goto out;
+	}
+
+	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
+	    TG3_EEPROM_MAGIC_HW) {
+		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
+		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
+		u8 *buf8 = (u8 *) buf;
+
+		/* Separate the parity bits and the data bytes.  */
+		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
+			if ((i == 0) || (i == 8)) {
+				int l;
+				u8 msk;
+
+				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+			} else if (i == 16) {
+				int l;
+				u8 msk;
+
+				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+
+				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+			}
+			data[j++] = buf8[i];
+		}
+
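+		/* Each data byte together with its stored parity bit must
+		 * have odd parity.
+		 */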
+		err = -EIO;
+		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
+			u8 hw8 = hweight8(data[i]);
+
+			if ((hw8 & 0x1) && parity[i])
+				goto out;
+			else if (!(hw8 & 0x1) && !parity[i])
+				goto out;
+		}
+		err = 0;
+		goto out;
+	}
+
+	err = -EIO;
+
+	/* Bootstrap checksum at offset 0x10 */
+	csum = calc_crc((unsigned char *) buf, 0x10);
+	if (csum != le32_to_cpu(buf[0x10/4]))
+		goto out;
+
+	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+	if (csum != le32_to_cpu(buf[0xfc/4]))
+		goto out;
+
+	kfree(buf);
+
+	buf = tg3_vpd_readblock(tp, &len);
+	if (!buf)
+		return -ENOMEM;
+
+	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+	if (i > 0) {
+		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+		if (j < 0)
+			goto out;
+
+		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
+			goto out;
+
+		i += PCI_VPD_LRDT_TAG_SIZE;
+		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+					      PCI_VPD_RO_KEYWORD_CHKSUM);
+		if (j > 0) {
+			u8 csum8 = 0;
+
+			j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+			for (i = 0; i <= j; i++)
+				csum8 += ((u8 *)buf)[i];
+
+			if (csum8)
+				goto out;
+		}
+	}
+
+	err = 0;
+
+out:
+	kfree(buf);
+	return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC	2
+#define TG3_COPPER_TIMEOUT_SEC	6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+	int i, max;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+		max = TG3_SERDES_TIMEOUT_SEC;
+	else
+		max = TG3_COPPER_TIMEOUT_SEC;
+
+	for (i = 0; i < max; i++) {
+		if (tp->link_up)
+			return 0;
+
+		if (msleep_interruptible(1000))
+			break;
+	}
+
+	return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+	int i, is_5705, is_5750;
+	u32 offset, read_mask, write_mask, val, save_val, read_val;
+	static struct {
+		u16 offset;
+		u16 flags;
+#define TG3_FL_5705	0x1
+#define TG3_FL_NOT_5705	0x2
+#define TG3_FL_NOT_5788	0x4
+#define TG3_FL_NOT_5750	0x8
+		u32 read_mask;
+		u32 write_mask;
+	} reg_tbl[] = {
+		/* MAC Control Registers */
+		{ MAC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00ef6f8c },
+		{ MAC_MODE, TG3_FL_5705,
+			0x00000000, 0x01ef6b8c },
+		{ MAC_STATUS, TG3_FL_NOT_5705,
+			0x03800107, 0x00000000 },
+		{ MAC_STATUS, TG3_FL_5705,
+			0x03800100, 0x00000000 },
+		{ MAC_ADDR_0_HIGH, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_ADDR_0_LOW, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_RX_MTU_SIZE, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_TX_MODE, 0x0000,
+			0x00000000, 0x00000070 },
+		{ MAC_TX_LENGTHS, 0x0000,
+			0x00000000, 0x00003fff },
+		{ MAC_RX_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x000007fc },
+		{ MAC_RX_MODE, TG3_FL_5705,
+			0x00000000, 0x000007dc },
+		{ MAC_HASH_REG_0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_1, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_2, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_3, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive Data and Receive BD Initiator Control Registers. */
+		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+			0x00000000, 0x00000003 },
+		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+8, 0x0000,
+			0x00000000, 0xffff0002 },
+		{ RCVDBDI_STD_BD+0xc, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive BD Initiator Control Registers. */
+		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVBDI_STD_THRESH, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+
+		/* Host Coalescing Control Registers. */
+		{ HOSTCC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00000004 },
+		{ HOSTCC_MODE, TG3_FL_5705,
+			0x00000000, 0x000000f6 },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+
+		/* Buffer Manager Control Registers. */
+		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+			0x00000000, 0x007fff80 },
+		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+			0x00000000, 0x007fffff },
+		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+			0x00000000, 0x0000003f },
+		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_MB_HIGH_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+
+		/* Mailbox Registers */
+		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+			0x00000000, 0x000007ff },
+		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+			0x00000000, 0x000001ff },
+
+		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
+	};
+
+	is_5705 = is_5750 = 0;
+	if (tg3_flag(tp, 5705_PLUS)) {
+		is_5705 = 1;
+		if (tg3_flag(tp, 5750_PLUS))
+			is_5750 = 1;
+	}
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+			continue;
+
+		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+			continue;
+
+		if (tg3_flag(tp, IS_5788) &&
+		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
+			continue;
+
+		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		read_mask = reg_tbl[i].read_mask;
+		write_mask = reg_tbl[i].write_mask;
+
+		/* Save the original register content */
+		save_val = tr32(offset);
+
+		/* Determine the read-only value. */
+		read_val = save_val & read_mask;
+
+		/* Write zero to the register, then make sure the read-only bits
+		 * are not changed and the read/write bits are all zeros.
+		 */
+		tw32(offset, 0);
+
+		val = tr32(offset);
+
+		/* Test the read-only and read/write bits. */
+		if (((val & read_mask) != read_val) || (val & write_mask))
+			goto out;
+
+		/* Write ones to all the bits defined by RdMask and WrMask, then
+		 * make sure the read-only bits are not changed and the
+		 * read/write bits are all ones.
+		 */
+		tw32(offset, read_mask | write_mask);
+
+		val = tr32(offset);
+
+		/* Test the read-only bits. */
+		if ((val & read_mask) != read_val)
+			goto out;
+
+		/* Test the read/write bits. */
+		if ((val & write_mask) != write_mask)
+			goto out;
+
+		tw32(offset, save_val);
+	}
+
+	return 0;
+
+out:
+	if (netif_msg_hw(tp))
+		netdev_err(tp->dev,
+			   "Register test failed at offset %x\n", offset);
+	tw32(offset, save_val);
+	return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+	int i;
+	u32 j;
+
+	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
+		for (j = 0; j < len; j += 4) {
+			u32 val;
+
+			tg3_write_mem(tp, offset + j, test_pattern[i]);
+			tg3_read_mem(tp, offset + j, &val);
+			if (val != test_pattern[i])
+				return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+	static struct mem_entry {
+		u32 offset;
+		u32 len;
+	} mem_tbl_570x[] = {
+		{ 0x00000000, 0x00b50},
+		{ 0x00002000, 0x1c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5705[] = {
+		{ 0x00000100, 0x0000c},
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x01000},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0e000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5755[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x00800},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5906[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00400},
+		{ 0x00006000, 0x00400},
+		{ 0x00008000, 0x01000},
+		{ 0x00010000, 0x01000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5717[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00010000, 0x0a000},
+		{ 0x00020000, 0x13c00},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_57765[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x09800},
+		{ 0x00010000, 0x0a000},
+		{ 0xffffffff, 0x00000}
+	};
+	struct mem_entry *mem_tbl;
+	int err = 0;
+	int i;
+
+	if (tg3_flag(tp, 5717_PLUS))
+		mem_tbl = mem_tbl_5717;
+	else if (tg3_flag(tp, 57765_CLASS) ||
+		 tg3_asic_rev(tp) == ASIC_REV_5762)
+		mem_tbl = mem_tbl_57765;
+	else if (tg3_flag(tp, 5755_PLUS))
+		mem_tbl = mem_tbl_5755;
+	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
+		mem_tbl = mem_tbl_5906;
+	else if (tg3_flag(tp, 5705_PLUS))
+		mem_tbl = mem_tbl_5705;
+	else
+		mem_tbl = mem_tbl_570x;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+#define TG3_TSO_MSS		500
+
+#define TG3_TSO_IP_HDR_LEN	20
+#define TG3_TSO_TCP_HDR_LEN	20
+#define TG3_TSO_TCP_OPT_LEN	12
+
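+/* Template for the TSO loopback test frame: Ethertype (IPv4) followed by a
+ * 20-byte IP header and a 20-byte TCP header with 12 bytes of options.  It
+ * is copied in just after the destination/source MAC addresses.
+ */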
+static const u8 tg3_tso_header[] = {
+0x08, 0x00,
+0x45, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00,
+0x40, 0x06, 0x00, 0x00,
+0x0a, 0x00, 0x00, 0x01,
+0x0a, 0x00, 0x00, 0x02,
+0x0d, 0x00, 0xe0, 0x00,
+0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x02, 0x00,
+0x80, 0x10, 0x10, 0x00,
+0x14, 0x09, 0x00, 0x00,
+0x01, 0x01, 0x08, 0x0a,
+0x11, 0x11, 0x11, 0x11,
+0x11, 0x11, 0x11, 0x11,
+};
+
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
+{
+	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
+	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+	u32 budget;
+	struct sk_buff *skb;
+	u8 *tx_data, *rx_data;
+	dma_addr_t map;
+	int num_pkts, tx_len, rx_len, i, err;
+	struct tg3_rx_buffer_desc *desc;
+	struct tg3_napi *tnapi, *rnapi;
+	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
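+	/* When RSS/TSS is enabled, vector 0 only services link interrupts,
+	 * so direct the test traffic through vector 1.
+	 */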
+	tnapi = &tp->napi[0];
+	rnapi = &tp->napi[0];
+	if (tp->irq_cnt > 1) {
+		if (tg3_flag(tp, ENABLE_RSS))
+			rnapi = &tp->napi[1];
+		if (tg3_flag(tp, ENABLE_TSS))
+			tnapi = &tp->napi[1];
+	}
+	coal_now = tnapi->coal_now | rnapi->coal_now;
+
+	err = -EIO;
+
+	tx_len = pktsz;
+	skb = netdev_alloc_skb(tp->dev, tx_len);
+	if (!skb)
+		return -ENOMEM;
+
+	tx_data = skb_put(skb, tx_len);
+	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+	memset(tx_data + ETH_ALEN, 0x0, 8);
+
+	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
+
+	if (tso_loopback) {
+		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
+
+		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
+			      TG3_TSO_TCP_OPT_LEN;
+
+		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
+		       sizeof(tg3_tso_header));
+		mss = TG3_TSO_MSS;
+
+		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
+		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
+
+		/* Set the total length field in the IP header */
+		iph->tot_len = htons((u16)(mss + hdr_len));
+
+		base_flags = (TXD_FLAG_CPU_PRE_DMA |
+			      TXD_FLAG_CPU_POST_DMA);
+
+		if (tg3_flag(tp, HW_TSO_1) ||
+		    tg3_flag(tp, HW_TSO_2) ||
+		    tg3_flag(tp, HW_TSO_3)) {
+			struct tcphdr *th;
+			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
+			th = (struct tcphdr *)&tx_data[val];
+			th->check = 0;
+		} else
+			base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
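+		/* Encode the header length into the mss/flags fields in the
+		 * layout each hardware TSO generation expects.
+		 */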
+		if (tg3_flag(tp, HW_TSO_3)) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tg3_flag(tp, HW_TSO_2))
+			mss |= hdr_len << 9;
+		else if (tg3_flag(tp, HW_TSO_1) ||
+			 tg3_asic_rev(tp) == ASIC_REV_5705) {
+			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
+		} else {
+			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
+		}
+
+		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
+	} else {
+		num_pkts = 1;
+		data_off = ETH_HLEN;
+
+		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+		    tx_len > VLAN_ETH_FRAME_LEN)
+			base_flags |= TXD_FLAG_JMB_PKT;
+	}
+
+	for (i = data_off; i < tx_len; i++)
+		tx_data[i] = (u8) (i & 0xff);
+
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	val = tnapi->tx_prod;
+	tnapi->tx_buffers[val].skb = skb;
+	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       rnapi->coal_now);
+
+	udelay(10);
+
+	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
+
+	budget = tg3_tx_avail(tnapi);
+	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+			    base_flags | TXD_FLAG_END, mss, 0)) {
+		tnapi->tx_buffers[val].skb = NULL;
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	tnapi->tx_prod++;
+
+	/* Sync BD data before updating mailbox */
+	wmb();
+
+	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+	tr32_mailbox(tnapi->prodmbox);
+
+	udelay(10);
+
+	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
+	for (i = 0; i < 35; i++) {
+		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+		       coal_now);
+
+		udelay(10);
+
+		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+		rx_idx = rnapi->hw_status->idx[0].rx_producer;
+		if ((tx_idx == tnapi->tx_prod) &&
+		    (rx_idx == (rx_start_idx + num_pkts)))
+			break;
+	}
+
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
+	dev_kfree_skb(skb);
+
+	if (tx_idx != tnapi->tx_prod)
+		goto out;
+
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto out;
+
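+	/* Walk the RX return ring and validate every received test frame:
+	 * error flags, length and producer ring for plain frames, checksum
+	 * status for TSO frames, and finally the payload pattern.
+	 */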
+	val = data_off;
+	while (rx_idx != rx_start_idx) {
+		desc = &rnapi->rx_rcb[rx_start_idx++];
+		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+
+		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+			goto out;
+
+		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
+			 - ETH_FCS_LEN;
+
+		if (!tso_loopback) {
+			if (rx_len != tx_len)
+				goto out;
+
+			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
+				if (opaque_key != RXD_OPAQUE_RING_STD)
+					goto out;
+			} else {
+				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
+					goto out;
+			}
+		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
+			goto out;
+		}
+
+		if (opaque_key == RXD_OPAQUE_RING_STD) {
+			rx_data = tpr->rx_std_buffers[desc_idx].data;
+			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
+					     mapping);
+		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
+			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
+					     mapping);
+		} else
+			goto out;
+
+		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
+					    PCI_DMA_FROMDEVICE);
+
+		rx_data += TG3_RX_OFFSET(tp);
+		for (i = data_off; i < rx_len; i++, val++) {
+			if (*(rx_data + i) != (u8) (val & 0xff))
+				goto out;
+		}
+	}
+
+	err = 0;
+
+	/* tg3_free_rings will unmap and free the rx_data */
+out:
+	return err;
+}
+
+#define TG3_STD_LOOPBACK_FAILED		1
+#define TG3_JMB_LOOPBACK_FAILED		2
+#define TG3_TSO_LOOPBACK_FAILED		4
+#define TG3_LOOPBACK_FAILED \
+	(TG3_STD_LOOPBACK_FAILED | \
+	 TG3_JMB_LOOPBACK_FAILED | \
+	 TG3_TSO_LOOPBACK_FAILED)
+
+static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
+{
+	int err = -EIO;
+	u32 eee_cap;
+	u32 jmb_pkt_sz = 9000;
+
+	if (tp->dma_limit)
+		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
+
+	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
+	if (!netif_running(tp->dev)) {
+		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		if (do_extlpbk)
+			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		goto done;
+	}
+
+	err = tg3_reset_hw(tp, true);
+	if (err) {
+		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		if (do_extlpbk)
+			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+		goto done;
+	}
+
+	if (tg3_flag(tp, ENABLE_RSS)) {
+		int i;
+
+		/* Reroute all rx packets to the 1st queue */
+		for (i = MAC_RSS_INDIR_TBL_0;
+		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+			tw32(i, 0x0);
+	}
+
+	/* HW errata - mac loopback fails in some cases on 5780.
+	 * Normal traffic and PHY loopback are not affected by
+	 * errata.  Also, the MAC loopback test is deprecated for
+	 * all newer ASIC revisions.
+	 */
+	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT)) {
+		tg3_mac_loopback(tp, true);
+
+		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
+
+		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
+
+		tg3_mac_loopback(tp, false);
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    !tg3_flag(tp, USE_PHYLIB)) {
+		int i;
+
+		tg3_phy_lpbk_set(tp, 0, false);
+
+		/* Wait for link */
+		for (i = 0; i < 100; i++) {
+			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+				break;
+			mdelay(1);
+		}
+
+		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
+		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
+
+		if (do_extlpbk) {
+			tg3_phy_lpbk_set(tp, 0, true);
+
+			/* All link indications report up, but the hardware
+			 * isn't really ready for about 20 msec.  Double it
+			 * to be sure.
+			 */
+			mdelay(40);
+
+			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_STD_LOOPBACK_FAILED;
+			if (tg3_flag(tp, TSO_CAPABLE) &&
+			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_TSO_LOOPBACK_FAILED;
+			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
+				data[TG3_EXT_LOOPB_TEST] |=
+							TG3_JMB_LOOPBACK_FAILED;
+		}
+
+		/* Re-enable gphy autopowerdown. */
+		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+			tg3_phy_toggle_apd(tp, true);
+	}
+
+	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
+	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
+
+done:
+	tp->phy_flags |= eee_cap;
+
+	return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+			  u64 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		if (tg3_power_up(tp)) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+			return;
+		}
+		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+	}
+
+	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+	if (tg3_test_nvram(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[TG3_NVRAM_TEST] = 1;
+	}
+	if (!doextlpbk && tg3_test_link(tp)) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[TG3_LINK_TEST] = 1;
+	}
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int err, err2 = 0, irq_sync = 0;
+
+		if (netif_running(dev)) {
+			tg3_phy_stop(tp);
+			tg3_netif_stop(tp);
+			irq_sync = 1;
+		}
+
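+		/* Quiesce the device for the offline tests: halt the MAC and
+		 * the on-chip RX CPU (plus the TX CPU on pre-5705 parts),
+		 * holding the NVRAM lock across the CPU halt.
+		 */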
+		tg3_full_lock(tp, irq_sync);
+		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+		err = tg3_nvram_lock(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+		if (!tg3_flag(tp, 5705_PLUS))
+			tg3_halt_cpu(tp, TX_CPU_BASE);
+		if (!err)
+			tg3_nvram_unlock(tp);
+
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+			tg3_phy_reset(tp);
+
+		if (tg3_test_registers(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[TG3_REGISTER_TEST] = 1;
+		}
+
+		if (tg3_test_memory(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[TG3_MEMORY_TEST] = 1;
+		}
+
+		if (doextlpbk)
+			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+
+		if (tg3_test_loopback(tp, data, doextlpbk))
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		tg3_full_unlock(tp);
+
+		if (tg3_test_interrupt(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[TG3_INTERRUPT_TEST] = 1;
+		}
+
+		tg3_full_lock(tp, 0);
+
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		if (netif_running(dev)) {
+			tg3_flag_set(tp, INIT_COMPLETE);
+			err2 = tg3_restart_hw(tp, true);
+			if (!err2)
+				tg3_netif_start(tp);
+		}
+
+		tg3_full_unlock(tp);
+
+		if (irq_sync && !err2)
+			tg3_phy_start(tp);
+	}
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		tg3_power_down_prepare(tp);
+
+}
+
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct hwtstamp_config stmpconf;
+
+	if (!tg3_flag(tp, PTP_CAPABLE))
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+		return -EFAULT;
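+		/* Not the standard EEPROM layout: pull the VPD block through
+		 * PCI config space, retrying partial reads that time out or
+		 * are interrupted (up to three attempts).
+		 */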
+
+	if (stmpconf.flags)
+		return -EINVAL;
+
+	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
+		return -ERANGE;
+
+	switch (stmpconf.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tp->rxptpctl = 0;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+			       TG3_RX_PTP_CTL_SYNC_EVNT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+			       TG3_RX_PTP_CTL_DELAY_REQ;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+			       TG3_RX_PTP_CTL_SYNC_EVNT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+			       TG3_RX_PTP_CTL_SYNC_EVNT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+			       TG3_RX_PTP_CTL_SYNC_EVNT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+			       TG3_RX_PTP_CTL_DELAY_REQ;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+			       TG3_RX_PTP_CTL_DELAY_REQ;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+			       TG3_RX_PTP_CTL_DELAY_REQ;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (netif_running(dev) && tp->rxptpctl)
+		tw32(TG3_RX_PTP_CTL,
+		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+
+	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+		tg3_flag_set(tp, TX_TSTAMP_EN);
+	else
+		tg3_flag_clear(tp, TX_TSTAMP_EN);
+
+	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+		-EFAULT : 0;
+}
+
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct hwtstamp_config stmpconf;
+
+	if (!tg3_flag(tp, PTP_CAPABLE))
+		return -EOPNOTSUPP;
+
+	stmpconf.flags = 0;
+	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+	switch (tp->rxptpctl) {
+	case 0:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -ERANGE;
+	}
+
+	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+		-EFAULT : 0;
+}
+
+static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+		return phy_mii_ioctl(phydev, ifr, cmd);
+	}
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = tp->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u32 mii_regval;
+
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+			break;			/* We have no PHY */
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&tp->lock);
+		err = __tg3_readphy(tp, data->phy_id & 0x1f,
+				    data->reg_num & 0x1f, &mii_regval);
+		spin_unlock_bh(&tp->lock);
+
+		data->val_out = mii_regval;
+
+		return err;
+	}
+
+	case SIOCSMIIREG:
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+			break;			/* We have no PHY */
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&tp->lock);
+		err = __tg3_writephy(tp, data->phy_id & 0x1f,
+				     data->reg_num & 0x1f, data->val_in);
+		spin_unlock_bh(&tp->lock);
+
+		return err;
+
+	case SIOCSHWTSTAMP:
+		return tg3_hwtstamp_set(dev, ifr);
+
+	case SIOCGHWTSTAMP:
+		return tg3_hwtstamp_get(dev, ifr);
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
+static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
+	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
+		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
+		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
+		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
+	}
+
+	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
+	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
+	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
+	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
+	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
+	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
+	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
+	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
+		return -EINVAL;
+
+	/* No rx interrupts will be generated if both are zero */
+	if ((ec->rx_coalesce_usecs == 0) &&
+	    (ec->rx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	/* No tx interrupts will be generated if both are zero */
+	if ((ec->tx_coalesce_usecs == 0) &&
+	    (ec->tx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	/* Only copy relevant parameters, ignore all others. */
+	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
+	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
+	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
+
+	if (netif_running(dev)) {
+		tg3_full_lock(tp, 0);
+		__tg3_set_coalesce(tp, &tp->coal);
+		tg3_full_unlock(tp);
+	}
+	return 0;
+}
+
+static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+		netdev_warn(tp->dev, "Board does not support EEE!\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (edata->advertised != tp->eee.advertised) {
+		netdev_warn(tp->dev,
+			    "Direct manipulation of EEE advertisement is not supported\n");
+		return -EINVAL;
+	}
+
+	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
+		netdev_warn(tp->dev,
+			    "Maximal Tx Lpi timer supported is %#x(u)\n",
+			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
+		return -EINVAL;
+	}
+
+	tp->eee = *edata;
+
+	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+	tg3_warn_mgmt_link_flap(tp);
+
+	if (netif_running(tp->dev)) {
+		tg3_full_lock(tp, 0);
+		tg3_setup_eee(tp);
+		tg3_phy_reset(tp);
+		tg3_full_unlock(tp);
+	}
+
+	return 0;
+}
+
+static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+		netdev_warn(tp->dev,
+			    "Board does not support EEE!\n");
+		return -EOPNOTSUPP;
+	}
+
+	*edata = tp->eee;
+	return 0;
+}
+
+static const struct ethtool_ops tg3_ethtool_ops = {
+	.get_settings		= tg3_get_settings,
+	.set_settings		= tg3_set_settings,
+	.get_drvinfo		= tg3_get_drvinfo,
+	.get_regs_len		= tg3_get_regs_len,
+	.get_regs		= tg3_get_regs,
+	.get_wol		= tg3_get_wol,
+	.set_wol		= tg3_set_wol,
+	.get_msglevel		= tg3_get_msglevel,
+	.set_msglevel		= tg3_set_msglevel,
+	.nway_reset		= tg3_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_eeprom_len		= tg3_get_eeprom_len,
+	.get_eeprom		= tg3_get_eeprom,
+	.set_eeprom		= tg3_set_eeprom,
+	.get_ringparam		= tg3_get_ringparam,
+	.set_ringparam		= tg3_set_ringparam,
+	.get_pauseparam		= tg3_get_pauseparam,
+	.set_pauseparam		= tg3_set_pauseparam,
+	.self_test		= tg3_self_test,
+	.get_strings		= tg3_get_strings,
+	.set_phys_id		= tg3_set_phys_id,
+	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
+	.set_coalesce		= tg3_set_coalesce,
+	.get_sset_count		= tg3_get_sset_count,
+	.get_rxnfc		= tg3_get_rxnfc,
+	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
+	.get_rxfh		= tg3_get_rxfh,
+	.set_rxfh		= tg3_set_rxfh,
+	.get_channels		= tg3_get_channels,
+	.set_channels		= tg3_set_channels,
+	.get_ts_info		= tg3_get_ts_info,
+	.get_eee		= tg3_get_eee,
+	.set_eee		= tg3_set_eee,
+};
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+						struct rtnl_link_stats64 *stats)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	spin_lock_bh(&tp->lock);
+	if (!tp->hw_stats) {
+		*stats = tp->net_stats_prev;
+		spin_unlock_bh(&tp->lock);
+		return stats;
+	}
+
+	tg3_get_nstats(tp, stats);
+	spin_unlock_bh(&tp->lock);
+
+	return stats;
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	tg3_full_lock(tp, 0);
+	__tg3_set_rx_mode(dev);
+	tg3_full_unlock(tp);
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+			       int new_mtu)
+{
+	dev->mtu = new_mtu;
+
+	if (new_mtu > ETH_DATA_LEN) {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			netdev_update_features(dev);
+			tg3_flag_clear(tp, TSO_CAPABLE);
+		} else {
+			tg3_flag_set(tp, JUMBO_RING_ENABLE);
+		}
+	} else {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			tg3_flag_set(tp, TSO_CAPABLE);
+			netdev_update_features(dev);
+		}
+		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+	}
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+	bool reset_phy = false;
+
+	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is brought up.
+		 */
+		tg3_set_mtu(dev, tp, new_mtu);
+		return 0;
+	}
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	tg3_set_mtu(dev, tp, new_mtu);
+
+	tg3_full_lock(tp, 1);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+	/* Reset PHY, otherwise the read DMA engine will be in a mode that
+	 * breaks all requests to 256 bytes.
+	 */
+	if (tg3_asic_rev(tp) == ASIC_REV_57766)
+		reset_phy = true;
+
+	err = tg3_restart_hw(tp, reset_phy);
+
+	if (!err)
+		tg3_netif_start(tp);
+
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+
+	return err;
+}
+
+static const struct net_device_ops tg3_netdev_ops = {
+	.ndo_open		= tg3_open,
+	.ndo_stop		= tg3_close,
+	.ndo_start_xmit		= tg3_start_xmit,
+	.ndo_get_stats64	= tg3_get_stats64,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= tg3_set_rx_mode,
+	.ndo_set_mac_address	= tg3_set_mac_addr,
+	.ndo_do_ioctl		= tg3_ioctl,
+	.ndo_tx_timeout		= tg3_tx_timeout,
+	.ndo_change_mtu		= tg3_change_mtu,
+	.ndo_fix_features	= tg3_fix_features,
+	.ndo_set_features	= tg3_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= tg3_poll_controller,
+#endif
+};
+
+static void tg3_get_eeprom_size(struct tg3 *tp)
+{
+	u32 cursize, val, magic;
+
+	tp->nvram_size = EEPROM_CHIP_SIZE;
+
+	if (tg3_nvram_read(tp, 0, &magic) != 0)
+		return;
+
+	if ((magic != TG3_EEPROM_MAGIC) &&
+	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+		return;
+
+	/*
+	 * Size the chip by reading offsets at increasing powers of two.
+	 * When we encounter our validation signature, we know the addressing
+	 * has wrapped around, and thus have our chip size.
+	 */
+	cursize = 0x10;
+
+	while (cursize < tp->nvram_size) {
+		if (tg3_nvram_read(tp, cursize, &val) != 0)
+			return;
+
+		if (val == magic)
+			break;
+
+		cursize <<= 1;
+	}
+
+	tp->nvram_size = cursize;
+}
+
+static void tg3_get_nvram_size(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
+		return;
+
+	/* Selfboot format */
+	if (val != TG3_EEPROM_MAGIC) {
+		tg3_get_eeprom_size(tp);
+		return;
+	}
+
+	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+		if (val != 0) {
+			/* This is confusing.  We want to operate on the
+			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
+			 * call will read from NVRAM and byteswap the data
+			 * according to the byteswapping settings for all
+			 * other register accesses.  This ensures the data we
+			 * want will always reside in the lower 16-bits.
+			 * However, the data in NVRAM is in LE format, which
+			 * means the data from the NVRAM read will always be
+			 * opposite the endianness of the CPU.  The 16-bit
+			 * byteswap then brings the data to CPU endianness.
+			 */
+			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
+			return;
+		}
+	}
+	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+}
+
+static void tg3_get_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+		tg3_flag_set(tp, FLASH);
+	} else {
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+			break;
+		case FLASH_VENDOR_ATMEL_EEPROM:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_ST:
+			tp->nvram_jedecnum = JEDEC_ST;
+			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_SAIFUN:
+			tp->nvram_jedecnum = JEDEC_SAIFUN;
+			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+			break;
+		case FLASH_VENDOR_SST_SMALL:
+		case FLASH_VENDOR_SST_LARGE:
+			tp->nvram_jedecnum = JEDEC_SST;
+			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+			break;
+		}
+	} else {
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+	}
+}
+
+static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+{
+	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+	case FLASH_5752PAGE_SIZE_256:
+		tp->nvram_pagesize = 256;
+		break;
+	case FLASH_5752PAGE_SIZE_512:
+		tp->nvram_pagesize = 512;
+		break;
+	case FLASH_5752PAGE_SIZE_1K:
+		tp->nvram_pagesize = 1024;
+		break;
+	case FLASH_5752PAGE_SIZE_2K:
+		tp->nvram_pagesize = 2048;
+		break;
+	case FLASH_5752PAGE_SIZE_4K:
+		tp->nvram_pagesize = 4096;
+		break;
+	case FLASH_5752PAGE_SIZE_264:
+		tp->nvram_pagesize = 264;
+		break;
+	case FLASH_5752PAGE_SIZE_528:
+		tp->nvram_pagesize = 528;
+		break;
+	}
+}
+
+static void tg3_get_5752_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27))
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		break;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		break;
+	}
+
+	if (tg3_flag(tp, FLASH)) {
+		tg3_nvram_get_pagesize(tp, nvcfg1);
+	} else {
+		/* For eeprom, set pagesize to maximum eeprom size */
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+	}
+}
+
+static void tg3_get_5755_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, protect = 0;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27)) {
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+		protect = 1;
+	}
+
+	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+	switch (nvcfg1) {
+	case FLASH_5755VENDOR_ATMEL_FLASH_1:
+	case FLASH_5755VENDOR_ATMEL_FLASH_2:
+	case FLASH_5755VENDOR_ATMEL_FLASH_3:
+	case FLASH_5755VENDOR_ATMEL_FLASH_5:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 264;
+		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+			tp->nvram_size = (protect ? 0x3e200 :
+					  TG3_NVRAM_SIZE_512KB);
+		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+			tp->nvram_size = (protect ? 0x1f200 :
+					  TG3_NVRAM_SIZE_256KB);
+		else
+			tp->nvram_size = (protect ? 0x1f200 :
+					  TG3_NVRAM_SIZE_128KB);
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_64KB :
+					  TG3_NVRAM_SIZE_128KB);
+		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_64KB :
+					  TG3_NVRAM_SIZE_256KB);
+		else
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_128KB :
+					  TG3_NVRAM_SIZE_512KB);
+		break;
+	}
+}
+
+static void tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		break;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+	case FLASH_5755VENDOR_ATMEL_FLASH_1:
+	case FLASH_5755VENDOR_ATMEL_FLASH_2:
+	case FLASH_5755VENDOR_ATMEL_FLASH_3:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 264;
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		break;
+	}
+}
+
+static void tg3_get_5761_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, protect = 0;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27)) {
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+		protect = 1;
+	}
+
+	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+	switch (nvcfg1) {
+	case FLASH_5761VENDOR_ATMEL_ADB021D:
+	case FLASH_5761VENDOR_ATMEL_ADB041D:
+	case FLASH_5761VENDOR_ATMEL_ADB081D:
+	case FLASH_5761VENDOR_ATMEL_ADB161D:
+	case FLASH_5761VENDOR_ATMEL_MDB021D:
+	case FLASH_5761VENDOR_ATMEL_MDB041D:
+	case FLASH_5761VENDOR_ATMEL_MDB081D:
+	case FLASH_5761VENDOR_ATMEL_MDB161D:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+		tp->nvram_pagesize = 256;
+		break;
+	case FLASH_5761VENDOR_ST_A_M45PE20:
+	case FLASH_5761VENDOR_ST_A_M45PE40:
+	case FLASH_5761VENDOR_ST_A_M45PE80:
+	case FLASH_5761VENDOR_ST_A_M45PE16:
+	case FLASH_5761VENDOR_ST_M_M45PE20:
+	case FLASH_5761VENDOR_ST_M_M45PE40:
+	case FLASH_5761VENDOR_ST_M_M45PE80:
+	case FLASH_5761VENDOR_ST_M_M45PE16:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		break;
+	}
+
+	if (protect) {
+		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
+	} else {
+		switch (nvcfg1) {
+		case FLASH_5761VENDOR_ATMEL_ADB161D:
+		case FLASH_5761VENDOR_ATMEL_MDB161D:
+		case FLASH_5761VENDOR_ST_A_M45PE16:
+		case FLASH_5761VENDOR_ST_M_M45PE16:
+			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB081D:
+		case FLASH_5761VENDOR_ATMEL_MDB081D:
+		case FLASH_5761VENDOR_ST_A_M45PE80:
+		case FLASH_5761VENDOR_ST_M_M45PE80:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB041D:
+		case FLASH_5761VENDOR_ATMEL_MDB041D:
+		case FLASH_5761VENDOR_ST_A_M45PE40:
+		case FLASH_5761VENDOR_ST_M_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB021D:
+		case FLASH_5761VENDOR_ATMEL_MDB021D:
+		case FLASH_5761VENDOR_ST_A_M45PE20:
+		case FLASH_5761VENDOR_ST_M_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		}
+	}
+}
+
+static void tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+	tp->nvram_jedecnum = JEDEC_ATMEL;
+	tg3_flag_set(tp, NVRAM_BUFFERED);
+	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
+static void tg3_get_57780_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		return;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		}
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5752VENDOR_ST_M45PE10:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		case FLASH_5752VENDOR_ST_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5752VENDOR_ST_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void tg3_get_5717_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5717VENDOR_ATMEL_EEPROM:
+	case FLASH_5717VENDOR_MICRO_EEPROM:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		return;
+	case FLASH_5717VENDOR_ATMEL_MDB011D:
+	case FLASH_5717VENDOR_ATMEL_ADB011B:
+	case FLASH_5717VENDOR_ATMEL_ADB011D:
+	case FLASH_5717VENDOR_ATMEL_MDB021D:
+	case FLASH_5717VENDOR_ATMEL_ADB021B:
+	case FLASH_5717VENDOR_ATMEL_ADB021D:
+	case FLASH_5717VENDOR_ATMEL_45USPT:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5717VENDOR_ATMEL_MDB021D:
+			/* Detect size with tg3_nvram_get_size() */
+			break;
+		case FLASH_5717VENDOR_ATMEL_ADB021B:
+		case FLASH_5717VENDOR_ATMEL_ADB021D:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	case FLASH_5717VENDOR_ST_M_M25PE10:
+	case FLASH_5717VENDOR_ST_A_M25PE10:
+	case FLASH_5717VENDOR_ST_M_M45PE10:
+	case FLASH_5717VENDOR_ST_A_M45PE10:
+	case FLASH_5717VENDOR_ST_M_M25PE20:
+	case FLASH_5717VENDOR_ST_A_M25PE20:
+	case FLASH_5717VENDOR_ST_M_M45PE20:
+	case FLASH_5717VENDOR_ST_A_M45PE20:
+	case FLASH_5717VENDOR_ST_25USPT:
+	case FLASH_5717VENDOR_ST_45USPT:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5717VENDOR_ST_M_M25PE20:
+		case FLASH_5717VENDOR_ST_M_M45PE20:
+			/* Detect size with tg3_nvram_get_size() */
+			break;
+		case FLASH_5717VENDOR_ST_A_M25PE20:
+		case FLASH_5717VENDOR_ST_A_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void tg3_get_5720_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, nvmpinstrp;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
+			tg3_flag_set(tp, NO_NVRAM);
+			return;
+		}
+
+		switch (nvmpinstrp) {
+		case FLASH_5762_EEPROM_HD:
+			nvmpinstrp = FLASH_5720_EEPROM_HD;
+			break;
+		case FLASH_5762_EEPROM_LD:
+			nvmpinstrp = FLASH_5720_EEPROM_LD;
+			break;
+		case FLASH_5720VENDOR_M_ST_M45PE20:
+			/* This pinstrap supports multiple sizes, so force it
+			 * to read the actual size from location 0xf0.
+			 */
+			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
+			break;
+		}
+	}
+
+	switch (nvmpinstrp) {
+	case FLASH_5720_EEPROM_HD:
+	case FLASH_5720_EEPROM_LD:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
+			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+		else
+			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
+		return;
+	case FLASH_5720VENDOR_M_ATMEL_DB011D:
+	case FLASH_5720VENDOR_A_ATMEL_DB011B:
+	case FLASH_5720VENDOR_A_ATMEL_DB011D:
+	case FLASH_5720VENDOR_M_ATMEL_DB021D:
+	case FLASH_5720VENDOR_A_ATMEL_DB021B:
+	case FLASH_5720VENDOR_A_ATMEL_DB021D:
+	case FLASH_5720VENDOR_M_ATMEL_DB041D:
+	case FLASH_5720VENDOR_A_ATMEL_DB041B:
+	case FLASH_5720VENDOR_A_ATMEL_DB041D:
+	case FLASH_5720VENDOR_M_ATMEL_DB081D:
+	case FLASH_5720VENDOR_A_ATMEL_DB081D:
+	case FLASH_5720VENDOR_ATMEL_45USPT:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvmpinstrp) {
+		case FLASH_5720VENDOR_M_ATMEL_DB021D:
+		case FLASH_5720VENDOR_A_ATMEL_DB021B:
+		case FLASH_5720VENDOR_A_ATMEL_DB021D:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5720VENDOR_M_ATMEL_DB041D:
+		case FLASH_5720VENDOR_A_ATMEL_DB041B:
+		case FLASH_5720VENDOR_A_ATMEL_DB041D:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5720VENDOR_M_ATMEL_DB081D:
+		case FLASH_5720VENDOR_A_ATMEL_DB081D:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		default:
+			if (tg3_asic_rev(tp) != ASIC_REV_5762)
+				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	case FLASH_5720VENDOR_M_ST_M25PE10:
+	case FLASH_5720VENDOR_M_ST_M45PE10:
+	case FLASH_5720VENDOR_A_ST_M25PE10:
+	case FLASH_5720VENDOR_A_ST_M45PE10:
+	case FLASH_5720VENDOR_M_ST_M25PE20:
+	case FLASH_5720VENDOR_M_ST_M45PE20:
+	case FLASH_5720VENDOR_A_ST_M25PE20:
+	case FLASH_5720VENDOR_A_ST_M45PE20:
+	case FLASH_5720VENDOR_M_ST_M25PE40:
+	case FLASH_5720VENDOR_M_ST_M45PE40:
+	case FLASH_5720VENDOR_A_ST_M25PE40:
+	case FLASH_5720VENDOR_A_ST_M45PE40:
+	case FLASH_5720VENDOR_M_ST_M25PE80:
+	case FLASH_5720VENDOR_M_ST_M45PE80:
+	case FLASH_5720VENDOR_A_ST_M25PE80:
+	case FLASH_5720VENDOR_A_ST_M45PE80:
+	case FLASH_5720VENDOR_ST_25USPT:
+	case FLASH_5720VENDOR_ST_45USPT:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvmpinstrp) {
+		case FLASH_5720VENDOR_M_ST_M25PE20:
+		case FLASH_5720VENDOR_M_ST_M45PE20:
+		case FLASH_5720VENDOR_A_ST_M25PE20:
+		case FLASH_5720VENDOR_A_ST_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5720VENDOR_M_ST_M25PE40:
+		case FLASH_5720VENDOR_M_ST_M45PE40:
+		case FLASH_5720VENDOR_A_ST_M25PE40:
+		case FLASH_5720VENDOR_A_ST_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5720VENDOR_M_ST_M25PE80:
+		case FLASH_5720VENDOR_M_ST_M45PE80:
+		case FLASH_5720VENDOR_A_ST_M25PE80:
+		case FLASH_5720VENDOR_A_ST_M45PE80:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		default:
+			if (tg3_asic_rev(tp) != ASIC_REV_5762)
+				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+		u32 val;
+
+		if (tg3_nvram_read(tp, 0, &val))
+			return;
+
+		if (val != TG3_EEPROM_MAGIC &&
+		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
+			tg3_flag_set(tp, NO_NVRAM);
+	}
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void tg3_nvram_init(struct tg3 *tp)
+{
+	if (tg3_flag(tp, IS_SSB_CORE)) {
+		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
+		tg3_flag_clear(tp, NVRAM);
+		tg3_flag_clear(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tw32_f(GRC_EEPROM_ADDR,
+	     (EEPROM_ADDR_FSM_RESET |
+	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
+	       EEPROM_ADDR_CLKPERD_SHIFT)));
+
+	msleep(1);
+
+	/* Enable seeprom accesses. */
+	tw32_f(GRC_LOCAL_CTRL,
+	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
+	udelay(100);
+
+	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5701) {
+		tg3_flag_set(tp, NVRAM);
+
+		if (tg3_nvram_lock(tp)) {
+			netdev_warn(tp->dev,
+				    "Cannot get nvram lock, %s failed\n",
+				    __func__);
+			return;
+		}
+		tg3_enable_nvram_access(tp);
+
+		tp->nvram_size = 0;
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5752)
+			tg3_get_5752_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
+			tg3_get_5755_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
+			 tg3_asic_rev(tp) == ASIC_REV_5785)
+			tg3_get_5787_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
+			tg3_get_5761_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
+			tg3_get_5906_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
+			 tg3_flag(tp, 57765_CLASS))
+			tg3_get_57780_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+			 tg3_asic_rev(tp) == ASIC_REV_5719)
+			tg3_get_5717_nvram_info(tp);
+		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+			 tg3_asic_rev(tp) == ASIC_REV_5762)
+			tg3_get_5720_nvram_info(tp);
+		else
+			tg3_get_nvram_info(tp);
+
+		if (tp->nvram_size == 0)
+			tg3_get_nvram_size(tp);
+
+		tg3_disable_nvram_access(tp);
+		tg3_nvram_unlock(tp);
+
+	} else {
+		tg3_flag_clear(tp, NVRAM);
+		tg3_flag_clear(tp, NVRAM_BUFFERED);
+
+		tg3_get_eeprom_size(tp);
+	}
+}
+
+struct subsys_tbl_ent {
+	u16 subsys_vendor, subsys_devid;
+	u32 phy_id;
+};
+
+static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
+	/* Broadcom boards. */
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
+
+	/* 3com boards. */
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
+
+	/* DELL boards. */
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
+
+	/* Compaq boards. */
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
+
+	/* IBM boards. */
+	{ TG3PCI_SUBVENDOR_ID_IBM,
+	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
+};
+
+static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
+		if ((subsys_id_to_phy_id[i].subsys_vendor ==
+		     tp->pdev->subsystem_vendor) &&
+		    (subsys_id_to_phy_id[i].subsys_devid ==
+		     tp->pdev->subsystem_device))
+			return &subsys_id_to_phy_id[i];
+	}
+	return NULL;
+}
+
+static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+{
+	u32 val;
+
+	tp->phy_id = TG3_PHY_ID_INVALID;
+	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+	/* Assume an onboard device and WOL capability by default. */
+	tg3_flag_set(tp, EEPROM_WRITE_PROT);
+	tg3_flag_set(tp, WOL_CAP);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
+			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+			tg3_flag_set(tp, IS_NIC);
+		}
+		val = tr32(VCPU_CFGSHDW);
+		if (val & VCPU_CFGSHDW_ASPM_DBNC)
+			tg3_flag_set(tp, ASPM_WORKAROUND);
+		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
+		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+			tg3_flag_set(tp, WOL_ENABLE);
+			device_set_wakeup_enable(&tp->pdev->dev, true);
+		}
+		goto done;
+	}
+
+	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+		u32 nic_cfg, led_cfg;
+		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+		u32 nic_phy_id, ver, eeprom_phy_id;
+		int eeprom_phy_serdes = 0;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+		tp->nic_sram_data_cfg = nic_cfg;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+		ver >>= NIC_SRAM_DATA_VER_SHIFT;
+		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
+		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
+		    (ver > 0) && (ver < 0x100))
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5785)
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
+		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
+		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+			eeprom_phy_serdes = 1;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+		if (nic_phy_id != 0) {
+			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+
+			eeprom_phy_id  = (id1 >> 16) << 10;
+			eeprom_phy_id |= (id2 & 0xfc00) << 16;
+			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
+		} else
+			eeprom_phy_id = 0;
+
+		tp->phy_id = eeprom_phy_id;
+		if (eeprom_phy_serdes) {
+			if (!tg3_flag(tp, 5705_PLUS))
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+			else
+				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
+		}
+
+		if (tg3_flag(tp, 5750_PLUS))
+			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+				    SHASTA_EXT_LED_MODE_MASK);
+		else
+			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
+
+		switch (led_cfg) {
+		default:
+		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
+			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+			break;
+
+		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
+			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+			break;
+
+		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
+			tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
+			 * read on some older 5700/5701 bootcode.
+			 */
+			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+			    tg3_asic_rev(tp) == ASIC_REV_5701)
+				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+			break;
+
+		case SHASTA_EXT_LED_SHARED:
+			tp->led_ctrl = LED_CTRL_MODE_SHARED;
+			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+
+			if (tg3_flag(tp, 5717_PLUS) ||
+			    tg3_asic_rev(tp) == ASIC_REV_5762)
+				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+						LED_CTRL_BLINK_RATE_MASK;
+
+			break;
+
+		case SHASTA_EXT_LED_MAC:
+			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+			break;
+
+		case SHASTA_EXT_LED_COMBO:
+			tp->led_ctrl = LED_CTRL_MODE_COMBO;
+			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+			break;
+
+		}
+
+		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
+		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+			tg3_flag_set(tp, EEPROM_WRITE_PROT);
+			if ((tp->pdev->subsystem_vendor ==
+			     PCI_VENDOR_ID_ARIMA) &&
+			    (tp->pdev->subsystem_device == 0x205a ||
+			     tp->pdev->subsystem_device == 0x2063))
+				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+		} else {
+			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+			tg3_flag_set(tp, IS_NIC);
+		}
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+			tg3_flag_set(tp, ENABLE_ASF);
+			if (tg3_flag(tp, 5750_PLUS))
+				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+		}
+
+		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
+		    tg3_flag(tp, 5750_PLUS))
+			tg3_flag_set(tp, ENABLE_APE);
+
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
+		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+			tg3_flag_clear(tp, WOL_CAP);
+
+		if (tg3_flag(tp, WOL_CAP) &&
+		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+			tg3_flag_set(tp, WOL_ENABLE);
+			device_set_wakeup_enable(&tp->pdev->dev, true);
+		}
+
+		if (cfg2 & (1 << 17))
+			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+
+		/* serdes signal pre-emphasis in register 0x590 set by
+		 * bootcode if bit 18 is set
+		 */
+		if (cfg2 & (1 << 18))
+			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
+
+		if ((tg3_flag(tp, 57765_PLUS) ||
+		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
+		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
+
+		if (tg3_flag(tp, PCI_EXPRESS)) {
+			u32 cfg3;
+
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
+			    !tg3_flag(tp, 57765_PLUS) &&
+			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
+				tg3_flag_set(tp, ASPM_WORKAROUND);
+			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
+				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
+			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
+				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
+		}
+
+		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
+	}
+done:
+	if (tg3_flag(tp, WOL_CAP))
+		device_set_wakeup_enable(&tp->pdev->dev,
+					 tg3_flag(tp, WOL_ENABLE));
+	else
+		device_set_wakeup_capable(&tp->pdev->dev, false);
+}
+
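+/* Read a 32-bit word from the APE-managed OTP region.  The read command
+ * is issued through the APE OTP control registers and polled for
+ * completion.
+ */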
+static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+	int i, err;
+	u32 val2, off = offset * 8;
+
+	err = tg3_nvram_lock(tp);
+	if (err)
+		return err;
+
+	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
+	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
+			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
+	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
+	udelay(10);
+
+	for (i = 0; i < 100; i++) {
+		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
+		if (val2 & APE_OTP_STATUS_CMD_DONE) {
+			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
+			break;
+		}
+		udelay(10);
+	}
+
+	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
+
+	tg3_nvram_unlock(tp);
+	if (val2 & APE_OTP_STATUS_CMD_DONE)
+		return 0;
+
+	return -EBUSY;
+}
+
+static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+	int i;
+	u32 val;
+
+	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+	tw32(OTP_CTRL, cmd);
+
+	/* Wait for up to 1 ms for command to execute. */
+	for (i = 0; i < 100; i++) {
+		val = tr32(OTP_STATUS);
+		if (val & OTP_STATUS_CMD_DONE)
+			break;
+		udelay(10);
+	}
+
+	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip.  The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 tg3_read_otp_phycfg(struct tg3 *tp)
+{
+	u32 bhalf_otp, thalf_otp;
+
+	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+		return 0;
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	thalf_otp = tr32(OTP_READ_DATA);
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	bhalf_otp = tr32(OTP_READ_DATA);
+
+	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
+static void tg3_phy_init_link_config(struct tg3 *tp)
+{
+	u32 adv = ADVERTISED_Autoneg;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+			adv |= ADVERTISED_1000baseT_Half;
+		adv |= ADVERTISED_1000baseT_Full;
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		adv |= ADVERTISED_100baseT_Half |
+		       ADVERTISED_100baseT_Full |
+		       ADVERTISED_10baseT_Half |
+		       ADVERTISED_10baseT_Full |
+		       ADVERTISED_TP;
+	else
+		adv |= ADVERTISED_FIBRE;
+
+	tp->link_config.advertising = adv;
+	tp->link_config.speed = SPEED_UNKNOWN;
+	tp->link_config.duplex = DUPLEX_UNKNOWN;
+	tp->link_config.autoneg = AUTONEG_ENABLE;
+	tp->link_config.active_speed = SPEED_UNKNOWN;
+	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
+
+	tp->old_link = -1;
+}
+
+static int tg3_phy_probe(struct tg3 *tp)
+{
+	u32 hw_phy_id_1, hw_phy_id_2;
+	u32 hw_phy_id, hw_phy_id_masked;
+	int err;
+
+	/* flow control autonegotiation is default behavior */
+	tg3_flag_set(tp, PAUSE_AUTONEG);
+	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		switch (tp->pci_fn) {
+		case 0:
+			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
+			break;
+		case 1:
+			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
+			break;
+		case 2:
+			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
+			break;
+		case 3:
+			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
+			break;
+		}
+	}
+
+	if (!tg3_flag(tp, ENABLE_ASF) &&
+	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
+	if (tg3_flag(tp, USE_PHYLIB))
+		return tg3_phy_init(tp);
+
+	/* Reading the PHY ID register can conflict with ASF
+	 * firmware access to the PHY hardware.
+	 */
+	err = 0;
+	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
+		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
+	} else {
+		/* Now read the physical PHY_ID from the chip and verify
+		 * that it is sane.  If it doesn't look good, we fall back
+		 * to either the hard-coded table based PHY_ID or the
+		 * value found in the eeprom area.
+		 */
+		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
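+		/* Pack MII_PHYSID1/2 into the driver's internal PHY ID format. */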
+		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
+		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
+
+		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
+	}
+
+	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
+		tp->phy_id = hw_phy_id;
+		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
+			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+		else
+			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
+	} else {
+		if (tp->phy_id != TG3_PHY_ID_INVALID) {
+			/* Do nothing, phy ID already set up in
+			 * tg3_get_eeprom_hw_cfg().
+			 */
+		} else {
+			struct subsys_tbl_ent *p;
+
+			/* No eeprom signature?  Try the hardcoded
+			 * subsys device table.
+			 */
+			p = tg3_lookup_by_subsys(tp);
+			if (p) {
+				tp->phy_id = p->phy_id;
+			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
+				/* So far we have seen the IDs 0xbc050cd0,
+				 * 0xbc050f80 and 0xbc050c30 on devices
+				 * connected to a BCM4785, and there are
+				 * probably more.  For now, just assume that
+				 * the phy is supported when it is connected
+				 * to an SSB core.
+				 */
+				return -ENODEV;
+			}
+
+			if (!tp->phy_id ||
+			    tp->phy_id == TG3_PHY_ID_BCM8002)
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+		}
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
+	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
+	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
+		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+
+		tp->eee.supported = SUPPORTED_100baseT_Full |
+				    SUPPORTED_1000baseT_Full;
+		tp->eee.advertised = ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Full;
+		tp->eee.eee_enabled = 1;
+		tp->eee.tx_lpi_enabled = 1;
+		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
+	}
+
+	tg3_phy_init_link_config(tp);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    !tg3_flag(tp, ENABLE_APE) &&
+	    !tg3_flag(tp, ENABLE_ASF)) {
+		u32 bmsr, dummy;
+
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    (bmsr & BMSR_LSTATUS))
+			goto skip_phy_reset;
+
+		err = tg3_phy_reset(tp);
+		if (err)
+			return err;
+
+		tg3_phy_set_wirespeed(tp);
+
+		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
+			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+					    tp->link_config.flowctrl);
+
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+		}
+	}
+
+skip_phy_reset:
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		err = tg3_init_5401phy_dsp(tp);
+		if (err)
+			return err;
+
+		err = tg3_init_5401phy_dsp(tp);
+	}
+
+	return err;
+}
+
+static void tg3_read_vpd(struct tg3 *tp)
+{
+	u8 *vpd_data;
+	unsigned int block_end, rosize, len;
+	u32 vpdlen;
+	int j, i = 0;
+
+	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
+	if (!vpd_data)
+		goto out_no_vpd;
+
+	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto out_not_found;
+
+	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+	i += PCI_VPD_LRDT_TAG_SIZE;
+
+	if (block_end > vpdlen)
+		goto out_not_found;
+
+	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (j > 0) {
+		len = pci_vpd_info_field_size(&vpd_data[j]);
+
+		j += PCI_VPD_INFO_FLD_HDR_SIZE;
+		if (j + len > block_end || len != 4 ||
+		    memcmp(&vpd_data[j], "1028", 4))
+			goto partno;
+
+		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+					      PCI_VPD_RO_KEYWORD_VENDOR0);
+		if (j < 0)
+			goto partno;
+
+		len = pci_vpd_info_field_size(&vpd_data[j]);
+
+		j += PCI_VPD_INFO_FLD_HDR_SIZE;
+		if (j + len > block_end)
+			goto partno;
+
+		if (len >= sizeof(tp->fw_ver))
+			len = sizeof(tp->fw_ver) - 1;
+		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+			 &vpd_data[j]);
+	}
+
+partno:
+	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_PARTNO);
+	if (i < 0)
+		goto out_not_found;
+
+	len = pci_vpd_info_field_size(&vpd_data[i]);
+
+	i += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (len > TG3_BPN_SIZE ||
+	    (len + i) > vpdlen)
+		goto out_not_found;
+
+	memcpy(tp->board_part_number, &vpd_data[i], len);
+
+out_not_found:
+	kfree(vpd_data);
+	if (tp->board_part_number[0])
+		return;
+
+out_no_vpd:
+	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
+			strcpy(tp->board_part_number, "BCM5717");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
+			strcpy(tp->board_part_number, "BCM5718");
+		else
+			goto nomatch;
+	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+			strcpy(tp->board_part_number, "BCM57780");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+			strcpy(tp->board_part_number, "BCM57760");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+			strcpy(tp->board_part_number, "BCM57790");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+			strcpy(tp->board_part_number, "BCM57788");
+		else
+			goto nomatch;
+	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+			strcpy(tp->board_part_number, "BCM57761");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
+			strcpy(tp->board_part_number, "BCM57765");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+			strcpy(tp->board_part_number, "BCM57781");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+			strcpy(tp->board_part_number, "BCM57785");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+			strcpy(tp->board_part_number, "BCM57791");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+			strcpy(tp->board_part_number, "BCM57795");
+		else
+			goto nomatch;
+	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+			strcpy(tp->board_part_number, "BCM57762");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+			strcpy(tp->board_part_number, "BCM57766");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+			strcpy(tp->board_part_number, "BCM57782");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+			strcpy(tp->board_part_number, "BCM57786");
+		else
+			goto nomatch;
+	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		strcpy(tp->board_part_number, "BCM95906");
+	} else {
+nomatch:
+		strcpy(tp->board_part_number, "none");
+	}
+}
+
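+/* A firmware image is considered valid if its first word carries the
+ * 0x0c000000 signature bits and its second word is zero.
+ */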
+static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+{
+	u32 val;
+
+	if (tg3_nvram_read(tp, offset, &val) ||
+	    (val & 0xfc000000) != 0x0c000000 ||
+	    tg3_nvram_read(tp, offset + 4, &val) ||
+	    val != 0)
+		return 0;
+
+	return 1;
+}
+
+static void tg3_read_bc_ver(struct tg3 *tp)
+{
+	u32 val, offset, start, ver_offset;
+	int i, dst_off;
+	bool newver = false;
+
+	if (tg3_nvram_read(tp, 0xc, &offset) ||
+	    tg3_nvram_read(tp, 0x4, &start))
+		return;
+
+	offset = tg3_nvram_logical_addr(tp, offset);
+
+	if (tg3_nvram_read(tp, offset, &val))
+		return;
+
+	if ((val & 0xfc000000) == 0x0c000000) {
+		if (tg3_nvram_read(tp, offset + 4, &val))
+			return;
+
+		if (val == 0)
+			newver = true;
+	}
+
+	dst_off = strlen(tp->fw_ver);
+
+	if (newver) {
+		if (TG3_VER_SIZE - dst_off < 16 ||
+		    tg3_nvram_read(tp, offset + 8, &ver_offset))
+			return;
+
+		offset = offset + ver_offset - start;
+		for (i = 0; i < 16; i += 4) {
+			__be32 v;
+			if (tg3_nvram_read_be32(tp, offset + i, &v))
+				return;
+
+			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
+		}
+	} else {
+		u32 major, minor;
+
+		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
+			return;
+
+		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
+			TG3_NVM_BCVER_MAJSFT;
+		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
+		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+			 "v%d.%02d", major, minor);
+	}
+}
+
+static void tg3_read_hwsb_ver(struct tg3 *tp)
+{
+	u32 val, major, minor;
+
+	/* Use native endian representation */
+	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
+		return;
+
+	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
+		TG3_NVM_HWSB_CFG1_MAJSFT;
+	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
+		TG3_NVM_HWSB_CFG1_MINSFT;
+
+	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
+}
+
+static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+	u32 offset, major, minor, build;
+
+	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
+
+	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+		return;
+
+	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+	case TG3_EEPROM_SB_REVISION_0:
+		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_2:
+		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_3:
+		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_4:
+		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_5:
+		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_6:
+		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
+		break;
+	default:
+		return;
+	}
+
+	if (tg3_nvram_read(tp, offset, &val))
+		return;
+
+	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+		TG3_EEPROM_SB_EDH_BLD_SHFT;
+	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+		TG3_EEPROM_SB_EDH_MAJ_SHFT;
+	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+	if (minor > 99 || build > 26)
+		return;
+
+	offset = strlen(tp->fw_ver);
+	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+		 " v%d.%02d", major, minor);
+
+	if (build > 0) {
+		offset = strlen(tp->fw_ver);
+		if (offset < TG3_VER_SIZE - 1)
+			tp->fw_ver[offset] = 'a' + build - 1;
+	}
+}
+
+static void tg3_read_mgmtfw_ver(struct tg3 *tp)
+{
+	u32 val, offset, start;
+	int i, vlen;
+
+	for (offset = TG3_NVM_DIR_START;
+	     offset < TG3_NVM_DIR_END;
+	     offset += TG3_NVM_DIRENT_SIZE) {
+		if (tg3_nvram_read(tp, offset, &val))
+			return;
+
+		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
+			break;
+	}
+
+	if (offset == TG3_NVM_DIR_END)
+		return;
+
+	if (!tg3_flag(tp, 5705_PLUS))
+		start = 0x08000000;
+	else if (tg3_nvram_read(tp, offset - 4, &start))
+		return;
+
+	if (tg3_nvram_read(tp, offset + 4, &offset) ||
+	    !tg3_fw_img_is_valid(tp, offset) ||
+	    tg3_nvram_read(tp, offset + 8, &val))
+		return;
+
+	offset += val - start;
+
+	vlen = strlen(tp->fw_ver);
+
+	tp->fw_ver[vlen++] = ',';
+	tp->fw_ver[vlen++] = ' ';
+
+	for (i = 0; i < 4; i++) {
+		__be32 v;
+		if (tg3_nvram_read_be32(tp, offset, &v))
+			return;
+
+		offset += sizeof(v);
+
+		if (vlen > TG3_VER_SIZE - sizeof(v)) {
+			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
+			break;
+		}
+
+		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
+		vlen += sizeof(v);
+	}
+}
+
+static void tg3_probe_ncsi(struct tg3 *tp)
+{
+	u32 apedata;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+	if (apedata != APE_SEG_SIG_MAGIC)
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+	if (!(apedata & APE_FW_STATUS_READY))
+		return;
+
+	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+		tg3_flag_set(tp, APE_HAS_NCSI);
+}
+
+static void tg3_read_dash_ver(struct tg3 *tp)
+{
+	int vlen;
+	u32 apedata;
+	char *fwtype;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
+
+	if (tg3_flag(tp, APE_HAS_NCSI))
+		fwtype = "NCSI";
+	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
+		fwtype = "SMASH";
+	else
+		fwtype = "DASH";
+
+	vlen = strlen(tp->fw_ver);
+
+	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+		 fwtype,
+		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
+		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
+		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
+		 (apedata & APE_FW_VERSION_BLDMSK));
+}
+
+static void tg3_read_otp_ver(struct tg3 *tp)
+{
+	u32 val, val2;
+
+	if (tg3_asic_rev(tp) != ASIC_REV_5762)
+		return;
+
+	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
+	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
+	    TG3_OTP_MAGIC0_VALID(val)) {
+		u64 val64 = (u64) val << 32 | val2;
+		u32 ver = 0;
+		int i, vlen;
+
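+		/* Take the last non-zero byte before the first zero byte. */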
+		for (i = 0; i < 7; i++) {
+			if ((val64 & 0xff) == 0)
+				break;
+			ver = val64 & 0xff;
+			val64 >>= 8;
+		}
+		vlen = strlen(tp->fw_ver);
+		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
+	}
+}
+
+static void tg3_read_fw_ver(struct tg3 *tp)
+{
+	u32 val;
+	bool vpd_vers = false;
+
+	if (tp->fw_ver[0] != 0)
+		vpd_vers = true;
+
+	if (tg3_flag(tp, NO_NVRAM)) {
+		strcat(tp->fw_ver, "sb");
+		tg3_read_otp_ver(tp);
+		return;
+	}
+
+	if (tg3_nvram_read(tp, 0, &val))
+		return;
+
+	if (val == TG3_EEPROM_MAGIC)
+		tg3_read_bc_ver(tp);
+	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+		tg3_read_sb_ver(tp, val);
+	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+		tg3_read_hwsb_ver(tp);
+
+	if (tg3_flag(tp, ENABLE_ASF)) {
+		if (tg3_flag(tp, ENABLE_APE)) {
+			tg3_probe_ncsi(tp);
+			if (!vpd_vers)
+				tg3_read_dash_ver(tp);
+		} else if (!vpd_vers) {
+			tg3_read_mgmtfw_ver(tp);
+		}
+	}
+
+	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+}
+
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+	if (tg3_flag(tp, LRG_PROD_RING_CAP))
+		return TG3_RX_RET_MAX_SIZE_5717;
+	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+		return TG3_RX_RET_MAX_SIZE_5700;
+	else
+		return TG3_RX_RET_MAX_SIZE_5705;
+}
+
+static const struct pci_device_id tg3_write_reorder_chipsets[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+	{ },
+};
+
+static struct pci_dev *tg3_find_peer(struct tg3 *tp)
+{
+	struct pci_dev *peer;
+	unsigned int func, devnr = tp->pdev->devfn & ~7;
+
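+	/* Scan the other functions in this slot for the companion device. */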
+	for (func = 0; func < 8; func++) {
+		peer = pci_get_slot(tp->pdev->bus, devnr | func);
+		if (peer && peer != tp->pdev)
+			break;
+		pci_dev_put(peer);
+	}
+	/* 5704 can be configured in single-port mode; set peer to
+	 * tp->pdev in that case.
+	 */
+	if (!peer) {
+		peer = tp->pdev;
+		return peer;
+	}
+
+	/*
+	 * We don't need to keep the refcount elevated; there's no way
+	 * to remove one half of this device without removing the other
+	 */
+	pci_dev_put(peer);
+
+	return peer;
+}
+
+static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
+{
+	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
+	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
+		u32 reg;
+
+		/* All devices that use the alternate
+		 * ASIC REV location have a CPMU.
+		 */
+		tg3_flag_set(tp, CPMU_PRESENT);
+
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
+			reg = TG3PCI_GEN2_PRODID_ASICREV;
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+			reg = TG3PCI_GEN15_PRODID_ASICREV;
+		else
+			reg = TG3PCI_PRODID_ASICREV;
+
+		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
+	}
+
+	/* Wrong chip ID in 5752 A0. This code can be removed later
+	 * as A0 is not in production.
+	 */
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
+		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
+		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720)
+		tg3_flag_set(tp, 5717_PLUS);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57766)
+		tg3_flag_set(tp, 57765_CLASS);
+
+	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
+	     tg3_asic_rev(tp) == ASIC_REV_5762)
+		tg3_flag_set(tp, 57765_PLUS);
+
+	/* Intentionally exclude ASIC_REV_5906 */
+	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_PLUS))
+		tg3_flag_set(tp, 5755_PLUS);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5714)
+		tg3_flag_set(tp, 5780_CLASS);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
+	    tg3_flag(tp, 5755_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS))
+		tg3_flag_set(tp, 5750_PLUS);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+	    tg3_flag(tp, 5750_PLUS))
+		tg3_flag_set(tp, 5705_PLUS);
+}
+
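+/* Return true if this device supports only 10/100 link speeds. */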
+static bool tg3_10_100_only_device(struct tg3 *tp,
+				   const struct pci_device_id *ent)
+{
+	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
+
+	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
+		return true;
+
+	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
+		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
+			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
+				return true;
+		} else {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
+{
+	u32 misc_ctrl_reg;
+	u32 pci_state_reg, grc_misc_cfg;
+	u32 val;
+	u16 pci_cmd;
+	int err;
+
+	/* Force memory write invalidate off.  If we leave it on,
+	 * then on 5700_BX chips we have to enable a workaround.
+	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+	 * to match the cacheline size.  The Broadcom driver has this
+	 * workaround but turns MWI off all the time, so it never uses
+	 * it.  This seems to suggest that the workaround is insufficient.
+	 */
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+	/* Important! -- Make sure register accesses are byteswapped
+	 * correctly.  Also, for those chips that require it, make
+	 * sure that indirect register accesses are enabled before
+	 * the first operation.
+	 */
+	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			      &misc_ctrl_reg);
+	tp->misc_host_ctrl |= (misc_ctrl_reg &
+			       MISC_HOST_CTRL_CHIPREV);
+	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			       tp->misc_host_ctrl);
+
+	tg3_detect_asic_rev(tp, misc_ctrl_reg);
+
+	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+	 * we need to disable memory and use config. cycles
+	 * only to access all registers. The 5702/03 chips
+	 * can mistakenly decode the special cycles from the
+	 * ICH chipsets as memory write cycles, causing corruption
+	 * of register and memory space. Only certain ICH bridges
+	 * will drive special cycles with non-zero data during the
+	 * address phase which can fall within the 5703's address
+	 * range. This is not an ICH bug as the PCI spec allows
+	 * non-zero address during special cycles. However, only
+	 * these ICH bridges are known to drive non-zero addresses
+	 * during special cycles.
+	 *
+	 * Since special cycles do not cross PCI bridges, we only
+	 * enable this workaround if the 5703 is on the secondary
+	 * bus of these ICH bridges.
+	 */
+	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
+		static struct tg3_dev_id {
+			u32	vendor;
+			u32	device;
+			u32	rev;
+		} ich_chipsets[] = {
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
+			  PCI_ANY_ID },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
+			  PCI_ANY_ID },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
+			  0xa },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
+			  PCI_ANY_ID },
+			{ },
+		};
+		struct tg3_dev_id *pci_id = &ich_chipsets[0];
+		struct pci_dev *bridge = NULL;
+
+		while (pci_id->vendor != 0) {
+			bridge = pci_get_device(pci_id->vendor, pci_id->device,
+						bridge);
+			if (!bridge) {
+				pci_id++;
+				continue;
+			}
+			if (pci_id->rev != PCI_ANY_ID) {
+				if (bridge->revision > pci_id->rev)
+					continue;
+			}
+			if (bridge->subordinate &&
+			    (bridge->subordinate->number ==
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, ICH_WORKAROUND);
+				pci_dev_put(bridge);
+				break;
+			}
+		}
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
+		static struct tg3_dev_id {
+			u32	vendor;
+			u32	device;
+		} bridge_chipsets[] = {
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
+			{ },
+		};
+		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
+		struct pci_dev *bridge = NULL;
+
+		while (pci_id->vendor != 0) {
+			bridge = pci_get_device(pci_id->vendor,
+						pci_id->device,
+						bridge);
+			if (!bridge) {
+				pci_id++;
+				continue;
+			}
+			if (bridge->subordinate &&
+			    (bridge->subordinate->number <=
+			     tp->pdev->bus->number) &&
+			    (bridge->subordinate->busn_res.end >=
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, 5701_DMA_BUG);
+				pci_dev_put(bridge);
+				break;
+			}
+		}
+	}
+
+	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
+	 * DMA addresses > 40-bit. This bridge may have other additional
+	 * 57xx devices behind it in some 4-port NIC designs for example.
+	 * Any tg3 device found behind the bridge will also need the 40-bit
+	 * DMA workaround.
+	 */
+	if (tg3_flag(tp, 5780_CLASS)) {
+		tg3_flag_set(tp, 40BIT_DMA_BUG);
+		tp->msi_cap = tp->pdev->msi_cap;
+	} else {
+		struct pci_dev *bridge = NULL;
+
+		do {
+			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+						PCI_DEVICE_ID_SERVERWORKS_EPB,
+						bridge);
+			if (bridge && bridge->subordinate &&
+			    (bridge->subordinate->number <=
+			     tp->pdev->bus->number) &&
+			    (bridge->subordinate->busn_res.end >=
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, 40BIT_DMA_BUG);
+				pci_dev_put(bridge);
+				break;
+			}
+		} while (bridge);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5714)
+		tp->pdev_peer = tg3_find_peer(tp);
+
+	/* Determine TSO capabilities */
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
+		; /* Do nothing. HW bug. */
+	else if (tg3_flag(tp, 57765_PLUS))
+		tg3_flag_set(tp, HW_TSO_3);
+	else if (tg3_flag(tp, 5755_PLUS) ||
+		 tg3_asic_rev(tp) == ASIC_REV_5906)
+		tg3_flag_set(tp, HW_TSO_2);
+	else if (tg3_flag(tp, 5750_PLUS)) {
+		tg3_flag_set(tp, HW_TSO_1);
+		tg3_flag_set(tp, TSO_BUG);
+		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
+			tg3_flag_clear(tp, TSO_BUG);
+	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
+		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
+		tg3_flag_set(tp, FW_TSO);
+		tg3_flag_set(tp, TSO_BUG);
+		if (tg3_asic_rev(tp) == ASIC_REV_5705)
+			tp->fw_needed = FIRMWARE_TG3TSO5;
+		else
+			tp->fw_needed = FIRMWARE_TG3TSO;
+	}
+
+	/* Selectively allow TSO based on operating conditions */
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3) ||
+	    tg3_flag(tp, FW_TSO)) {
+		/* For firmware TSO, assume ASF is disabled.
+		 * We'll disable TSO later if we discover ASF
+		 * is enabled in tg3_get_eeprom_hw_cfg().
+		 */
+		tg3_flag_set(tp, TSO_CAPABLE);
+	} else {
+		tg3_flag_clear(tp, TSO_CAPABLE);
+		tg3_flag_clear(tp, TSO_BUG);
+		tp->fw_needed = NULL;
+	}
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
+		tp->fw_needed = FIRMWARE_TG3;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_57766)
+		tp->fw_needed = FIRMWARE_TG357766;
+
+	tp->irq_max = 1;
+
+	if (tg3_flag(tp, 5750_PLUS)) {
+		tg3_flag_set(tp, SUPPORT_MSI);
+		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
+		     tp->pdev_peer == tp->pdev))
+			tg3_flag_clear(tp, SUPPORT_MSI);
+
+		if (tg3_flag(tp, 5755_PLUS) ||
+		    tg3_asic_rev(tp) == ASIC_REV_5906) {
+			tg3_flag_set(tp, 1SHOT_MSI);
+		}
+
+		if (tg3_flag(tp, 57765_PLUS)) {
+			tg3_flag_set(tp, SUPPORT_MSIX);
+			tp->irq_max = TG3_IRQ_MAX_VECS;
+		}
+	}
+
+	tp->txq_max = 1;
+	tp->rxq_max = 1;
+	if (tp->irq_max > 1) {
+		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
+		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			tp->txq_max = tp->irq_max - 1;
+	}
+
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    tg3_asic_rev(tp) == ASIC_REV_5906)
+		tg3_flag_set(tp, SHORT_DMA_BUG);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5719)
+		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		tg3_flag_set(tp, LRG_PROD_RING_CAP);
+
+	if (tg3_flag(tp, 57765_PLUS) &&
+	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
+		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS) ||
+	    tg3_flag(tp, USE_JUMBO_BDFLAG))
+		tg3_flag_set(tp, JUMBO_CAPABLE);
+
+	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+			      &pci_state_reg);
+
+	if (pci_is_pcie(tp->pdev)) {
+		u16 lnkctl;
+
+		tg3_flag_set(tp, PCI_EXPRESS);
+
+		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
+		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+				tg3_flag_clear(tp, HW_TSO_2);
+				tg3_flag_clear(tp, TSO_CAPABLE);
+			}
+			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
+			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
+				tg3_flag_set(tp, CLKREQ_BUG);
+		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
+			tg3_flag_set(tp, L1PLLPD_EN);
+		}
+	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
+		/* BCM5785 devices are effectively PCIe devices, and should
+		 * follow PCIe codepaths, but do not have a PCIe capabilities
+		 * section.
+		 */
+		tg3_flag_set(tp, PCI_EXPRESS);
+	} else if (!tg3_flag(tp, 5705_PLUS) ||
+		   tg3_flag(tp, 5780_CLASS)) {
+		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+		if (!tp->pcix_cap) {
+			dev_err(&tp->pdev->dev,
+				"Cannot find PCI-X capability, aborting\n");
+			return -EIO;
+		}
+
+		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+			tg3_flag_set(tp, PCIX_MODE);
+	}
+
+	/* If we have an AMD 762 or VIA K8T800 chipset, write
+	 * reordering to the mailbox registers done by the host
+	 * controller can cause major troubles.  We read back from
+	 * every mailbox register write to force the writes to be
+	 * posted to the chip in order.
+	 */
+	if (pci_dev_present(tg3_write_reorder_chipsets) &&
+	    !tg3_flag(tp, PCI_EXPRESS))
+		tg3_flag_set(tp, MBOX_WRITE_REORDER);
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+			     &tp->pci_cacheline_sz);
+	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+			     &tp->pci_lat_timer);
+	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
+	    tp->pci_lat_timer < 64) {
+		tp->pci_lat_timer = 64;
+		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+				      tp->pci_lat_timer);
+	}
+
+	/* Important! -- It is critical that the PCI-X hw workaround
+	 * situation is decided before the first MMIO register access.
+	 */
+	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
+		/* 5700 BX chips need to have their TX producer index
+		 * mailboxes written twice to work around a bug.
+		 */
+		tg3_flag_set(tp, TXD_MBOX_HWBUG);
+
+		/* If we are in PCI-X mode, enable register write workaround.
+		 *
+		 * The workaround is to use indirect register accesses
+		 * for all chip writes not to mailbox registers.
+		 */
+		if (tg3_flag(tp, PCIX_MODE)) {
+			u32 pm_reg;
+
+			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+
+			/* The chip can have its power management PCI config
+			 * space registers clobbered due to this bug.
+			 * So explicitly force the chip into D0 here.
+			 */
+			pci_read_config_dword(tp->pdev,
+					      tp->pdev->pm_cap + PCI_PM_CTRL,
+					      &pm_reg);
+			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+			pci_write_config_dword(tp->pdev,
+					       tp->pdev->pm_cap + PCI_PM_CTRL,
+					       pm_reg);
+
+			/* Also, force SERR#/PERR# in PCI command. */
+			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+		}
+	}
+
+	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+		tg3_flag_set(tp, PCI_HIGH_SPEED);
+	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+		tg3_flag_set(tp, PCI_32BIT);
+
+	/* Chip-specific fixup from Broadcom driver */
+	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
+	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+	}
+
+	/* Default fast path register access methods */
+	tp->read32 = tg3_read32;
+	tp->write32 = tg3_write32;
+	tp->read32_mbox = tg3_read32;
+	tp->write32_mbox = tg3_write32;
+	tp->write32_tx_mbox = tg3_write32;
+	tp->write32_rx_mbox = tg3_write32;
+
+	/* Various workaround register access methods */
+	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
+		tp->write32 = tg3_write_indirect_reg32;
+	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
+		 (tg3_flag(tp, PCI_EXPRESS) &&
+		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
+		/*
+		 * Back to back register writes can cause problems on these
+		 * chips, the workaround is to read back all reg writes
+		 * except those to mailbox regs.
+		 *
+		 * See tg3_write_indirect_reg32().
+		 */
+		tp->write32 = tg3_write_flush_reg32;
+	}
+
+	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		if (tg3_flag(tp, MBOX_WRITE_REORDER))
+			tp->write32_rx_mbox = tg3_write_flush_reg32;
+	}
+
+	if (tg3_flag(tp, ICH_WORKAROUND)) {
+		tp->read32 = tg3_read_indirect_reg32;
+		tp->write32 = tg3_write_indirect_reg32;
+		tp->read32_mbox = tg3_read_indirect_mbox;
+		tp->write32_mbox = tg3_write_indirect_mbox;
+		tp->write32_tx_mbox = tg3_write_indirect_mbox;
+		tp->write32_rx_mbox = tg3_write_indirect_mbox;
+
+		iounmap(tp->regs);
+		tp->regs = NULL;
+
+		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+		pci_cmd &= ~PCI_COMMAND_MEMORY;
+		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+	}
+	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+		tp->read32_mbox = tg3_read32_mbox_5906;
+		tp->write32_mbox = tg3_write32_mbox_5906;
+		tp->write32_tx_mbox = tg3_write32_mbox_5906;
+		tp->write32_rx_mbox = tg3_write32_mbox_5906;
+	}
+
+	if (tp->write32 == tg3_write_indirect_reg32 ||
+	    (tg3_flag(tp, PCIX_MODE) &&
+	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	      tg3_asic_rev(tp) == ASIC_REV_5701)))
+		tg3_flag_set(tp, SRAM_USE_CONFIG);
+
+	/* The memory arbiter has to be enabled in order for SRAM accesses
+	 * to succeed.  Normally on powerup the tg3 chip firmware will make
+	 * sure it is enabled, but other entities such as system netboot
+	 * code might disable it.
+	 */
+	val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		if (tg3_flag(tp, PCIX_MODE)) {
+			pci_read_config_dword(tp->pdev,
+					      tp->pcix_cap + PCI_X_STATUS,
+					      &val);
+			tp->pci_fn = val & 0x7;
+		}
+	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		   tg3_asic_rev(tp) == ASIC_REV_5720) {
+		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
+			val = tr32(TG3_CPMU_STATUS);
+
+		if (tg3_asic_rev(tp) == ASIC_REV_5717)
+			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
+		else
+			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
+				     TG3_CPMU_STATUS_FSHFT_5719;
+	}
+
+	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+		tp->write32_tx_mbox = tg3_write_flush_reg32;
+		tp->write32_rx_mbox = tg3_write_flush_reg32;
+	}
+
+	/* Get eeprom hw config before calling tg3_set_power_state().
+	 * In particular, the TG3_FLAG_IS_NIC flag must be
+	 * determined before calling tg3_set_power_state() so that
+	 * we know whether or not to switch out of Vaux power.
+	 * When the flag is set, it means that GPIO1 is used for eeprom
+	 * write protect and also implies that it is a LOM where GPIOs
+	 * are not used to switch power.
+	 */
+	tg3_get_eeprom_hw_cfg(tp);
+
+	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
+		tg3_flag_clear(tp, TSO_CAPABLE);
+		tg3_flag_clear(tp, TSO_BUG);
+		tp->fw_needed = NULL;
+	}
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		/* Allow reads and writes to the
+		 * APE register and memory space.
+		 */
+		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+				 PCISTATE_ALLOW_APE_SHMEM_WR |
+				 PCISTATE_ALLOW_APE_PSPACE_WR;
+		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
+				       pci_state_reg);
+
+		tg3_ape_lock_init(tp);
+	}
+
+	/* Set up tp->grc_local_ctrl before calling
+	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
+	 * will bring 5700's external PHY out of reset.
+	 * It is also used as eeprom write protect on LOMs.
+	 */
+	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_flag(tp, EEPROM_WRITE_PROT))
+		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+				       GRC_LCLCTRL_GPIO_OUTPUT1);
+	/* Unused GPIO3 must be driven as output on 5752 because there
+	 * are no pull-up resistors on unused GPIO pins.
+	 */
+	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_CLASS))
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+		/* Turn off the debug UART. */
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+		if (tg3_flag(tp, IS_NIC))
+			/* Keep VMain power. */
+			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+					      GRC_LCLCTRL_GPIO_OUTPUT0;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5762)
+		tp->grc_local_ctrl |=
+			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
+
+	/* Switch out of Vaux if it is a NIC */
+	tg3_pwrsrc_switch_to_vmain(tp);
+
+	/* Derive initial jumbo mode from MTU assigned in
+	 * ether_setup() via the alloc_etherdev() call
+	 */
+	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
+		tg3_flag_set(tp, JUMBO_RING_ENABLE);
+
+	/* Determine WakeOnLan speed to use. */
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
+		tg3_flag_clear(tp, WOL_SPEED_100MB);
+	} else {
+		tg3_flag_set(tp, WOL_SPEED_100MB);
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5906)
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
+
+	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
+
+	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
+		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
+		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
+
+	if (tg3_flag(tp, 5705_PLUS) &&
+	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
+	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
+	    !tg3_flag(tp, 57765_PLUS)) {
+		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5761) {
+			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
+			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
+				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
+			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
+		} else
+			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
+		tp->phy_otp = tg3_read_otp_phycfg(tp);
+		if (tp->phy_otp == 0)
+			tp->phy_otp = TG3_OTP_DEFAULT;
+	}
+
+	if (tg3_flag(tp, CPMU_PRESENT))
+		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
+	else
+		tp->mi_mode = MAC_MI_MODE_BASE;
+
+	tp->coalesce_mode = 0;
+	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
+		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+
+	/* Set these bits to enable statistics workaround. */
+	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
+		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
+		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
+	}
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+	    tg3_asic_rev(tp) == ASIC_REV_57780)
+		tg3_flag_set(tp, USE_PHYLIB);
+
+	err = tg3_mdio_init(tp);
+	if (err)
+		return err;
+
+	/* Initialize data/descriptor byte/word swapping. */
+	val = tr32(GRC_MODE);
+	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
+			GRC_MODE_WORD_SWAP_B2HRX_DATA |
+			GRC_MODE_B2HRX_ENABLE |
+			GRC_MODE_HTX2B_ENABLE |
+			GRC_MODE_HOST_STACKUP);
+	else
+		val &= GRC_MODE_HOST_STACKUP;
+
+	tw32(GRC_MODE, val | tp->grc_mode);
+
+	tg3_switch_clocks(tp);
+
+	/* Clear this out for sanity. */
+	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
+	tw32(TG3PCI_REG_BASE_ADDR, 0);
+
+	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+			      &pci_state_reg);
+	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
+		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
+			void __iomem *sram_base;
+
+			/* Write some dummy words into the SRAM status block
+			 * area and see if it reads back correctly.  If the
+			 * read-back value is bad, force-enable the PCI-X workaround.
+			 */
+			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
+
+			writel(0x00000000, sram_base);
+			writel(0x00000000, sram_base + 4);
+			writel(0xffffffff, sram_base + 4);
+			if (readl(sram_base) != 0x00000000)
+				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+		}
+	}
+
+	udelay(50);
+	tg3_nvram_init(tp);
+
+	/* If the device has an NVRAM, no need to load patch firmware */
+	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+	    !tg3_flag(tp, NO_NVRAM))
+		tp->fw_needed = NULL;
+
+	grc_misc_cfg = tr32(GRC_MISC_CFG);
+	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+		tg3_flag_set(tp, IS_5788);
+
+	if (!tg3_flag(tp, IS_5788) &&
+	    tg3_asic_rev(tp) != ASIC_REV_5700)
+		tg3_flag_set(tp, TAGGED_STATUS);
+	if (tg3_flag(tp, TAGGED_STATUS)) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
+	/* Preserve the APE MAC_MODE bits */
+	if (tg3_flag(tp, ENABLE_APE))
+		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+	else
+		tp->mac_mode = 0;
+
+	if (tg3_10_100_only_device(tp, ent))
+		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
+
+	err = tg3_phy_probe(tp);
+	if (err) {
+		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
+		/* ... but do not return immediately ... */
+		tg3_mdio_fini(tp);
+	}
+
+	tg3_read_vpd(tp);
+	tg3_read_fw_ver(tp);
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+	} else {
+		if (tg3_asic_rev(tp) == ASIC_REV_5700)
+			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+		else
+			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+	}
+
+	/* 5700 {AX,BX} chips have a broken status block link
+	 * change bit implementation, so we must use the
+	 * status register in those cases.
+	 */
+	if (tg3_asic_rev(tp) == ASIC_REV_5700)
+		tg3_flag_set(tp, USE_LINKCHG_REG);
+	else
+		tg3_flag_clear(tp, USE_LINKCHG_REG);
+
+	/* The led_ctrl is set during tg3_phy_probe, here we might
+	 * have to force the link status polling mechanism based
+	 * upon subsystem IDs.
+	 */
+	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+		tg3_flag_set(tp, USE_LINKCHG_REG);
+	}
+
+	/* For all SERDES we poll the MAC status register. */
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		tg3_flag_set(tp, POLL_SERDES);
+	else
+		tg3_flag_clear(tp, POLL_SERDES);
+
+	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+		tg3_flag_set(tp, POLL_CPMU_LINK);
+
+	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
+	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
+	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
+	    tg3_flag(tp, PCIX_MODE)) {
+		tp->rx_offset = NET_SKB_PAD;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		tp->rx_copy_thresh = ~(u16)0;
+#endif
+	}
+
+	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
+
+	/* Increment the rx prod index on the rx std ring by at most
+	 * 8 for these chips to work around hw errata.
+	 */
+	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5755)
+		tp->rx_std_max_post = 8;
+
+	if (tg3_flag(tp, ASPM_WORKAROUND))
+		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
+				     PCIE_PWR_MGMT_L1_THRESH_MSK;
+
+	return err;
+}
+
+#ifdef CONFIG_SPARC
+static int tg3_get_macaddr_sparc(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+	struct pci_dev *pdev = tp->pdev;
+	struct device_node *dp = pci_device_to_OF_node(pdev);
+	const unsigned char *addr;
+	int len;
+
+	addr = of_get_property(dp, "local-mac-address", &len);
+	if (addr && len == ETH_ALEN) {
+		memcpy(dev->dev_addr, addr, ETH_ALEN);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+
+	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+	return 0;
+}
+#endif
+
+static int tg3_get_device_address(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+	u32 hi, lo, mac_offset;
+	int addr_ok = 0;
+	int err;
+
+#ifdef CONFIG_SPARC
+	if (!tg3_get_macaddr_sparc(tp))
+		return 0;
+#endif
+
+	if (tg3_flag(tp, IS_SSB_CORE)) {
+		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+			return 0;
+	}
+
+	mac_offset = 0x7c;
+	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+			mac_offset = 0xcc;
+		if (tg3_nvram_lock(tp))
+			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
+		else
+			tg3_nvram_unlock(tp);
+	} else if (tg3_flag(tp, 5717_PLUS)) {
+		if (tp->pci_fn & 1)
+			mac_offset = 0xcc;
+		if (tp->pci_fn > 1)
+			mac_offset += 0x18c;
+	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
+		mac_offset = 0x10;
+
+	/* First try to get it from MAC address mailbox. */
+	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
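+	/* When the bootcode has deposited a valid MAC address in the
+	 * mailbox, the upper 16 bits read back as ASCII "HK" (0x484b).
+	 */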
+	if ((hi >> 16) == 0x484b) {
+		dev->dev_addr[0] = (hi >>  8) & 0xff;
+		dev->dev_addr[1] = (hi >>  0) & 0xff;
+
+		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
+		dev->dev_addr[2] = (lo >> 24) & 0xff;
+		dev->dev_addr[3] = (lo >> 16) & 0xff;
+		dev->dev_addr[4] = (lo >>  8) & 0xff;
+		dev->dev_addr[5] = (lo >>  0) & 0xff;
+
+		/* Some old bootcode may report a 0 MAC address in SRAM */
+		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+	}
+	if (!addr_ok) {
+		/* Next, try NVRAM. */
+		if (!tg3_flag(tp, NO_NVRAM) &&
+		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
+			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
+			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+		}
+		/* Finally just fetch it out of the MAC control regs. */
+		else {
+			hi = tr32(MAC_ADDR_0_HIGH);
+			lo = tr32(MAC_ADDR_0_LOW);
+
+			dev->dev_addr[5] = lo & 0xff;
+			dev->dev_addr[4] = (lo >> 8) & 0xff;
+			dev->dev_addr[3] = (lo >> 16) & 0xff;
+			dev->dev_addr[2] = (lo >> 24) & 0xff;
+			dev->dev_addr[1] = hi & 0xff;
+			dev->dev_addr[0] = (hi >> 8) & 0xff;
+		}
+	}
+
+	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+#ifdef CONFIG_SPARC
+		if (!tg3_get_default_macaddr_sparc(tp))
+			return 0;
+#endif
+		return -EINVAL;
+	}
+	return 0;
+}
+
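+/* DMA burst boundary goals used by tg3_calc_dma_bndry() below. */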
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
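+	/* The PCI cache line size register is in units of 32-bit words;
+	 * a value of zero is treated as the 1024-byte worst case below.
+	 */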
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
+	    !tg3_flag(tp, PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+#endif
+
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		goto out;
+	}
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		}
+	} else if (tg3_flag(tp, PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		}
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		}
+	}
+
+out:
+	return val;
+}
+
+static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
+			   int size, bool to_device)
+{
+	struct tg3_internal_buffer_desc test_desc;
+	u32 sram_dma_descs;
+	int i, ret;
+
+	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
+
+	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
+	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
+	tw32(RDMAC_STATUS, 0);
+	tw32(WDMAC_STATUS, 0);
+
+	tw32(BUFMGR_MODE, 0);
+	tw32(FTQ_RESET, 0);
+
+	test_desc.addr_hi = ((u64) buf_dma) >> 32;
+	test_desc.addr_lo = buf_dma & 0xffffffff;
+	test_desc.nic_mbuf = 0x00002100;
+	test_desc.len = size;
+
+	/*
+	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
+	 * the *second* time the tg3 driver was getting loaded after an
+	 * initial scan.
+	 *
+	 * Broadcom tells me:
+	 *   ...the DMA engine is connected to the GRC block and a DMA
+	 *   reset may affect the GRC block in some unpredictable way...
+	 *   The behavior of resets to individual blocks has not been tested.
+	 *
+	 * Broadcom noted the GRC reset will also reset all sub-components.
+	 */
+	if (to_device) {
+		test_desc.cqid_sqid = (13 << 8) | 2;
+
+		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
+		udelay(40);
+	} else {
+		test_desc.cqid_sqid = (16 << 8) | 7;
+
+		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
+		udelay(40);
+	}
+	test_desc.flags = 0x00000005;
+
+	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
+		u32 val;
+
+		val = *(((u32 *)&test_desc) + i);
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
+				       sram_dma_descs + (i * sizeof(u32)));
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+	}
+	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	if (to_device)
+		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
+	else
+		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
+
+	ret = -ENODEV;
+	for (i = 0; i < 40; i++) {
+		u32 val;
+
+		if (to_device)
+			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
+		else
+			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
+		if ((val & 0xffff) == sram_dma_descs) {
+			ret = 0;
+			break;
+		}
+
+		udelay(100);
+	}
+
+	return ret;
+}
+
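+/* Size of the scratch buffer (8 KiB) used by the DMA engine test below. */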
+#define TEST_BUFFER_SIZE	0x2000
+
+static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+	{ },
+};
+
+static int tg3_test_dma(struct tg3 *tp)
+{
+	dma_addr_t buf_dma;
+	u32 *buf, saved_dma_rwctrl;
+	int ret = 0;
+
+	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+				 &buf_dma, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out_nofree;
+	}
+
+	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
+
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		goto out;
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* DMA read watermark not used on PCIE */
+		tp->dma_rwctrl |= 0x00180000;
+	} else if (!tg3_flag(tp, PCIX_MODE)) {
+		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5750)
+			tp->dma_rwctrl |= 0x003f0000;
+		else
+			tp->dma_rwctrl |= 0x003f000f;
+	} else {
+		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5704) {
+			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
+			u32 read_water = 0x7;
+
+			/* If the 5704 is behind the EPB bridge, we can
+			 * do the less restrictive ONE_DMA workaround for
+			 * better performance.
+			 */
+			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
+			    tg3_asic_rev(tp) == ASIC_REV_5704)
+				tp->dma_rwctrl |= 0x8000;
+			else if (ccval == 0x6 || ccval == 0x7)
+				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+			if (tg3_asic_rev(tp) == ASIC_REV_5703)
+				read_water = 4;
+			/* Set bit 23 to enable PCIX hw bug fix */
+			tp->dma_rwctrl |=
+				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
+				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
+				(1 << 23);
+		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
+			/* 5780 always in PCIX mode */
+			tp->dma_rwctrl |= 0x00144000;
+		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+			/* 5714 always in PCIX mode */
+			tp->dma_rwctrl |= 0x00148000;
+		} else {
+			tp->dma_rwctrl |= 0x001b000f;
+		}
+	}
+	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5704)
+		tp->dma_rwctrl &= 0xfffffff0;
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5701) {
+		/* Remove this if it causes problems for some boards. */
+		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
+
+		/* On 5700/5701 chips, we need to set this bit.
+		 * Otherwise the chip will issue cacheline transactions
+		 * to streamable DMA memory with not all the byte
+		 * enables turned on.  This is an error on several
+		 * RISC PCI controllers, in particular sparc64.
+		 *
+		 * On 5703/5704 chips, this bit has been reassigned
+		 * a different meaning.  In particular, it is used
+		 * on those chips to enable a PCI-X workaround.
+		 */
+		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+	}
+
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+
+	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5701)
+		goto out;
+
+	/* It is best to perform the DMA test with the maximum write burst
+	 * size to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+	while (1) {
+		u32 *p = buf, i;
+
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+			p[i] = i;
+
+		/* Send the buffer to the chip. */
+		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
+		if (ret) {
+			dev_err(&tp->pdev->dev,
+				"%s: Buffer write failed. err = %d\n",
+				__func__, ret);
+			break;
+		}
+
+		/* Now read it back. */
+		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
+		if (ret) {
+			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+				"err = %d\n", __func__, ret);
+			break;
+		}
+
+		/* Verify it. */
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+			if (p[i] == i)
+				continue;
+
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+				break;
+			} else {
+				dev_err(&tp->pdev->dev,
+					"%s: Buffer corrupted on read back! "
+					"(%d != %d)\n", __func__, p[i], i);
+				ret = -ENODEV;
+				goto out;
+			}
+		}
+
+		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
+			/* Success. */
+			ret = 0;
+			break;
+		}
+	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		/* The DMA test passed without adjusting the DMA boundary;
+		 * now look for chipsets that are known to expose the
+		 * DMA bug without failing the test.
+		 */
+		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
+			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+		} else {
+			/* Safe to use the calculated DMA boundary. */
+			tp->dma_rwctrl = saved_dma_rwctrl;
+		}
+
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
+
+out:
+	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
+out_nofree:
+	return ret;
+}
+
+static void tg3_init_bufmgr_config(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 57765_PLUS)) {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER_57765;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER_57765;
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+	} else if (tg3_flag(tp, 5705_PLUS)) {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER_5705;
+		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
+			tp->bufmgr_config.mbuf_mac_rx_low_water =
+				DEFAULT_MB_MACRX_LOW_WATER_5906;
+			tp->bufmgr_config.mbuf_high_water =
+				DEFAULT_MB_HIGH_WATER_5906;
+		}
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
+	} else {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER;
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO;
+	}
+
+	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
+	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
+}
+
+static char *tg3_phy_string(struct tg3 *tp)
+{
+	switch (tp->phy_id & TG3_PHY_ID_MASK) {
+	case TG3_PHY_ID_BCM5400:	return "5400";
+	case TG3_PHY_ID_BCM5401:	return "5401";
+	case TG3_PHY_ID_BCM5411:	return "5411";
+	case TG3_PHY_ID_BCM5701:	return "5701";
+	case TG3_PHY_ID_BCM5703:	return "5703";
+	case TG3_PHY_ID_BCM5704:	return "5704";
+	case TG3_PHY_ID_BCM5705:	return "5705";
+	case TG3_PHY_ID_BCM5750:	return "5750";
+	case TG3_PHY_ID_BCM5752:	return "5752";
+	case TG3_PHY_ID_BCM5714:	return "5714";
+	case TG3_PHY_ID_BCM5780:	return "5780";
+	case TG3_PHY_ID_BCM5755:	return "5755";
+	case TG3_PHY_ID_BCM5787:	return "5787";
+	case TG3_PHY_ID_BCM5784:	return "5784";
+	case TG3_PHY_ID_BCM5756:	return "5722/5756";
+	case TG3_PHY_ID_BCM5906:	return "5906";
+	case TG3_PHY_ID_BCM5761:	return "5761";
+	case TG3_PHY_ID_BCM5718C:	return "5718C";
+	case TG3_PHY_ID_BCM5718S:	return "5718S";
+	case TG3_PHY_ID_BCM57765:	return "57765";
+	case TG3_PHY_ID_BCM5719C:	return "5719C";
+	case TG3_PHY_ID_BCM5720C:	return "5720C";
+	case TG3_PHY_ID_BCM5762:	return "5762C";
+	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
+	case 0:			return "serdes";
+	default:		return "unknown";
+	}
+}
+
+static char *tg3_bus_string(struct tg3 *tp, char *str)
+{
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		strcpy(str, "PCI Express");
+		return str;
+	} else if (tg3_flag(tp, PCIX_MODE)) {
+		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+		strcpy(str, "PCIX:");
+
+		if ((clock_ctrl == 7) ||
+		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+			strcat(str, "133MHz");
+		else if (clock_ctrl == 0)
+			strcat(str, "33MHz");
+		else if (clock_ctrl == 2)
+			strcat(str, "50MHz");
+		else if (clock_ctrl == 4)
+			strcat(str, "66MHz");
+		else if (clock_ctrl == 6)
+			strcat(str, "100MHz");
+	} else {
+		strcpy(str, "PCI:");
+		if (tg3_flag(tp, PCI_HIGH_SPEED))
+			strcat(str, "66MHz");
+		else
+			strcat(str, "33MHz");
+	}
+	if (tg3_flag(tp, PCI_32BIT))
+		strcat(str, ":32-bit");
+	else
+		strcat(str, ":64-bit");
+	return str;
+}
+
+static void tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		ec->rx_coalesce_usecs_irq = 0;
+		ec->tx_coalesce_usecs_irq = 0;
+		ec->stats_block_coalesce_usecs = 0;
+	}
+}
+
+static int tg3_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	struct net_device *dev;
+	struct tg3 *tp;
+	int i, err;
+	u32 sndmbx, rcvmbx, intmbx;
+	char str[40];
+	u64 dma_mask, persist_dma_mask;
+	netdev_features_t features = 0;
+
+	printk_once(KERN_INFO "%s\n", version);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_out_free_res;
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	tp = netdev_priv(dev);
+	tp->pdev = pdev;
+	tp->dev = dev;
+	tp->rx_mode = TG3_DEF_RX_MODE;
+	tp->tx_mode = TG3_DEF_TX_MODE;
+	tp->irq_sync = 1;
+	tp->pcierr_recovery = false;
+
+	if (tg3_debug > 0)
+		tp->msg_enable = tg3_debug;
+	else
+		tp->msg_enable = TG3_DEF_MSG_ENABLE;
+
+	if (pdev_is_ssb_gige_core(pdev)) {
+		tg3_flag_set(tp, IS_SSB_CORE);
+		if (ssb_gige_must_flush_posted_writes(pdev))
+			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+		if (ssb_gige_one_dma_at_once(pdev))
+			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+		if (ssb_gige_have_roboswitch(pdev)) {
+			tg3_flag_set(tp, USE_PHYLIB);
+			tg3_flag_set(tp, ROBOSWITCH);
+		}
+		if (ssb_gige_is_rgmii(pdev))
+			tg3_flag_set(tp, RGMII_MODE);
+	}
+
+	/* The word/byte swap controls here control register access byte
+	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
+	 * setting below.
+	 */
+	tp->misc_host_ctrl =
+		MISC_HOST_CTRL_MASK_PCI_INT |
+		MISC_HOST_CTRL_WORD_SWAP |
+		MISC_HOST_CTRL_INDIR_ACCESS |
+		MISC_HOST_CTRL_PCISTATE_RW;
+
+	/* The NONFRM (non-frame) byte/word swap controls take effect
+	 * on descriptor entries, anything which isn't packet data.
+	 *
+	 * The StrongARM chips on the board (one for tx, one for rx)
+	 * are running in big-endian mode.
+	 */
+	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
+			GRC_MODE_WSWAP_NONFRM_DATA);
+#ifdef __BIG_ENDIAN
+	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+#endif
+	spin_lock_init(&tp->lock);
+	spin_lock_init(&tp->indirect_lock);
+	INIT_WORK(&tp->reset_task, tg3_reset_task);
+
+	tp->regs = pci_ioremap_bar(pdev, BAR_0);
+	if (!tp->regs) {
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+		err = -ENOMEM;
+		goto err_out_free_dev;
+	}
+
+	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
+		tg3_flag_set(tp, ENABLE_APE);
+		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+		if (!tp->aperegs) {
+			dev_err(&pdev->dev,
+				"Cannot map APE registers, aborting\n");
+			err = -ENOMEM;
+			goto err_out_iounmap;
+		}
+	}
+
+	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+
+	dev->ethtool_ops = &tg3_ethtool_ops;
+	dev->watchdog_timeo = TG3_TX_TIMEOUT;
+	dev->netdev_ops = &tg3_netdev_ops;
+	dev->irq = pdev->irq;
+
+	err = tg3_get_invariants(tp, ent);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Problem fetching invariants of chip, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	/* The EPB bridge inside 5714, 5715, and 5780 and any
+	 * device behind the EPB cannot support DMA addresses > 40-bit.
+	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+	 * do DMA address check in tg3_start_xmit().
+	 */
+	if (tg3_flag(tp, IS_5788))
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
+	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+#ifdef CONFIG_HIGHMEM
+		dma_mask = DMA_BIT_MASK(64);
+#endif
+	} else
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+	/* Configure DMA attributes. */
+	if (dma_mask > DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				dev_err(&pdev->dev, "Unable to obtain 64 bit "
+					"DMA for consistent allocations\n");
+				goto err_out_apeunmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_out_apeunmap;
+		}
+	}
+
+	tg3_init_bufmgr_config(tp);
+
+	/* 5700 B0 chips do not support checksumming correctly due
+	 * to hardware bugs.
+	 */
+	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
+		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+		if (tg3_flag(tp, 5755_PLUS))
+			features |= NETIF_F_IPV6_CSUM;
+	}
+
+	/* TSO is on by default on chips that support hardware TSO.
+	 * Firmware TSO on older chips gives lower performance, so it
+	 * is off by default, but can be enabled using ethtool.
+	 */
+	if ((tg3_flag(tp, HW_TSO_1) ||
+	     tg3_flag(tp, HW_TSO_2) ||
+	     tg3_flag(tp, HW_TSO_3)) &&
+	    (features & NETIF_F_IP_CSUM))
+		features |= NETIF_F_TSO;
+	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+		if (features & NETIF_F_IPV6_CSUM)
+			features |= NETIF_F_TSO6;
+		if (tg3_flag(tp, HW_TSO_3) ||
+		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
+		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
+		    tg3_asic_rev(tp) == ASIC_REV_57780)
+			features |= NETIF_F_TSO_ECN;
+	}
+
+	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+	dev->vlan_features |= features;
+
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		features |= NETIF_F_LOOPBACK;
+
+	dev->hw_features |= features;
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
+	    !tg3_flag(tp, TSO_CAPABLE) &&
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+		tg3_flag_set(tp, MAX_RXPEND_64);
+		tp->rx_pending = 63;
+	}
+
+	err = tg3_get_device_address(tp);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Could not obtain valid ethernet address, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	for (i = 0; i < tp->irq_max; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->tp = tp;
+		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+		tnapi->int_mbox = intmbx;
+		if (i <= 4)
+			intmbx += 0x8;
+		else
+			intmbx += 0x4;
+
+		tnapi->consmbox = rcvmbx;
+		tnapi->prodmbox = sndmbx;
+
+		if (i)
+			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+		else
+			tnapi->coal_now = HOSTCC_MODE_NOW;
+
+		if (!tg3_flag(tp, SUPPORT_MSIX))
+			break;
+
+		/*
+		 * If we support MSIX, we'll be using RSS.  If we're using
+		 * RSS, the first vector only handles link interrupts and the
+		 * remaining vectors handle rx and tx interrupts.  Reuse the
+		 * mailbox values for the next iteration.  The values we set up
+		 * above are still useful for the single-vectored mode.
+		 */
+		if (!i)
+			continue;
+
+		rcvmbx += 0x8;
+
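+		/* Send producer mailboxes are packed two 32-bit indices per
+		 * 64-bit mailbox slot: use the low word first, then the high
+		 * word, before stepping to the next slot.
+		 */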
+		if (sndmbx & 0x4)
+			sndmbx -= 0x4;
+		else
+			sndmbx += 0xc;
+	}
+
+	/*
+	 * Reset the chip in case an UNDI or EFI driver did not shut it
+	 * down.  Otherwise the DMA self test will enable WDMAC and we'll
+	 * see (spurious) pending DMA on the PCI bus at that point.
+	 */
+	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+		tg3_full_lock(tp, 0);
+		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_full_unlock(tp);
+	}
+
+	err = tg3_test_dma(tp);
+	if (err) {
+		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	tg3_init_coal(tp);
+
+	pci_set_drvdata(pdev, dev);
+
+	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762)
+		tg3_flag_set(tp, PTP_CAPABLE);
+
+	tg3_timer_init(tp);
+
+	tg3_carrier_off(tp);
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	if (tg3_flag(tp, PTP_CAPABLE)) {
+		tg3_ptp_init(tp);
+		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+						   &tp->pdev->dev);
+		if (IS_ERR(tp->ptp_clock))
+			tp->ptp_clock = NULL;
+	}
+
+	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+		    tp->board_part_number,
+		    tg3_chip_rev_id(tp),
+		    tg3_bus_string(tp, str),
+		    dev->dev_addr);
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+		struct phy_device *phydev;
+		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
+		netdev_info(dev,
+			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+			    phydev->drv->name, dev_name(&phydev->dev));
+	} else {
+		char *ethtype;
+
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			ethtype = "10/100Base-TX";
+		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+			ethtype = "1000Base-SX";
+		else
+			ethtype = "10/100/1000Base-T";
+
+		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+			    "(WireSpeed[%d], EEE[%d])\n",
+			    tg3_phy_string(tp), ethtype,
+			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
+			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
+	}
+
+	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+		    (dev->features & NETIF_F_RXCSUM) != 0,
+		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
+		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
+		    tg3_flag(tp, ENABLE_ASF) != 0,
+		    tg3_flag(tp, TSO_CAPABLE) != 0);
+	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+		    tp->dma_rwctrl,
+		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
+
+	pci_save_state(pdev);
+
+	return 0;
+
+err_out_apeunmap:
+	if (tp->aperegs) {
+		iounmap(tp->aperegs);
+		tp->aperegs = NULL;
+	}
+
+err_out_iounmap:
+	if (tp->regs) {
+		iounmap(tp->regs);
+		tp->regs = NULL;
+	}
+
+err_out_free_dev:
+	free_netdev(dev);
+
+err_out_free_res:
+	pci_release_regions(pdev);
+
+err_out_disable_pdev:
+	if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
+	return err;
+}
+
+static void tg3_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		struct tg3 *tp = netdev_priv(dev);
+
+		tg3_ptp_fini(tp);
+
+		release_firmware(tp->fw);
+
+		tg3_reset_task_cancel(tp);
+
+		if (tg3_flag(tp, USE_PHYLIB)) {
+			tg3_phy_fini(tp);
+			tg3_mdio_fini(tp);
+		}
+
+		unregister_netdev(dev);
+		if (tp->aperegs) {
+			iounmap(tp->aperegs);
+			tp->aperegs = NULL;
+		}
+		if (tp->regs) {
+			iounmap(tp->regs);
+			tp->regs = NULL;
+		}
+		free_netdev(dev);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tg3_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+	int err = 0;
+
+	rtnl_lock();
+
+	if (!netif_running(dev))
+		goto unlock;
+
+	tg3_reset_task_cancel(tp);
+	tg3_phy_stop(tp);
+	tg3_netif_stop(tp);
+
+	tg3_timer_stop(tp);
+
+	tg3_full_lock(tp, 1);
+	tg3_disable_ints(tp);
+	tg3_full_unlock(tp);
+
+	netif_device_detach(dev);
+
+	tg3_full_lock(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+	tg3_full_unlock(tp);
+
+	err = tg3_power_down_prepare(tp);
+	if (err) {
+		int err2;
+
+		tg3_full_lock(tp, 0);
+
+		tg3_flag_set(tp, INIT_COMPLETE);
+		err2 = tg3_restart_hw(tp, true);
+		if (err2)
+			goto out;
+
+		tg3_timer_start(tp);
+
+		netif_device_attach(dev);
+		tg3_netif_start(tp);
+
+out:
+		tg3_full_unlock(tp);
+
+		if (!err2)
+			tg3_phy_start(tp);
+	}
+
+unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int tg3_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+	int err = 0;
+
+	rtnl_lock();
+
+	if (!netif_running(dev))
+		goto unlock;
+
+	netif_device_attach(dev);
+
+	tg3_full_lock(tp, 0);
+
+	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+
+	tg3_flag_set(tp, INIT_COMPLETE);
+	err = tg3_restart_hw(tp,
+			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
+	if (err)
+		goto out;
+
+	tg3_timer_start(tp);
+
+	tg3_netif_start(tp);
+
+out:
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+
+unlock:
+	rtnl_unlock();
+	return err;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+
+	rtnl_lock();
+	netif_device_detach(dev);
+
+	if (netif_running(dev))
+		dev_close(dev);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		tg3_power_down(tp);
+
+	rtnl_unlock();
+}
+
+/**
+ * tg3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+	netdev_info(netdev, "PCI I/O error detected\n");
+
+	rtnl_lock();
+
+	/* We probably don't have netdev yet */
+	if (!netdev || !netif_running(netdev))
+		goto done;
+
+	/* We needn't recover from permanent error */
+	if (state == pci_channel_io_frozen)
+		tp->pcierr_recovery = true;
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	tg3_timer_stop(tp);
+
+	/* Want to make sure that the reset task doesn't run */
+	tg3_reset_task_cancel(tp);
+
+	netif_device_detach(netdev);
+
+	/* Clean up software state, even if MMIO is blocked */
+	tg3_full_lock(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+	tg3_full_unlock(tp);
+
+done:
+	if (state == pci_channel_io_perm_failure) {
+		if (netdev) {
+			tg3_napi_enable(tp);
+			dev_close(netdev);
+		}
+		err = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_disable_device(pdev);
+	}
+
+	rtnl_unlock();
+
+	return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+	int err;
+
+	rtnl_lock();
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		goto done;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	if (!netdev || !netif_running(netdev)) {
+		rc = PCI_ERS_RESULT_RECOVERED;
+		goto done;
+	}
+
+	err = tg3_power_up(tp);
+	if (err)
+		goto done;
+
+	rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+		tg3_napi_enable(tp);
+		dev_close(netdev);
+	}
+	rtnl_unlock();
+
+	return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void tg3_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	int err;
+
+	rtnl_lock();
+
+	if (!netdev || !netif_running(netdev))
+		goto done;
+
+	tg3_full_lock(tp, 0);
+	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
+	tg3_flag_set(tp, INIT_COMPLETE);
+	err = tg3_restart_hw(tp, true);
+	if (err) {
+		tg3_full_unlock(tp);
+		netdev_err(netdev, "Cannot restart hardware after reset.\n");
+		goto done;
+	}
+
+	netif_device_attach(netdev);
+
+	tg3_timer_start(tp);
+
+	tg3_netif_start(tp);
+
+	tg3_full_unlock(tp);
+
+	tg3_phy_start(tp);
+
+done:
+	tp->pcierr_recovery = false;
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers tg3_err_handler = {
+	.error_detected	= tg3_io_error_detected,
+	.slot_reset	= tg3_io_slot_reset,
+	.resume		= tg3_io_resume
+};
+
+static struct pci_driver tg3_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= tg3_pci_tbl,
+	.probe		= tg3_init_one,
+	.remove		= tg3_remove_one,
+	.err_handler	= &tg3_err_handler,
+	.driver.pm	= &tg3_pm_ops,
+	.shutdown	= tg3_shutdown,
+};
+
+module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
new file mode 100644
index 0000000..31c9f82
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -0,0 +1,3427 @@
+/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
+ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
+ */
+
+#ifndef _T3_H
+#define _T3_H
+
+#define TG3_64BIT_REG_HIGH		0x00UL
+#define TG3_64BIT_REG_LOW		0x04UL
+
+/* Descriptor block info. */
+#define TG3_BDINFO_HOST_ADDR		0x0UL /* 64-bit */
+#define TG3_BDINFO_MAXLEN_FLAGS		0x8UL /* 32-bit */
+#define  BDINFO_FLAGS_USE_EXT_RECV	 0x00000001 /* ext rx_buffer_desc */
+#define  BDINFO_FLAGS_DISABLED		 0x00000002
+#define  BDINFO_FLAGS_MAXLEN_MASK	 0xffff0000
+#define  BDINFO_FLAGS_MAXLEN_SHIFT	 16
+#define TG3_BDINFO_NIC_ADDR		0xcUL /* 32-bit */
+#define TG3_BDINFO_SIZE			0x10UL
+
+#define TG3_RX_STD_MAX_SIZE_5700	512
+#define TG3_RX_STD_MAX_SIZE_5717	2048
+#define TG3_RX_JMB_MAX_SIZE_5700	256
+#define TG3_RX_JMB_MAX_SIZE_5717	1024
+#define TG3_RX_RET_MAX_SIZE_5700	1024
+#define TG3_RX_RET_MAX_SIZE_5705	512
+#define TG3_RX_RET_MAX_SIZE_5717	4096
+
+#define TG3_RSS_INDIR_TBL_SIZE		128
+
+/* First 256 bytes are a mirror of PCI config space. */
+#define TG3PCI_VENDOR			0x00000000
+#define  TG3PCI_VENDOR_BROADCOM		 0x14e4
+#define TG3PCI_DEVICE			0x00000002
+#define  TG3PCI_DEVICE_TIGON3_1		 0x1644 /* BCM5700 */
+#define  TG3PCI_DEVICE_TIGON3_2		 0x1645 /* BCM5701 */
+#define  TG3PCI_DEVICE_TIGON3_3		 0x1646 /* BCM5702 */
+#define  TG3PCI_DEVICE_TIGON3_4		 0x1647 /* BCM5703 */
+#define  TG3PCI_DEVICE_TIGON3_5761S	 0x1688
+#define  TG3PCI_DEVICE_TIGON3_5761SE	 0x1689
+#define  TG3PCI_DEVICE_TIGON3_57780	 0x1692
+#define  TG3PCI_DEVICE_TIGON3_5787M	 0x1693
+#define  TG3PCI_DEVICE_TIGON3_57760	 0x1690
+#define  TG3PCI_DEVICE_TIGON3_57790	 0x1694
+#define  TG3PCI_DEVICE_TIGON3_57788	 0x1691
+#define  TG3PCI_DEVICE_TIGON3_5785_G	 0x1699 /* GPHY */
+#define  TG3PCI_DEVICE_TIGON3_5785_F	 0x16a0 /* 10/100 only */
+#define  TG3PCI_DEVICE_TIGON3_5717	 0x1655
+#define  TG3PCI_DEVICE_TIGON3_5717_C	 0x1665
+#define  TG3PCI_DEVICE_TIGON3_5718	 0x1656
+#define  TG3PCI_DEVICE_TIGON3_57781	 0x16b1
+#define  TG3PCI_DEVICE_TIGON3_57785	 0x16b5
+#define  TG3PCI_DEVICE_TIGON3_57761	 0x16b0
+#define  TG3PCI_DEVICE_TIGON3_57765	 0x16b4
+#define  TG3PCI_DEVICE_TIGON3_57791	 0x16b2
+#define  TG3PCI_DEVICE_TIGON3_57795	 0x16b6
+#define  TG3PCI_DEVICE_TIGON3_5719	 0x1657
+#define  TG3PCI_DEVICE_TIGON3_5720	 0x165f
+#define  TG3PCI_DEVICE_TIGON3_57762	 0x1682
+#define  TG3PCI_DEVICE_TIGON3_57766	 0x1686
+#define  TG3PCI_DEVICE_TIGON3_57786	 0x16b3
+#define  TG3PCI_DEVICE_TIGON3_57782	 0x16b7
+#define  TG3PCI_DEVICE_TIGON3_5762	 0x1687
+#define  TG3PCI_DEVICE_TIGON3_5725	 0x1643
+#define  TG3PCI_DEVICE_TIGON3_5727	 0x16f3
+#define  TG3PCI_DEVICE_TIGON3_57764	 0x1642
+#define  TG3PCI_DEVICE_TIGON3_57767	 0x1683
+#define  TG3PCI_DEVICE_TIGON3_57787	 0x1641
+/* 0x04 --> 0x2c unused */
+#define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5	0x0001
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6	0x0002
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9	0x0003
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1	0x0005
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8	0x0006
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7	0x0007
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10	0x0008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12	0x8008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1	0x0009
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2	0x8009
+#define TG3PCI_SUBVENDOR_ID_3COM		PCI_VENDOR_ID_3COM
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996T		0x1000
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT	0x1006
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX	0x1004
+#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T	0x1007
+#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01	0x1008
+#define TG3PCI_SUBVENDOR_ID_DELL		PCI_VENDOR_ID_DELL
+#define TG3PCI_SUBDEVICE_ID_DELL_VIPER		0x00d1
+#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106
+#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109
+#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a
+#define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING	0x007d
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780	0x0085
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2	0x0099
+#define TG3PCI_SUBVENDOR_ID_IBM			PCI_VENDOR_ID_IBM
+#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2	0x0281
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_A	0x0601
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_B	0x0612
+#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M	0x3056
+
+/* 0x30 --> 0x64 unused */
+#define TG3PCI_MSI_DATA			0x00000064
+/* 0x66 --> 0x68 unused */
+#define TG3PCI_MISC_HOST_CTRL		0x00000068
+#define  MISC_HOST_CTRL_CLEAR_INT	 0x00000001
+#define  MISC_HOST_CTRL_MASK_PCI_INT	 0x00000002
+#define  MISC_HOST_CTRL_BYTE_SWAP	 0x00000004
+#define  MISC_HOST_CTRL_WORD_SWAP	 0x00000008
+#define  MISC_HOST_CTRL_PCISTATE_RW	 0x00000010
+#define  MISC_HOST_CTRL_CLKREG_RW	 0x00000020
+#define  MISC_HOST_CTRL_REGWORD_SWAP	 0x00000040
+#define  MISC_HOST_CTRL_INDIR_ACCESS	 0x00000080
+#define  MISC_HOST_CTRL_IRQ_MASK_MODE	 0x00000100
+#define  MISC_HOST_CTRL_TAGGED_STATUS	 0x00000200
+#define  MISC_HOST_CTRL_CHIPREV		 0xffff0000
+#define  MISC_HOST_CTRL_CHIPREV_SHIFT	 16
+
+#define  CHIPREV_ID_5700_A0		 0x7000
+#define  CHIPREV_ID_5700_A1		 0x7001
+#define  CHIPREV_ID_5700_B0		 0x7100
+#define  CHIPREV_ID_5700_B1		 0x7101
+#define  CHIPREV_ID_5700_B3		 0x7102
+#define  CHIPREV_ID_5700_ALTIMA		 0x7104
+#define  CHIPREV_ID_5700_C0		 0x7200
+#define  CHIPREV_ID_5701_A0		 0x0000
+#define  CHIPREV_ID_5701_B0		 0x0100
+#define  CHIPREV_ID_5701_B2		 0x0102
+#define  CHIPREV_ID_5701_B5		 0x0105
+#define  CHIPREV_ID_5703_A0		 0x1000
+#define  CHIPREV_ID_5703_A1		 0x1001
+#define  CHIPREV_ID_5703_A2		 0x1002
+#define  CHIPREV_ID_5703_A3		 0x1003
+#define  CHIPREV_ID_5704_A0		 0x2000
+#define  CHIPREV_ID_5704_A1		 0x2001
+#define  CHIPREV_ID_5704_A2		 0x2002
+#define  CHIPREV_ID_5704_A3		 0x2003
+#define  CHIPREV_ID_5705_A0		 0x3000
+#define  CHIPREV_ID_5705_A1		 0x3001
+#define  CHIPREV_ID_5705_A2		 0x3002
+#define  CHIPREV_ID_5705_A3		 0x3003
+#define  CHIPREV_ID_5750_A0		 0x4000
+#define  CHIPREV_ID_5750_A1		 0x4001
+#define  CHIPREV_ID_5750_A3		 0x4003
+#define  CHIPREV_ID_5750_C2		 0x4202
+#define  CHIPREV_ID_5752_A0_HW		 0x5000
+#define  CHIPREV_ID_5752_A0		 0x6000
+#define  CHIPREV_ID_5752_A1		 0x6001
+#define  CHIPREV_ID_5714_A2		 0x9002
+#define  CHIPREV_ID_5906_A1		 0xc001
+#define  CHIPREV_ID_57780_A0		 0x57780000
+#define  CHIPREV_ID_57780_A1		 0x57780001
+#define  CHIPREV_ID_5717_A0		 0x05717000
+#define  CHIPREV_ID_5717_C0		 0x05717200
+#define  CHIPREV_ID_57765_A0		 0x57785000
+#define  CHIPREV_ID_5719_A0		 0x05719000
+#define  CHIPREV_ID_5720_A0		 0x05720000
+#define  CHIPREV_ID_5762_A0		 0x05762000
+
+#define   ASIC_REV_5700			 0x07
+#define   ASIC_REV_5701			 0x00
+#define   ASIC_REV_5703			 0x01
+#define   ASIC_REV_5704			 0x02
+#define   ASIC_REV_5705			 0x03
+#define   ASIC_REV_5750			 0x04
+#define   ASIC_REV_5752			 0x06
+#define   ASIC_REV_5780			 0x08
+#define   ASIC_REV_5714			 0x09
+#define   ASIC_REV_5755			 0x0a
+#define   ASIC_REV_5787			 0x0b
+#define   ASIC_REV_5906			 0x0c
+#define   ASIC_REV_USE_PROD_ID_REG	 0x0f
+#define   ASIC_REV_5784			 0x5784
+#define   ASIC_REV_5761			 0x5761
+#define   ASIC_REV_5785			 0x5785
+#define   ASIC_REV_57780		 0x57780
+#define   ASIC_REV_5717			 0x5717
+#define   ASIC_REV_57765		 0x57785
+#define   ASIC_REV_5719			 0x5719
+#define   ASIC_REV_5720			 0x5720
+#define   ASIC_REV_57766		 0x57766
+#define   ASIC_REV_5762			 0x5762
+#define   CHIPREV_5700_AX		 0x70
+#define   CHIPREV_5700_BX		 0x71
+#define   CHIPREV_5700_CX		 0x72
+#define   CHIPREV_5701_AX		 0x00
+#define   CHIPREV_5703_AX		 0x10
+#define   CHIPREV_5704_AX		 0x20
+#define   CHIPREV_5704_BX		 0x21
+#define   CHIPREV_5750_AX		 0x40
+#define   CHIPREV_5750_BX		 0x41
+#define   CHIPREV_5784_AX		 0x57840
+#define   CHIPREV_5761_AX		 0x57610
+#define   CHIPREV_57765_AX		 0x577650
+#define   METAL_REV_A0			 0x00
+#define   METAL_REV_A1			 0x01
+#define   METAL_REV_B0			 0x00
+#define   METAL_REV_B1			 0x01
+#define   METAL_REV_B2			 0x02
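+
+/*
+ * Illustrative worked example (assumed decoding, not a definition from this
+ * file): the chip revision ID packs three fields, and the ASIC_REV/CHIPREV/
+ * METAL_REV constants above line up with simple shifts of it.  Taking
+ * CHIPREV_ID_5703_A2 (0x1002) as the input value:
+ *
+ *	asic_rev  = 0x1002 >> 12;	yields 0x01 (ASIC_REV_5703)
+ *	chip_rev  = 0x1002 >> 8;	yields 0x10 (CHIPREV_5703_AX)
+ *	metal_rev = 0x1002 & 0xff;	yields 0x02 (the A2 metal revision)
+ *
+ * A top field of ASIC_REV_USE_PROD_ID_REG signals that the full product ID
+ * (hence the wider constants such as CHIPREV_ID_5717_A0, 0x05717000) is
+ * taken from the TG3PCI_*PRODID_ASICREV registers defined later in this file.
+ */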
+#define TG3PCI_DMA_RW_CTRL		0x0000006c
+#define  DMA_RWCTRL_DIS_CACHE_ALIGNMENT  0x00000001
+#define  DMA_RWCTRL_TAGGED_STAT_WA	 0x00000080
+#define  DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
+#define  DMA_RWCTRL_READ_BNDRY_MASK	 0x00000700
+#define  DMA_RWCTRL_READ_BNDRY_DISAB	 0x00000000
+#define  DMA_RWCTRL_READ_BNDRY_16	 0x00000100
+#define  DMA_RWCTRL_READ_BNDRY_128_PCIX	 0x00000100
+#define  DMA_RWCTRL_READ_BNDRY_32	 0x00000200
+#define  DMA_RWCTRL_READ_BNDRY_256_PCIX	 0x00000200
+#define  DMA_RWCTRL_READ_BNDRY_64	 0x00000300
+#define  DMA_RWCTRL_READ_BNDRY_384_PCIX	 0x00000300
+#define  DMA_RWCTRL_READ_BNDRY_128	 0x00000400
+#define  DMA_RWCTRL_READ_BNDRY_256	 0x00000500
+#define  DMA_RWCTRL_READ_BNDRY_512	 0x00000600
+#define  DMA_RWCTRL_READ_BNDRY_1024	 0x00000700
+#define  DMA_RWCTRL_WRITE_BNDRY_MASK	 0x00003800
+#define  DMA_RWCTRL_WRITE_BNDRY_DISAB	 0x00000000
+#define  DMA_RWCTRL_WRITE_BNDRY_16	 0x00000800
+#define  DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800
+#define  DMA_RWCTRL_WRITE_BNDRY_32	 0x00001000
+#define  DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000
+#define  DMA_RWCTRL_WRITE_BNDRY_64	 0x00001800
+#define  DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800
+#define  DMA_RWCTRL_WRITE_BNDRY_128	 0x00002000
+#define  DMA_RWCTRL_WRITE_BNDRY_256	 0x00002800
+#define  DMA_RWCTRL_WRITE_BNDRY_512	 0x00003000
+#define  DMA_RWCTRL_WRITE_BNDRY_1024	 0x00003800
+#define  DMA_RWCTRL_ONE_DMA		 0x00004000
+#define  DMA_RWCTRL_READ_WATER		 0x00070000
+#define  DMA_RWCTRL_READ_WATER_SHIFT	 16
+#define  DMA_RWCTRL_WRITE_WATER		 0x00380000
+#define  DMA_RWCTRL_WRITE_WATER_SHIFT	 19
+#define  DMA_RWCTRL_USE_MEM_READ_MULT	 0x00400000
+#define  DMA_RWCTRL_ASSERT_ALL_BE	 0x00800000
+#define  DMA_RWCTRL_PCI_READ_CMD	 0x0f000000
+#define  DMA_RWCTRL_PCI_READ_CMD_SHIFT	 24
+#define  DMA_RWCTRL_PCI_WRITE_CMD	 0xf0000000
+#define  DMA_RWCTRL_PCI_WRITE_CMD_SHIFT	 28
+#define  DMA_RWCTRL_WRITE_BNDRY_64_PCIE	 0x10000000
+#define  DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000
+#define  DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000
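+
+/*
+ * Illustrative sketch (assumed usage; the field values below are purely
+ * hypothetical, not recommended settings): the watermark and PCI command
+ * fields above are positional, so a DMA read/write control word is
+ * assembled from the *_SHIFT constants before being written to
+ * TG3PCI_DMA_RW_CTRL, e.g.
+ *
+ *	u32 dma_rwctrl = (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+ *			 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)  |
+ *			 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT)    |
+ *			 (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT)   |
+ *			 DMA_RWCTRL_WRITE_BNDRY_DISAB;
+ */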
+#define TG3PCI_PCISTATE			0x00000070
+#define  PCISTATE_FORCE_RESET		 0x00000001
+#define  PCISTATE_INT_NOT_ACTIVE	 0x00000002
+#define  PCISTATE_CONV_PCI_MODE		 0x00000004
+#define  PCISTATE_BUS_SPEED_HIGH	 0x00000008
+#define  PCISTATE_BUS_32BIT		 0x00000010
+#define  PCISTATE_ROM_ENABLE		 0x00000020
+#define  PCISTATE_ROM_RETRY_ENABLE	 0x00000040
+#define  PCISTATE_FLAT_VIEW		 0x00000100
+#define  PCISTATE_RETRY_SAME_DMA	 0x00002000
+#define  PCISTATE_ALLOW_APE_CTLSPC_WR	 0x00010000
+#define  PCISTATE_ALLOW_APE_SHMEM_WR	 0x00020000
+#define  PCISTATE_ALLOW_APE_PSPACE_WR	 0x00040000
+#define TG3PCI_CLOCK_CTRL		0x00000074
+#define  CLOCK_CTRL_CORECLK_DISABLE	 0x00000200
+#define  CLOCK_CTRL_RXCLK_DISABLE	 0x00000400
+#define  CLOCK_CTRL_TXCLK_DISABLE	 0x00000800
+#define  CLOCK_CTRL_ALTCLK		 0x00001000
+#define  CLOCK_CTRL_PWRDOWN_PLL133	 0x00008000
+#define  CLOCK_CTRL_44MHZ_CORE		 0x00040000
+#define  CLOCK_CTRL_625_CORE		 0x00100000
+#define  CLOCK_CTRL_FORCE_CLKRUN	 0x00200000
+#define  CLOCK_CTRL_CLKRUN_OENABLE	 0x00400000
+#define  CLOCK_CTRL_DELAY_PCI_GRANT	 0x80000000
+#define TG3PCI_REG_BASE_ADDR		0x00000078
+#define TG3PCI_MEM_WIN_BASE_ADDR	0x0000007c
+#define TG3PCI_REG_DATA			0x00000080
+#define TG3PCI_MEM_WIN_DATA		0x00000084
+#define TG3PCI_MISC_LOCAL_CTRL		0x00000090
+/* 0x94 --> 0x98 unused */
+#define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */
+#define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */
+/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DUAL_MAC_CTRL		0x000000b8
+#define  DUAL_MAC_CTRL_CH_MASK		 0x00000003
+#define  DUAL_MAC_CTRL_ID		 0x00000004
+#define TG3PCI_PRODID_ASICREV		0x000000bc
+#define  PROD_ID_ASIC_REV_MASK		 0x0fffffff
+/* 0xc0 --> 0xf4 unused */
+
+#define TG3PCI_GEN2_PRODID_ASICREV	0x000000f4
+#define TG3PCI_GEN15_PRODID_ASICREV	0x000000fc
+/* 0xf8 --> 0x200 unused */
+
+#define TG3_CORR_ERR_STAT		0x00000110
+#define  TG3_CORR_ERR_STAT_CLEAR	0xffffffff
+/* 0x114 --> 0x200 unused */
+
+/* Mailbox registers */
+#define MAILBOX_INTERRUPT_0		0x00000200 /* 64-bit */
+#define MAILBOX_INTERRUPT_1		0x00000208 /* 64-bit */
+#define MAILBOX_INTERRUPT_2		0x00000210 /* 64-bit */
+#define MAILBOX_INTERRUPT_3		0x00000218 /* 64-bit */
+#define MAILBOX_GENERAL_0		0x00000220 /* 64-bit */
+#define MAILBOX_GENERAL_1		0x00000228 /* 64-bit */
+#define MAILBOX_GENERAL_2		0x00000230 /* 64-bit */
+#define MAILBOX_GENERAL_3		0x00000238 /* 64-bit */
+#define MAILBOX_GENERAL_4		0x00000240 /* 64-bit */
+#define MAILBOX_GENERAL_5		0x00000248 /* 64-bit */
+#define MAILBOX_GENERAL_6		0x00000250 /* 64-bit */
+#define MAILBOX_GENERAL_7		0x00000258 /* 64-bit */
+#define MAILBOX_RELOAD_STAT		0x00000260 /* 64-bit */
+#define MAILBOX_RCV_STD_PROD_IDX	0x00000268 /* 64-bit */
+#define TG3_RX_STD_PROD_IDX_REG		(MAILBOX_RCV_STD_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_JUMBO_PROD_IDX	0x00000270 /* 64-bit */
+#define TG3_RX_JMB_PROD_IDX_REG		(MAILBOX_RCV_JUMBO_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_MINI_PROD_IDX	0x00000278 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_0	0x00000280 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_1	0x00000288 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_2	0x00000290 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_3	0x00000298 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_4	0x000002a0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_5	0x000002a8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_6	0x000002b0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_7	0x000002b8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_8	0x000002c0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_9	0x000002c8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_10	0x000002d0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_11	0x000002d8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_12	0x000002e0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_13	0x000002e8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_14	0x000002f0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_15	0x000002f8 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_0	0x00000300 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_1	0x00000308 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_2	0x00000310 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_3	0x00000318 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_4	0x00000320 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_5	0x00000328 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_6	0x00000330 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_7	0x00000338 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_8	0x00000340 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_9	0x00000348 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_10	0x00000350 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_11	0x00000358 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_12	0x00000360 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_13	0x00000368 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_14	0x00000370 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_15	0x00000378 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_0	0x00000380 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_1	0x00000388 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_2	0x00000390 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_3	0x00000398 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_4	0x000003a0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_5	0x000003a8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_6	0x000003b0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_7	0x000003b8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_8	0x000003c0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_9	0x000003c8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_10	0x000003d0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_11	0x000003d8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_12	0x000003e0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_13	0x000003e8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_14	0x000003f0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_15	0x000003f8 /* 64-bit */
+
+/* MAC control registers */
+#define MAC_MODE			0x00000400
+#define  MAC_MODE_RESET			 0x00000001
+#define  MAC_MODE_HALF_DUPLEX		 0x00000002
+#define  MAC_MODE_PORT_MODE_MASK	 0x0000000c
+#define  MAC_MODE_PORT_MODE_TBI		 0x0000000c
+#define  MAC_MODE_PORT_MODE_GMII	 0x00000008
+#define  MAC_MODE_PORT_MODE_MII		 0x00000004
+#define  MAC_MODE_PORT_MODE_NONE	 0x00000000
+#define  MAC_MODE_PORT_INT_LPBACK	 0x00000010
+#define  MAC_MODE_TAGGED_MAC_CTRL	 0x00000080
+#define  MAC_MODE_TX_BURSTING		 0x00000100
+#define  MAC_MODE_MAX_DEFER		 0x00000200
+#define  MAC_MODE_LINK_POLARITY		 0x00000400
+#define  MAC_MODE_RXSTAT_ENABLE		 0x00000800
+#define  MAC_MODE_RXSTAT_CLEAR		 0x00001000
+#define  MAC_MODE_RXSTAT_FLUSH		 0x00002000
+#define  MAC_MODE_TXSTAT_ENABLE		 0x00004000
+#define  MAC_MODE_TXSTAT_CLEAR		 0x00008000
+#define  MAC_MODE_TXSTAT_FLUSH		 0x00010000
+#define  MAC_MODE_SEND_CONFIGS		 0x00020000
+#define  MAC_MODE_MAGIC_PKT_ENABLE	 0x00040000
+#define  MAC_MODE_ACPI_ENABLE		 0x00080000
+#define  MAC_MODE_MIP_ENABLE		 0x00100000
+#define  MAC_MODE_TDE_ENABLE		 0x00200000
+#define  MAC_MODE_RDE_ENABLE		 0x00400000
+#define  MAC_MODE_FHDE_ENABLE		 0x00800000
+#define  MAC_MODE_KEEP_FRAME_IN_WOL	 0x01000000
+#define  MAC_MODE_APE_RX_EN		 0x08000000
+#define  MAC_MODE_APE_TX_EN		 0x10000000
+#define MAC_STATUS			0x00000404
+#define  MAC_STATUS_PCS_SYNCED		 0x00000001
+#define  MAC_STATUS_SIGNAL_DET		 0x00000002
+#define  MAC_STATUS_RCVD_CFG		 0x00000004
+#define  MAC_STATUS_CFG_CHANGED		 0x00000008
+#define  MAC_STATUS_SYNC_CHANGED	 0x00000010
+#define  MAC_STATUS_PORT_DEC_ERR	 0x00000400
+#define  MAC_STATUS_LNKSTATE_CHANGED	 0x00001000
+#define  MAC_STATUS_MI_COMPLETION	 0x00400000
+#define  MAC_STATUS_MI_INTERRUPT	 0x00800000
+#define  MAC_STATUS_AP_ERROR		 0x01000000
+#define  MAC_STATUS_ODI_ERROR		 0x02000000
+#define  MAC_STATUS_RXSTAT_OVERRUN	 0x04000000
+#define  MAC_STATUS_TXSTAT_OVERRUN	 0x08000000
+#define MAC_EVENT			0x00000408
+#define  MAC_EVENT_PORT_DECODE_ERR	 0x00000400
+#define  MAC_EVENT_LNKSTATE_CHANGED	 0x00001000
+#define  MAC_EVENT_MI_COMPLETION	 0x00400000
+#define  MAC_EVENT_MI_INTERRUPT		 0x00800000
+#define  MAC_EVENT_AP_ERROR		 0x01000000
+#define  MAC_EVENT_ODI_ERROR		 0x02000000
+#define  MAC_EVENT_RXSTAT_OVERRUN	 0x04000000
+#define  MAC_EVENT_TXSTAT_OVERRUN	 0x08000000
+#define MAC_LED_CTRL			0x0000040c
+#define  LED_CTRL_LNKLED_OVERRIDE	 0x00000001
+#define  LED_CTRL_1000MBPS_ON		 0x00000002
+#define  LED_CTRL_100MBPS_ON		 0x00000004
+#define  LED_CTRL_10MBPS_ON		 0x00000008
+#define  LED_CTRL_TRAFFIC_OVERRIDE	 0x00000010
+#define  LED_CTRL_TRAFFIC_BLINK		 0x00000020
+#define  LED_CTRL_TRAFFIC_LED		 0x00000040
+#define  LED_CTRL_1000MBPS_STATUS	 0x00000080
+#define  LED_CTRL_100MBPS_STATUS	 0x00000100
+#define  LED_CTRL_10MBPS_STATUS		 0x00000200
+#define  LED_CTRL_TRAFFIC_STATUS	 0x00000400
+#define  LED_CTRL_MODE_MAC		 0x00000000
+#define  LED_CTRL_MODE_PHY_1		 0x00000800
+#define  LED_CTRL_MODE_PHY_2		 0x00001000
+#define  LED_CTRL_MODE_SHASTA_MAC	 0x00002000
+#define  LED_CTRL_MODE_SHARED		 0x00004000
+#define  LED_CTRL_MODE_COMBO		 0x00008000
+#define  LED_CTRL_BLINK_RATE_MASK	 0x7ff80000
+#define  LED_CTRL_BLINK_RATE_SHIFT	 19
+#define  LED_CTRL_BLINK_PER_OVERRIDE	 0x00080000
+#define  LED_CTRL_BLINK_RATE_OVERRIDE	 0x80000000
+#define MAC_ADDR_0_HIGH			0x00000410 /* upper 2 bytes */
+#define MAC_ADDR_0_LOW			0x00000414 /* lower 4 bytes */
+#define MAC_ADDR_1_HIGH			0x00000418 /* upper 2 bytes */
+#define MAC_ADDR_1_LOW			0x0000041c /* lower 4 bytes */
+#define MAC_ADDR_2_HIGH			0x00000420 /* upper 2 bytes */
+#define MAC_ADDR_2_LOW			0x00000424 /* lower 4 bytes */
+#define MAC_ADDR_3_HIGH			0x00000428 /* upper 2 bytes */
+#define MAC_ADDR_3_LOW			0x0000042c /* lower 4 bytes */
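+
+/*
+ * Illustrative sketch (assumed byte placement, following the "upper 2
+ * bytes" / "lower 4 bytes" comments above; addr[] is a hypothetical
+ * six-byte station address, not a name from this file):
+ *
+ *	u32 hi = (addr[0] << 8)  |  addr[1];
+ *	u32 lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ *
+ * hi would be written to MAC_ADDR_0_HIGH and lo to MAC_ADDR_0_LOW (and
+ * likewise for the other MAC_ADDR_n register pairs).
+ */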
+#define MAC_ACPI_MBUF_PTR		0x00000430
+#define MAC_ACPI_LEN_OFFSET		0x00000434
+#define  ACPI_LENOFF_LEN_MASK		 0x0000ffff
+#define  ACPI_LENOFF_LEN_SHIFT		 0
+#define  ACPI_LENOFF_OFF_MASK		 0x0fff0000
+#define  ACPI_LENOFF_OFF_SHIFT		 16
+#define MAC_TX_BACKOFF_SEED		0x00000438
+#define  TX_BACKOFF_SEED_MASK		 0x000003ff
+#define MAC_RX_MTU_SIZE			0x0000043c
+#define  RX_MTU_SIZE_MASK		 0x0000ffff
+#define MAC_PCS_TEST			0x00000440
+#define  PCS_TEST_PATTERN_MASK		 0x000fffff
+#define  PCS_TEST_PATTERN_SHIFT		 0
+#define  PCS_TEST_ENABLE		 0x00100000
+#define MAC_TX_AUTO_NEG			0x00000444
+#define  TX_AUTO_NEG_MASK		 0x0000ffff
+#define  TX_AUTO_NEG_SHIFT		 0
+#define MAC_RX_AUTO_NEG			0x00000448
+#define  RX_AUTO_NEG_MASK		 0x0000ffff
+#define  RX_AUTO_NEG_SHIFT		 0
+#define MAC_MI_COM			0x0000044c
+#define  MI_COM_CMD_MASK		 0x0c000000
+#define  MI_COM_CMD_WRITE		 0x04000000
+#define  MI_COM_CMD_READ		 0x08000000
+#define  MI_COM_READ_FAILED		 0x10000000
+#define  MI_COM_START			 0x20000000
+#define  MI_COM_BUSY			 0x20000000
+#define  MI_COM_PHY_ADDR_MASK		 0x03e00000
+#define  MI_COM_PHY_ADDR_SHIFT		 21
+#define  MI_COM_REG_ADDR_MASK		 0x001f0000
+#define  MI_COM_REG_ADDR_SHIFT		 16
+#define  MI_COM_DATA_MASK		 0x0000ffff
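+
+/*
+ * Illustrative sketch (assumed MII/MDIO usage; "phy" and "reg" are
+ * hypothetical caller-supplied values, not names from this file): a PHY
+ * register read is framed by packing the PHY and register addresses into
+ * MAC_MI_COM and starting the cycle:
+ *
+ *	u32 frame = ((phy << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
+ *		    ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK) |
+ *		    MI_COM_CMD_READ | MI_COM_START;
+ *
+ * After the frame is written to MAC_MI_COM, software polls until
+ * MI_COM_BUSY clears; the result is then in the MI_COM_DATA_MASK bits
+ * unless MI_COM_READ_FAILED is set.
+ */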
+#define MAC_MI_STAT			0x00000450
+#define  MAC_MI_STAT_LNKSTAT_ATTN_ENAB	 0x00000001
+#define  MAC_MI_STAT_10MBPS_MODE	 0x00000002
+#define MAC_MI_MODE			0x00000454
+#define  MAC_MI_MODE_CLK_10MHZ		 0x00000001
+#define  MAC_MI_MODE_SHORT_PREAMBLE	 0x00000002
+#define  MAC_MI_MODE_AUTO_POLL		 0x00000010
+#define  MAC_MI_MODE_500KHZ_CONST	 0x00008000
+#define  MAC_MI_MODE_BASE		 0x000c0000 /* XXX magic values XXX */
+#define MAC_AUTO_POLL_STATUS		0x00000458
+#define  MAC_AUTO_POLL_ERROR		 0x00000001
+#define MAC_TX_MODE			0x0000045c
+#define  TX_MODE_RESET			 0x00000001
+#define  TX_MODE_ENABLE			 0x00000002
+#define  TX_MODE_FLOW_CTRL_ENABLE	 0x00000010
+#define  TX_MODE_BIG_BCKOFF_ENABLE	 0x00000020
+#define  TX_MODE_LONG_PAUSE_ENABLE	 0x00000040
+#define  TX_MODE_MBUF_LOCKUP_FIX	 0x00000100
+#define  TX_MODE_JMB_FRM_LEN		 0x00400000
+#define  TX_MODE_CNT_DN_MODE		 0x00800000
+#define MAC_TX_STATUS			0x00000460
+#define  TX_STATUS_XOFFED		 0x00000001
+#define  TX_STATUS_SENT_XOFF		 0x00000002
+#define  TX_STATUS_SENT_XON		 0x00000004
+#define  TX_STATUS_LINK_UP		 0x00000008
+#define  TX_STATUS_ODI_UNDERRUN		 0x00000010
+#define  TX_STATUS_ODI_OVERRUN		 0x00000020
+#define MAC_TX_LENGTHS			0x00000464
+#define  TX_LENGTHS_SLOT_TIME_MASK	 0x000000ff
+#define  TX_LENGTHS_SLOT_TIME_SHIFT	 0
+#define  TX_LENGTHS_IPG_MASK		 0x00000f00
+#define  TX_LENGTHS_IPG_SHIFT		 8
+#define  TX_LENGTHS_IPG_CRS_MASK	 0x00003000
+#define  TX_LENGTHS_IPG_CRS_SHIFT	 12
+#define  TX_LENGTHS_JMB_FRM_LEN_MSK	 0x00ff0000
+#define  TX_LENGTHS_CNT_DWN_VAL_MSK	 0xff000000
+#define MAC_RX_MODE			0x00000468
+#define  RX_MODE_RESET			 0x00000001
+#define  RX_MODE_ENABLE			 0x00000002
+#define  RX_MODE_FLOW_CTRL_ENABLE	 0x00000004
+#define  RX_MODE_KEEP_MAC_CTRL		 0x00000008
+#define  RX_MODE_KEEP_PAUSE		 0x00000010
+#define  RX_MODE_ACCEPT_OVERSIZED	 0x00000020
+#define  RX_MODE_ACCEPT_RUNTS		 0x00000040
+#define  RX_MODE_LEN_CHECK		 0x00000080
+#define  RX_MODE_PROMISC		 0x00000100
+#define  RX_MODE_NO_CRC_CHECK		 0x00000200
+#define  RX_MODE_KEEP_VLAN_TAG		 0x00000400
+#define  RX_MODE_RSS_IPV4_HASH_EN	 0x00010000
+#define  RX_MODE_RSS_TCP_IPV4_HASH_EN	 0x00020000
+#define  RX_MODE_RSS_IPV6_HASH_EN	 0x00040000
+#define  RX_MODE_RSS_TCP_IPV6_HASH_EN	 0x00080000
+#define  RX_MODE_RSS_ITBL_HASH_BITS_7	 0x00700000
+#define  RX_MODE_RSS_ENABLE		 0x00800000
+#define  RX_MODE_IPV6_CSUM_ENABLE	 0x01000000
+#define  RX_MODE_IPV4_FRAG_FIX		 0x02000000
+#define MAC_RX_STATUS			0x0000046c
+#define  RX_STATUS_REMOTE_TX_XOFFED	 0x00000001
+#define  RX_STATUS_XOFF_RCVD		 0x00000002
+#define  RX_STATUS_XON_RCVD		 0x00000004
+#define MAC_HASH_REG_0			0x00000470
+#define MAC_HASH_REG_1			0x00000474
+#define MAC_HASH_REG_2			0x00000478
+#define MAC_HASH_REG_3			0x0000047c
+#define MAC_RCV_RULE_0			0x00000480
+#define MAC_RCV_VALUE_0			0x00000484
+#define MAC_RCV_RULE_1			0x00000488
+#define MAC_RCV_VALUE_1			0x0000048c
+#define MAC_RCV_RULE_2			0x00000490
+#define MAC_RCV_VALUE_2			0x00000494
+#define MAC_RCV_RULE_3			0x00000498
+#define MAC_RCV_VALUE_3			0x0000049c
+#define MAC_RCV_RULE_4			0x000004a0
+#define MAC_RCV_VALUE_4			0x000004a4
+#define MAC_RCV_RULE_5			0x000004a8
+#define MAC_RCV_VALUE_5			0x000004ac
+#define MAC_RCV_RULE_6			0x000004b0
+#define MAC_RCV_VALUE_6			0x000004b4
+#define MAC_RCV_RULE_7			0x000004b8
+#define MAC_RCV_VALUE_7			0x000004bc
+#define MAC_RCV_RULE_8			0x000004c0
+#define MAC_RCV_VALUE_8			0x000004c4
+#define MAC_RCV_RULE_9			0x000004c8
+#define MAC_RCV_VALUE_9			0x000004cc
+#define MAC_RCV_RULE_10			0x000004d0
+#define MAC_RCV_VALUE_10		0x000004d4
+#define MAC_RCV_RULE_11			0x000004d8
+#define MAC_RCV_VALUE_11		0x000004dc
+#define MAC_RCV_RULE_12			0x000004e0
+#define MAC_RCV_VALUE_12		0x000004e4
+#define MAC_RCV_RULE_13			0x000004e8
+#define MAC_RCV_VALUE_13		0x000004ec
+#define MAC_RCV_RULE_14			0x000004f0
+#define MAC_RCV_VALUE_14		0x000004f4
+#define MAC_RCV_RULE_15			0x000004f8
+#define MAC_RCV_VALUE_15		0x000004fc
+#define  RCV_RULE_DISABLE_MASK		 0x7fffffff
+#define MAC_RCV_RULE_CFG		0x00000500
+#define  RCV_RULE_CFG_DEFAULT_CLASS	0x00000008
+#define MAC_LOW_WMARK_MAX_RX_FRAME	0x00000504
+/* 0x508 --> 0x520 unused */
+#define MAC_HASHREGU_0			0x00000520
+#define MAC_HASHREGU_1			0x00000524
+#define MAC_HASHREGU_2			0x00000528
+#define MAC_HASHREGU_3			0x0000052c
+#define MAC_EXTADDR_0_HIGH		0x00000530
+#define MAC_EXTADDR_0_LOW		0x00000534
+#define MAC_EXTADDR_1_HIGH		0x00000538
+#define MAC_EXTADDR_1_LOW		0x0000053c
+#define MAC_EXTADDR_2_HIGH		0x00000540
+#define MAC_EXTADDR_2_LOW		0x00000544
+#define MAC_EXTADDR_3_HIGH		0x00000548
+#define MAC_EXTADDR_3_LOW		0x0000054c
+#define MAC_EXTADDR_4_HIGH		0x00000550
+#define MAC_EXTADDR_4_LOW		0x00000554
+#define MAC_EXTADDR_5_HIGH		0x00000558
+#define MAC_EXTADDR_5_LOW		0x0000055c
+#define MAC_EXTADDR_6_HIGH		0x00000560
+#define MAC_EXTADDR_6_LOW		0x00000564
+#define MAC_EXTADDR_7_HIGH		0x00000568
+#define MAC_EXTADDR_7_LOW		0x0000056c
+#define MAC_EXTADDR_8_HIGH		0x00000570
+#define MAC_EXTADDR_8_LOW		0x00000574
+#define MAC_EXTADDR_9_HIGH		0x00000578
+#define MAC_EXTADDR_9_LOW		0x0000057c
+#define MAC_EXTADDR_10_HIGH		0x00000580
+#define MAC_EXTADDR_10_LOW		0x00000584
+#define MAC_EXTADDR_11_HIGH		0x00000588
+#define MAC_EXTADDR_11_LOW		0x0000058c
+#define MAC_SERDES_CFG			0x00000590
+#define  MAC_SERDES_CFG_EDGE_SELECT	 0x00001000
+#define MAC_SERDES_STAT			0x00000594
+/* 0x598 --> 0x5a0 unused */
+#define MAC_PHYCFG1			0x000005a0
+#define  MAC_PHYCFG1_RGMII_INT		 0x00000001
+#define  MAC_PHYCFG1_RXCLK_TO_MASK	 0x00001ff0
+#define  MAC_PHYCFG1_RXCLK_TIMEOUT	 0x00001000
+#define  MAC_PHYCFG1_TXCLK_TO_MASK	 0x01ff0000
+#define  MAC_PHYCFG1_TXCLK_TIMEOUT	 0x01000000
+#define  MAC_PHYCFG1_RGMII_EXT_RX_DEC	 0x02000000
+#define  MAC_PHYCFG1_RGMII_SND_STAT_EN	 0x04000000
+#define  MAC_PHYCFG1_TXC_DRV		 0x20000000
+#define MAC_PHYCFG2			0x000005a4
+#define  MAC_PHYCFG2_INBAND_ENABLE	 0x00000001
+#define  MAC_PHYCFG2_EMODE_MASK_MASK	 0x000001c0
+#define  MAC_PHYCFG2_EMODE_MASK_AC131	 0x000000c0
+#define  MAC_PHYCFG2_EMODE_MASK_50610	 0x00000100
+#define  MAC_PHYCFG2_EMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_EMODE_MASK_RT8201	 0x000001c0
+#define  MAC_PHYCFG2_EMODE_COMP_MASK	 0x00000e00
+#define  MAC_PHYCFG2_EMODE_COMP_AC131	 0x00000600
+#define  MAC_PHYCFG2_EMODE_COMP_50610	 0x00000400
+#define  MAC_PHYCFG2_EMODE_COMP_RT8211	 0x00000800
+#define  MAC_PHYCFG2_EMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_FMODE_MASK_MASK	 0x00007000
+#define  MAC_PHYCFG2_FMODE_MASK_AC131	 0x00006000
+#define  MAC_PHYCFG2_FMODE_MASK_50610	 0x00004000
+#define  MAC_PHYCFG2_FMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_FMODE_MASK_RT8201	 0x00007000
+#define  MAC_PHYCFG2_FMODE_COMP_MASK	 0x00038000
+#define  MAC_PHYCFG2_FMODE_COMP_AC131	 0x00030000
+#define  MAC_PHYCFG2_FMODE_COMP_50610	 0x00008000
+#define  MAC_PHYCFG2_FMODE_COMP_RT8211	 0x00038000
+#define  MAC_PHYCFG2_FMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_GMODE_MASK_MASK	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_MASK_AC131	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_MASK_50610	 0x00100000
+#define  MAC_PHYCFG2_GMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_GMODE_MASK_RT8201	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_COMP_MASK	 0x00e00000
+#define  MAC_PHYCFG2_GMODE_COMP_AC131	 0x00e00000
+#define  MAC_PHYCFG2_GMODE_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_GMODE_COMP_RT8211	 0x00200000
+#define  MAC_PHYCFG2_GMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_ACT_MASK_MASK	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_AC131	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_50610	 0x01000000
+#define  MAC_PHYCFG2_ACT_MASK_RT8211	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_RT8201	 0x01000000
+#define  MAC_PHYCFG2_ACT_COMP_MASK	 0x0c000000
+#define  MAC_PHYCFG2_ACT_COMP_AC131	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_RT8211	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_RT8201	 0x08000000
+#define  MAC_PHYCFG2_QUAL_MASK_MASK	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_AC131	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_50610	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_RT8211	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_RT8201	 0x30000000
+#define  MAC_PHYCFG2_QUAL_COMP_MASK	 0xc0000000
+#define  MAC_PHYCFG2_QUAL_COMP_AC131	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_RT8211	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_RT8201	 0x00000000
+#define MAC_PHYCFG2_50610_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_50610 | \
+	 MAC_PHYCFG2_EMODE_COMP_50610 | \
+	 MAC_PHYCFG2_FMODE_MASK_50610 | \
+	 MAC_PHYCFG2_FMODE_COMP_50610 | \
+	 MAC_PHYCFG2_GMODE_MASK_50610 | \
+	 MAC_PHYCFG2_GMODE_COMP_50610 | \
+	 MAC_PHYCFG2_ACT_MASK_50610 | \
+	 MAC_PHYCFG2_ACT_COMP_50610 | \
+	 MAC_PHYCFG2_QUAL_MASK_50610 | \
+	 MAC_PHYCFG2_QUAL_COMP_50610)
+#define MAC_PHYCFG2_AC131_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_EMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_FMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_FMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_GMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_GMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_ACT_MASK_AC131 | \
+	 MAC_PHYCFG2_ACT_COMP_AC131 | \
+	 MAC_PHYCFG2_QUAL_MASK_AC131 | \
+	 MAC_PHYCFG2_QUAL_COMP_AC131)
+#define MAC_PHYCFG2_RTL8211C_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8211 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8211 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8211 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8211)
+#define MAC_PHYCFG2_RTL8201E_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8201 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8201 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8201 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8201)
+#define MAC_EXT_RGMII_MODE		0x000005a8
+#define  MAC_RGMII_MODE_TX_ENABLE	 0x00000001
+#define  MAC_RGMII_MODE_TX_LOWPWR	 0x00000002
+#define  MAC_RGMII_MODE_TX_RESET	 0x00000004
+#define  MAC_RGMII_MODE_RX_INT_B	 0x00000100
+#define  MAC_RGMII_MODE_RX_QUALITY	 0x00000200
+#define  MAC_RGMII_MODE_RX_ACTIVITY	 0x00000400
+#define  MAC_RGMII_MODE_RX_ENG_DET	 0x00000800
+/* 0x5ac --> 0x5b0 unused */
+#define SERDES_RX_CTRL			0x000005b0	/* 5780/5714 only */
+#define  SERDES_RX_SIG_DETECT		 0x00000400
+#define SG_DIG_CTRL			0x000005b0
+#define  SG_DIG_USING_HW_AUTONEG	 0x80000000
+#define  SG_DIG_SOFT_RESET		 0x40000000
+#define  SG_DIG_DISABLE_LINKRDY		 0x20000000
+#define  SG_DIG_CRC16_CLEAR_N		 0x01000000
+#define  SG_DIG_EN10B			 0x00800000
+#define  SG_DIG_CLEAR_STATUS		 0x00400000
+#define  SG_DIG_LOCAL_DUPLEX_STATUS	 0x00200000
+#define  SG_DIG_LOCAL_LINK_STATUS	 0x00100000
+#define  SG_DIG_SPEED_STATUS_MASK	 0x000c0000
+#define  SG_DIG_SPEED_STATUS_SHIFT	 18
+#define  SG_DIG_JUMBO_PACKET_DISABLE	 0x00020000
+#define  SG_DIG_RESTART_AUTONEG		 0x00010000
+#define  SG_DIG_FIBER_MODE		 0x00008000
+#define  SG_DIG_REMOTE_FAULT_MASK	 0x00006000
+#define  SG_DIG_PAUSE_MASK		 0x00001800
+#define  SG_DIG_PAUSE_CAP		 0x00000800
+#define  SG_DIG_ASYM_PAUSE		 0x00001000
+#define  SG_DIG_GBIC_ENABLE		 0x00000400
+#define  SG_DIG_CHECK_END_ENABLE	 0x00000200
+#define  SG_DIG_SGMII_AUTONEG_TIMER	 0x00000100
+#define  SG_DIG_CLOCK_PHASE_SELECT	 0x00000080
+#define  SG_DIG_GMII_INPUT_SELECT	 0x00000040
+#define  SG_DIG_MRADV_CRC16_SELECT	 0x00000020
+#define  SG_DIG_COMMA_DETECT_ENABLE	 0x00000010
+#define  SG_DIG_AUTONEG_TIMER_REDUCE	 0x00000008
+#define  SG_DIG_AUTONEG_LOW_ENABLE	 0x00000004
+#define  SG_DIG_REMOTE_LOOPBACK		 0x00000002
+#define  SG_DIG_LOOPBACK		 0x00000001
+#define  SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \
+			      SG_DIG_LOCAL_DUPLEX_STATUS | \
+			      SG_DIG_LOCAL_LINK_STATUS | \
+			      (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \
+			      SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE)
+#define SG_DIG_STATUS			0x000005b4
+#define  SG_DIG_CRC16_BUS_MASK		 0xffff0000
+#define  SG_DIG_PARTNER_FAULT_MASK	 0x00600000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_ASYM_PAUSE	 0x00100000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_PAUSE_CAPABLE	 0x00080000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_HALF_DUPLEX	 0x00040000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_FULL_DUPLEX	 0x00020000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_NEXT_PAGE	 0x00010000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_AUTONEG_STATE_MASK	 0x00000ff0
+#define  SG_DIG_IS_SERDES		 0x00000100
+#define  SG_DIG_COMMA_DETECTOR		 0x00000008
+#define  SG_DIG_MAC_ACK_STATUS		 0x00000004
+#define  SG_DIG_AUTONEG_COMPLETE	 0x00000002
+#define  SG_DIG_AUTONEG_ERROR		 0x00000001
+#define TG3_TX_TSTAMP_LSB		0x000005c0
+#define TG3_TX_TSTAMP_MSB		0x000005c4
+#define  TG3_TSTAMP_MASK		 0x7fffffffffffffffLL
+/* 0x5c8 --> 0x600 unused */
+#define MAC_TX_MAC_STATE_BASE		0x00000600 /* 16 bytes */
+#define MAC_RX_MAC_STATE_BASE		0x00000610 /* 20 bytes */
+/* 0x624 --> 0x670 unused */
+
+#define MAC_RSS_INDIR_TBL_0		0x00000630
+
+#define MAC_RSS_HASH_KEY_0		0x00000670
+#define MAC_RSS_HASH_KEY_1		0x00000674
+#define MAC_RSS_HASH_KEY_2		0x00000678
+#define MAC_RSS_HASH_KEY_3		0x0000067c
+#define MAC_RSS_HASH_KEY_4		0x00000680
+#define MAC_RSS_HASH_KEY_5		0x00000684
+#define MAC_RSS_HASH_KEY_6		0x00000688
+#define MAC_RSS_HASH_KEY_7		0x0000068c
+#define MAC_RSS_HASH_KEY_8		0x00000690
+#define MAC_RSS_HASH_KEY_9		0x00000694
+/* 0x698 --> 0x6b0 unused */
+
+#define TG3_RX_TSTAMP_LSB		0x000006b0
+#define TG3_RX_TSTAMP_MSB		0x000006b4
+/* 0x6b8 --> 0x6c8 unused */
+
+#define TG3_RX_PTP_CTL			0x000006c8
+#define TG3_RX_PTP_CTL_SYNC_EVNT	0x00000001
+#define TG3_RX_PTP_CTL_DELAY_REQ	0x00000002
+#define TG3_RX_PTP_CTL_PDLAY_REQ	0x00000004
+#define TG3_RX_PTP_CTL_PDLAY_RES	0x00000008
+#define TG3_RX_PTP_CTL_ALL_V1_EVENTS	(TG3_RX_PTP_CTL_SYNC_EVNT | \
+					 TG3_RX_PTP_CTL_DELAY_REQ)
+#define TG3_RX_PTP_CTL_ALL_V2_EVENTS	(TG3_RX_PTP_CTL_SYNC_EVNT | \
+					 TG3_RX_PTP_CTL_DELAY_REQ | \
+					 TG3_RX_PTP_CTL_PDLAY_REQ | \
+					 TG3_RX_PTP_CTL_PDLAY_RES)
+#define TG3_RX_PTP_CTL_FOLLOW_UP	0x00000100
+#define TG3_RX_PTP_CTL_DELAY_RES	0x00000200
+#define TG3_RX_PTP_CTL_PDRES_FLW_UP	0x00000400
+#define TG3_RX_PTP_CTL_ANNOUNCE		0x00000800
+#define TG3_RX_PTP_CTL_SIGNALING	0x00001000
+#define TG3_RX_PTP_CTL_MANAGEMENT	0x00002000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN	0x00800000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN	0x01000000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_EN	(TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \
+					 TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN)
+#define TG3_RX_PTP_CTL_RX_PTP_V1_EN	0x02000000
+#define TG3_RX_PTP_CTL_HWTS_INTERLOCK	0x04000000
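+
+/*
+ * Illustrative arithmetic (derived from the constants above): the composite
+ * PTP masks are plain ORs of the individual event bits, i.e.
+ *
+ *	TG3_RX_PTP_CTL_ALL_V1_EVENTS == 0x00000003
+ *	TG3_RX_PTP_CTL_ALL_V2_EVENTS == 0x0000000f
+ *	TG3_RX_PTP_CTL_RX_PTP_V2_EN  == 0x01800000
+ */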
+/* 0x6cc --> 0x800 unused */
+
+#define MAC_TX_STATS_OCTETS		0x00000800
+#define MAC_TX_STATS_RESV1		0x00000804
+#define MAC_TX_STATS_COLLISIONS		0x00000808
+#define MAC_TX_STATS_XON_SENT		0x0000080c
+#define MAC_TX_STATS_XOFF_SENT		0x00000810
+#define MAC_TX_STATS_RESV2		0x00000814
+#define MAC_TX_STATS_MAC_ERRORS		0x00000818
+#define MAC_TX_STATS_SINGLE_COLLISIONS	0x0000081c
+#define MAC_TX_STATS_MULT_COLLISIONS	0x00000820
+#define MAC_TX_STATS_DEFERRED		0x00000824
+#define MAC_TX_STATS_RESV3		0x00000828
+#define MAC_TX_STATS_EXCESSIVE_COL	0x0000082c
+#define MAC_TX_STATS_LATE_COL		0x00000830
+#define MAC_TX_STATS_RESV4_1		0x00000834
+#define MAC_TX_STATS_RESV4_2		0x00000838
+#define MAC_TX_STATS_RESV4_3		0x0000083c
+#define MAC_TX_STATS_RESV4_4		0x00000840
+#define MAC_TX_STATS_RESV4_5		0x00000844
+#define MAC_TX_STATS_RESV4_6		0x00000848
+#define MAC_TX_STATS_RESV4_7		0x0000084c
+#define MAC_TX_STATS_RESV4_8		0x00000850
+#define MAC_TX_STATS_RESV4_9		0x00000854
+#define MAC_TX_STATS_RESV4_10		0x00000858
+#define MAC_TX_STATS_RESV4_11		0x0000085c
+#define MAC_TX_STATS_RESV4_12		0x00000860
+#define MAC_TX_STATS_RESV4_13		0x00000864
+#define MAC_TX_STATS_RESV4_14		0x00000868
+#define MAC_TX_STATS_UCAST		0x0000086c
+#define MAC_TX_STATS_MCAST		0x00000870
+#define MAC_TX_STATS_BCAST		0x00000874
+#define MAC_TX_STATS_RESV5_1		0x00000878
+#define MAC_TX_STATS_RESV5_2		0x0000087c
+#define MAC_RX_STATS_OCTETS		0x00000880
+#define MAC_RX_STATS_RESV1		0x00000884
+#define MAC_RX_STATS_FRAGMENTS		0x00000888
+#define MAC_RX_STATS_UCAST		0x0000088c
+#define MAC_RX_STATS_MCAST		0x00000890
+#define MAC_RX_STATS_BCAST		0x00000894
+#define MAC_RX_STATS_FCS_ERRORS		0x00000898
+#define MAC_RX_STATS_ALIGN_ERRORS	0x0000089c
+#define MAC_RX_STATS_XON_PAUSE_RECVD	0x000008a0
+#define MAC_RX_STATS_XOFF_PAUSE_RECVD	0x000008a4
+#define MAC_RX_STATS_MAC_CTRL_RECVD	0x000008a8
+#define MAC_RX_STATS_XOFF_ENTERED	0x000008ac
+#define MAC_RX_STATS_FRAME_TOO_LONG	0x000008b0
+#define MAC_RX_STATS_JABBERS		0x000008b4
+#define MAC_RX_STATS_UNDERSIZE		0x000008b8
+/* 0x8bc --> 0xc00 unused */
+
+/* Send data initiator control registers */
+#define SNDDATAI_MODE			0x00000c00
+#define  SNDDATAI_MODE_RESET		 0x00000001
+#define  SNDDATAI_MODE_ENABLE		 0x00000002
+#define  SNDDATAI_MODE_STAT_OFLOW_ENAB	 0x00000004
+#define SNDDATAI_STATUS			0x00000c04
+#define  SNDDATAI_STATUS_STAT_OFLOW	 0x00000004
+#define SNDDATAI_STATSCTRL		0x00000c08
+#define  SNDDATAI_SCTRL_ENABLE		 0x00000001
+#define  SNDDATAI_SCTRL_FASTUPD		 0x00000002
+#define  SNDDATAI_SCTRL_CLEAR		 0x00000004
+#define  SNDDATAI_SCTRL_FLUSH		 0x00000008
+#define  SNDDATAI_SCTRL_FORCE_ZERO	 0x00000010
+#define SNDDATAI_STATSENAB		0x00000c0c
+#define SNDDATAI_STATSINCMASK		0x00000c10
+#define ISO_PKT_TX			0x00000c20
+/* 0xc24 --> 0xc80 unused */
+#define SNDDATAI_COS_CNT_0		0x00000c80
+#define SNDDATAI_COS_CNT_1		0x00000c84
+#define SNDDATAI_COS_CNT_2		0x00000c88
+#define SNDDATAI_COS_CNT_3		0x00000c8c
+#define SNDDATAI_COS_CNT_4		0x00000c90
+#define SNDDATAI_COS_CNT_5		0x00000c94
+#define SNDDATAI_COS_CNT_6		0x00000c98
+#define SNDDATAI_COS_CNT_7		0x00000c9c
+#define SNDDATAI_COS_CNT_8		0x00000ca0
+#define SNDDATAI_COS_CNT_9		0x00000ca4
+#define SNDDATAI_COS_CNT_10		0x00000ca8
+#define SNDDATAI_COS_CNT_11		0x00000cac
+#define SNDDATAI_COS_CNT_12		0x00000cb0
+#define SNDDATAI_COS_CNT_13		0x00000cb4
+#define SNDDATAI_COS_CNT_14		0x00000cb8
+#define SNDDATAI_COS_CNT_15		0x00000cbc
+#define SNDDATAI_DMA_RDQ_FULL_CNT	0x00000cc0
+#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT	0x00000cc4
+#define SNDDATAI_SDCQ_FULL_CNT		0x00000cc8
+#define SNDDATAI_NICRNG_SSND_PIDX_CNT	0x00000ccc
+#define SNDDATAI_STATS_UPDATED_CNT	0x00000cd0
+#define SNDDATAI_INTERRUPTS_CNT		0x00000cd4
+#define SNDDATAI_AVOID_INTERRUPTS_CNT	0x00000cd8
+#define SNDDATAI_SND_THRESH_HIT_CNT	0x00000cdc
+/* 0xce0 --> 0x1000 unused */
+
+/* Send data completion control registers */
+#define SNDDATAC_MODE			0x00001000
+#define  SNDDATAC_MODE_RESET		 0x00000001
+#define  SNDDATAC_MODE_ENABLE		 0x00000002
+#define  SNDDATAC_MODE_CDELAY		 0x00000010
+/* 0x1004 --> 0x1400 unused */
+
+/* Send BD ring selector */
+#define SNDBDS_MODE			0x00001400
+#define  SNDBDS_MODE_RESET		 0x00000001
+#define  SNDBDS_MODE_ENABLE		 0x00000002
+#define  SNDBDS_MODE_ATTN_ENABLE	 0x00000004
+#define SNDBDS_STATUS			0x00001404
+#define  SNDBDS_STATUS_ERROR_ATTN	 0x00000004
+#define SNDBDS_HWDIAG			0x00001408
+/* 0x140c --> 0x1440 */
+#define SNDBDS_SEL_CON_IDX_0		0x00001440
+#define SNDBDS_SEL_CON_IDX_1		0x00001444
+#define SNDBDS_SEL_CON_IDX_2		0x00001448
+#define SNDBDS_SEL_CON_IDX_3		0x0000144c
+#define SNDBDS_SEL_CON_IDX_4		0x00001450
+#define SNDBDS_SEL_CON_IDX_5		0x00001454
+#define SNDBDS_SEL_CON_IDX_6		0x00001458
+#define SNDBDS_SEL_CON_IDX_7		0x0000145c
+#define SNDBDS_SEL_CON_IDX_8		0x00001460
+#define SNDBDS_SEL_CON_IDX_9		0x00001464
+#define SNDBDS_SEL_CON_IDX_10		0x00001468
+#define SNDBDS_SEL_CON_IDX_11		0x0000146c
+#define SNDBDS_SEL_CON_IDX_12		0x00001470
+#define SNDBDS_SEL_CON_IDX_13		0x00001474
+#define SNDBDS_SEL_CON_IDX_14		0x00001478
+#define SNDBDS_SEL_CON_IDX_15		0x0000147c
+/* 0x1480 --> 0x1800 unused */
+
+/* Send BD initiator control registers */
+#define SNDBDI_MODE			0x00001800
+#define  SNDBDI_MODE_RESET		 0x00000001
+#define  SNDBDI_MODE_ENABLE		 0x00000002
+#define  SNDBDI_MODE_ATTN_ENABLE	 0x00000004
+#define  SNDBDI_MODE_MULTI_TXQ_EN	 0x00000020
+#define SNDBDI_STATUS			0x00001804
+#define  SNDBDI_STATUS_ERROR_ATTN	 0x00000004
+#define SNDBDI_IN_PROD_IDX_0		0x00001808
+#define SNDBDI_IN_PROD_IDX_1		0x0000180c
+#define SNDBDI_IN_PROD_IDX_2		0x00001810
+#define SNDBDI_IN_PROD_IDX_3		0x00001814
+#define SNDBDI_IN_PROD_IDX_4		0x00001818
+#define SNDBDI_IN_PROD_IDX_5		0x0000181c
+#define SNDBDI_IN_PROD_IDX_6		0x00001820
+#define SNDBDI_IN_PROD_IDX_7		0x00001824
+#define SNDBDI_IN_PROD_IDX_8		0x00001828
+#define SNDBDI_IN_PROD_IDX_9		0x0000182c
+#define SNDBDI_IN_PROD_IDX_10		0x00001830
+#define SNDBDI_IN_PROD_IDX_11		0x00001834
+#define SNDBDI_IN_PROD_IDX_12		0x00001838
+#define SNDBDI_IN_PROD_IDX_13		0x0000183c
+#define SNDBDI_IN_PROD_IDX_14		0x00001840
+#define SNDBDI_IN_PROD_IDX_15		0x00001844
+/* 0x1848 --> 0x1c00 unused */
+
+/* Send BD completion control registers */
+#define SNDBDC_MODE			0x00001c00
+#define SNDBDC_MODE_RESET		 0x00000001
+#define SNDBDC_MODE_ENABLE		 0x00000002
+#define SNDBDC_MODE_ATTN_ENABLE		 0x00000004
+/* 0x1c04 --> 0x2000 unused */
+
+/* Receive list placement control registers */
+#define RCVLPC_MODE			0x00002000
+#define  RCVLPC_MODE_RESET		 0x00000001
+#define  RCVLPC_MODE_ENABLE		 0x00000002
+#define  RCVLPC_MODE_CLASS0_ATTN_ENAB	 0x00000004
+#define  RCVLPC_MODE_MAPOOR_AATTN_ENAB	 0x00000008
+#define  RCVLPC_MODE_STAT_OFLOW_ENAB	 0x00000010
+#define RCVLPC_STATUS			0x00002004
+#define  RCVLPC_STATUS_CLASS0		 0x00000004
+#define  RCVLPC_STATUS_MAPOOR		 0x00000008
+#define  RCVLPC_STATUS_STAT_OFLOW	 0x00000010
+#define RCVLPC_LOCK			0x00002008
+#define  RCVLPC_LOCK_REQ_MASK		 0x0000ffff
+#define  RCVLPC_LOCK_REQ_SHIFT		 0
+#define  RCVLPC_LOCK_GRANT_MASK		 0xffff0000
+#define  RCVLPC_LOCK_GRANT_SHIFT	 16
+#define RCVLPC_NON_EMPTY_BITS		0x0000200c
+#define  RCVLPC_NON_EMPTY_BITS_MASK	 0x0000ffff
+#define RCVLPC_CONFIG			0x00002010
+#define RCVLPC_STATSCTRL		0x00002014
+#define  RCVLPC_STATSCTRL_ENABLE	 0x00000001
+#define  RCVLPC_STATSCTRL_FASTUPD	 0x00000002
+#define RCVLPC_STATS_ENABLE		0x00002018
+#define  RCVLPC_STATSENAB_ASF_FIX	 0x00000002
+#define  RCVLPC_STATSENAB_DACK_FIX	 0x00040000
+#define  RCVLPC_STATSENAB_LNGBRST_RFIX	 0x00400000
+#define RCVLPC_STATS_INCMASK		0x0000201c
+/* 0x2020 --> 0x2100 unused */
+#define RCVLPC_SELLST_BASE		0x00002100 /* 16 16-byte entries */
+#define  SELLST_TAIL			0x00000004
+#define  SELLST_CONT			0x00000008
+#define  SELLST_UNUSED			0x0000000c
+#define RCVLPC_COS_CNTL_BASE		0x00002200 /* 16 4-byte entries */
+#define RCVLPC_DROP_FILTER_CNT		0x00002240
+#define RCVLPC_DMA_WQ_FULL_CNT		0x00002244
+#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT	0x00002248
+#define RCVLPC_NO_RCV_BD_CNT		0x0000224c
+#define RCVLPC_IN_DISCARDS_CNT		0x00002250
+#define RCVLPC_IN_ERRORS_CNT		0x00002254
+#define RCVLPC_RCV_THRESH_HIT_CNT	0x00002258
+/* 0x225c --> 0x2400 unused */
+
+/* Receive Data and Receive BD Initiator Control */
+#define RCVDBDI_MODE			0x00002400
+#define  RCVDBDI_MODE_RESET		 0x00000001
+#define  RCVDBDI_MODE_ENABLE		 0x00000002
+#define  RCVDBDI_MODE_JUMBOBD_NEEDED	 0x00000004
+#define  RCVDBDI_MODE_FRM_TOO_BIG	 0x00000008
+#define  RCVDBDI_MODE_INV_RING_SZ	 0x00000010
+#define  RCVDBDI_MODE_LRG_RING_SZ	 0x00010000
+#define RCVDBDI_STATUS			0x00002404
+#define  RCVDBDI_STATUS_JUMBOBD_NEEDED	 0x00000004
+#define  RCVDBDI_STATUS_FRM_TOO_BIG	 0x00000008
+#define  RCVDBDI_STATUS_INV_RING_SZ	 0x00000010
+#define RCVDBDI_SPLIT_FRAME_MINSZ	0x00002408
+/* 0x240c --> 0x2440 unused */
+#define RCVDBDI_JUMBO_BD		0x00002440 /* TG3_BDINFO_... */
+#define RCVDBDI_STD_BD			0x00002450 /* TG3_BDINFO_... */
+#define RCVDBDI_MINI_BD			0x00002460 /* TG3_BDINFO_... */
+#define RCVDBDI_JUMBO_CON_IDX		0x00002470
+#define RCVDBDI_STD_CON_IDX		0x00002474
+#define RCVDBDI_MINI_CON_IDX		0x00002478
+/* 0x247c --> 0x2480 unused */
+#define RCVDBDI_BD_PROD_IDX_0		0x00002480
+#define RCVDBDI_BD_PROD_IDX_1		0x00002484
+#define RCVDBDI_BD_PROD_IDX_2		0x00002488
+#define RCVDBDI_BD_PROD_IDX_3		0x0000248c
+#define RCVDBDI_BD_PROD_IDX_4		0x00002490
+#define RCVDBDI_BD_PROD_IDX_5		0x00002494
+#define RCVDBDI_BD_PROD_IDX_6		0x00002498
+#define RCVDBDI_BD_PROD_IDX_7		0x0000249c
+#define RCVDBDI_BD_PROD_IDX_8		0x000024a0
+#define RCVDBDI_BD_PROD_IDX_9		0x000024a4
+#define RCVDBDI_BD_PROD_IDX_10		0x000024a8
+#define RCVDBDI_BD_PROD_IDX_11		0x000024ac
+#define RCVDBDI_BD_PROD_IDX_12		0x000024b0
+#define RCVDBDI_BD_PROD_IDX_13		0x000024b4
+#define RCVDBDI_BD_PROD_IDX_14		0x000024b8
+#define RCVDBDI_BD_PROD_IDX_15		0x000024bc
+#define RCVDBDI_HWDIAG			0x000024c0
+/* 0x24c4 --> 0x2800 unused */
+
+/* Receive Data Completion Control */
+#define RCVDCC_MODE			0x00002800
+#define  RCVDCC_MODE_RESET		 0x00000001
+#define  RCVDCC_MODE_ENABLE		 0x00000002
+#define  RCVDCC_MODE_ATTN_ENABLE	 0x00000004
+/* 0x2804 --> 0x2c00 unused */
+
+/* Receive BD Initiator Control Registers */
+#define RCVBDI_MODE			0x00002c00
+#define  RCVBDI_MODE_RESET		 0x00000001
+#define  RCVBDI_MODE_ENABLE		 0x00000002
+#define  RCVBDI_MODE_RCB_ATTN_ENAB	 0x00000004
+#define RCVBDI_STATUS			0x00002c04
+#define  RCVBDI_STATUS_RCB_ATTN		 0x00000004
+#define RCVBDI_JUMBO_PROD_IDX		0x00002c08
+#define RCVBDI_STD_PROD_IDX		0x00002c0c
+#define RCVBDI_MINI_PROD_IDX		0x00002c10
+#define RCVBDI_MINI_THRESH		0x00002c14
+#define RCVBDI_STD_THRESH		0x00002c18
+#define RCVBDI_JUMBO_THRESH		0x00002c1c
+/* 0x2c20 --> 0x2d00 unused */
+
+#define STD_REPLENISH_LWM		0x00002d00
+#define JMB_REPLENISH_LWM		0x00002d04
+/* 0x2d08 --> 0x3000 unused */
+
+/* Receive BD Completion Control Registers */
+#define RCVCC_MODE			0x00003000
+#define  RCVCC_MODE_RESET		 0x00000001
+#define  RCVCC_MODE_ENABLE		 0x00000002
+#define  RCVCC_MODE_ATTN_ENABLE		 0x00000004
+#define RCVCC_STATUS			0x00003004
+#define  RCVCC_STATUS_ERROR_ATTN	 0x00000004
+#define RCVCC_JUMP_PROD_IDX		0x00003008
+#define RCVCC_STD_PROD_IDX		0x0000300c
+#define RCVCC_MINI_PROD_IDX		0x00003010
+/* 0x3014 --> 0x3400 unused */
+
+/* Receive list selector control registers */
+#define RCVLSC_MODE			0x00003400
+#define  RCVLSC_MODE_RESET		 0x00000001
+#define  RCVLSC_MODE_ENABLE		 0x00000002
+#define  RCVLSC_MODE_ATTN_ENABLE	 0x00000004
+#define RCVLSC_STATUS			0x00003404
+#define  RCVLSC_STATUS_ERROR_ATTN	 0x00000004
+/* 0x3408 --> 0x3600 unused */
+
+#define TG3_CPMU_DRV_STATUS		0x0000344c
+
+/* CPMU registers */
+#define TG3_CPMU_CTRL			0x00003600
+#define  CPMU_CTRL_LINK_IDLE_MODE	 0x00000200
+#define  CPMU_CTRL_LINK_AWARE_MODE	 0x00000400
+#define  CPMU_CTRL_LINK_SPEED_MODE	 0x00004000
+#define  CPMU_CTRL_GPHY_10MB_RXONLY	 0x00010000
+#define TG3_CPMU_LSPD_10MB_CLK		0x00003604
+#define  CPMU_LSPD_10MB_MACCLK_MASK	 0x001f0000
+#define  CPMU_LSPD_10MB_MACCLK_6_25	 0x00130000
+/* 0x3608 --> 0x360c unused */
+
+#define TG3_CPMU_LSPD_1000MB_CLK	0x0000360c
+#define  CPMU_LSPD_1000MB_MACCLK_62_5	 0x00000000
+#define  CPMU_LSPD_1000MB_MACCLK_12_5	 0x00110000
+#define  CPMU_LSPD_1000MB_MACCLK_MASK	 0x001f0000
+#define TG3_CPMU_LNK_AWARE_PWRMD	0x00003610
+#define  CPMU_LNK_AWARE_MACCLK_MASK	 0x001f0000
+#define  CPMU_LNK_AWARE_MACCLK_6_25	 0x00130000
+/* 0x3614 --> 0x361c unused */
+
+#define TG3_CPMU_HST_ACC		0x0000361c
+#define  CPMU_HST_ACC_MACCLK_MASK	 0x001f0000
+#define  CPMU_HST_ACC_MACCLK_6_25	 0x00130000
+/* 0x3620 --> 0x3630 unused */
+
+#define TG3_CPMU_CLCK_ORIDE		0x00003624
+#define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000
+
+#define TG3_CPMU_CLCK_ORIDE_ENABLE	0x00003628
+#define  TG3_CPMU_MAC_ORIDE_ENABLE	 (1 << 13)
+
+#define TG3_CPMU_STATUS			0x0000362c
+#define  TG3_CPMU_STATUS_FMSK_5717	 0x20000000
+#define  TG3_CPMU_STATUS_FMSK_5719	 0xc0000000
+#define  TG3_CPMU_STATUS_FSHFT_5719	 30
+#define  TG3_CPMU_STATUS_LINK_MASK	 0x180000
+
+#define TG3_CPMU_CLCK_STAT		0x00003630
+#define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
+#define  CPMU_CLCK_STAT_MAC_CLCK_62_5	 0x00000000
+#define  CPMU_CLCK_STAT_MAC_CLCK_12_5	 0x00110000
+#define  CPMU_CLCK_STAT_MAC_CLCK_6_25	 0x00130000
+/* 0x3634 --> 0x365c unused */
+
+#define TG3_CPMU_MUTEX_REQ		0x0000365c
+#define  CPMU_MUTEX_REQ_DRIVER		 0x00001000
+#define TG3_CPMU_MUTEX_GNT		0x00003660
+#define  CPMU_MUTEX_GNT_DRIVER		 0x00001000
+#define TG3_CPMU_PHY_STRAP		0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES	 0x00000020
+#define TG3_CPMU_PADRNG_CTL		0x00003668
+#define  TG3_CPMU_PADRNG_CTL_RDIV2	 0x00040000
+/* 0x3664 --> 0x36b0 unused */
+
+#define TG3_CPMU_EEE_MODE		0x000036b0
+#define  TG3_CPMU_EEEMD_APE_TX_DET_EN	 0x00000004
+#define  TG3_CPMU_EEEMD_ERLY_L1_XIT_DET	 0x00000008
+#define  TG3_CPMU_EEEMD_SND_IDX_DET_EN	 0x00000040
+#define  TG3_CPMU_EEEMD_LPI_ENABLE	 0x00000080
+#define  TG3_CPMU_EEEMD_LPI_IN_TX	 0x00000100
+#define  TG3_CPMU_EEEMD_LPI_IN_RX	 0x00000200
+#define  TG3_CPMU_EEEMD_EEE_ENABLE	 0x00100000
+#define TG3_CPMU_EEE_DBTMR1		0x000036b4
+#define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000007ff
+#define  TG3_CPMU_DBTMR1_LNKIDLE_MAX	 0x0000ffff
+#define TG3_CPMU_EEE_DBTMR2		0x000036b8
+#define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000007ff
+#define TG3_CPMU_EEE_LNKIDL_CTRL	0x000036bc
+#define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0	 0x01000000
+#define  TG3_CPMU_EEE_LNKIDL_UART_IDL	 0x00000004
+#define  TG3_CPMU_EEE_LNKIDL_APE_TX_MT	 0x00000002
+/* 0x36c0 --> 0x36d0 unused */
+
+#define TG3_CPMU_EEE_CTRL		0x000036d0
+#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US	 0x0000019d
+#define TG3_CPMU_EEE_CTRL_EXIT_36_US	 0x00000384
+#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US	 0x000001f8
+/* 0x36d4 --> 0x3800 unused */
+
+/* Mbuf cluster free registers */
+#define MBFREE_MODE			0x00003800
+#define  MBFREE_MODE_RESET		 0x00000001
+#define  MBFREE_MODE_ENABLE		 0x00000002
+#define MBFREE_STATUS			0x00003804
+/* 0x3808 --> 0x3c00 unused */
+
+/* Host coalescing control registers */
+#define HOSTCC_MODE			0x00003c00
+#define  HOSTCC_MODE_RESET		 0x00000001
+#define  HOSTCC_MODE_ENABLE		 0x00000002
+#define  HOSTCC_MODE_ATTN		 0x00000004
+#define  HOSTCC_MODE_NOW		 0x00000008
+#define  HOSTCC_MODE_FULL_STATUS	 0x00000000
+#define  HOSTCC_MODE_64BYTE		 0x00000080
+#define  HOSTCC_MODE_32BYTE		 0x00000100
+#define  HOSTCC_MODE_CLRTICK_RXBD	 0x00000200
+#define  HOSTCC_MODE_CLRTICK_TXBD	 0x00000400
+#define  HOSTCC_MODE_NOINT_ON_NOW	 0x00000800
+#define  HOSTCC_MODE_NOINT_ON_FORCE	 0x00001000
+#define  HOSTCC_MODE_COAL_VEC1_NOW	 0x00002000
+#define HOSTCC_STATUS			0x00003c04
+#define  HOSTCC_STATUS_ERROR_ATTN	 0x00000004
+#define HOSTCC_RXCOL_TICKS		0x00003c08
+#define  LOW_RXCOL_TICKS		 0x00000032
+#define  LOW_RXCOL_TICKS_CLRTCKS	 0x00000014
+#define  DEFAULT_RXCOL_TICKS		 0x00000048
+#define  HIGH_RXCOL_TICKS		 0x00000096
+#define  MAX_RXCOL_TICKS		 0x000003ff
+#define HOSTCC_TXCOL_TICKS		0x00003c0c
+#define  LOW_TXCOL_TICKS		 0x00000096
+#define  LOW_TXCOL_TICKS_CLRTCKS	 0x00000048
+#define  DEFAULT_TXCOL_TICKS		 0x0000012c
+#define  HIGH_TXCOL_TICKS		 0x00000145
+#define  MAX_TXCOL_TICKS		 0x000003ff
+#define HOSTCC_RXMAX_FRAMES		0x00003c10
+#define  LOW_RXMAX_FRAMES		 0x00000005
+#define  DEFAULT_RXMAX_FRAMES		 0x00000008
+#define  HIGH_RXMAX_FRAMES		 0x00000012
+#define  MAX_RXMAX_FRAMES		 0x000000ff
+#define HOSTCC_TXMAX_FRAMES		0x00003c14
+#define  LOW_TXMAX_FRAMES		 0x00000035
+#define  DEFAULT_TXMAX_FRAMES		 0x0000004b
+#define  HIGH_TXMAX_FRAMES		 0x00000052
+#define  MAX_TXMAX_FRAMES		 0x000000ff
+#define HOSTCC_RXCOAL_TICK_INT		0x00003c18
+#define  DEFAULT_RXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define  MAX_RXCOAL_TICK_INT		 0x000003ff
+#define HOSTCC_TXCOAL_TICK_INT		0x00003c1c
+#define  DEFAULT_TXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define  MAX_TXCOAL_TICK_INT		 0x000003ff
+#define HOSTCC_RXCOAL_MAXF_INT		0x00003c20
+#define  DEFAULT_RXCOAL_MAXF_INT	 0x00000005
+#define  MAX_RXCOAL_MAXF_INT		 0x000000ff
+#define HOSTCC_TXCOAL_MAXF_INT		0x00003c24
+#define  DEFAULT_TXCOAL_MAXF_INT	 0x00000005
+#define  MAX_TXCOAL_MAXF_INT		 0x000000ff
+#define HOSTCC_STAT_COAL_TICKS		0x00003c28
+#define  DEFAULT_STAT_COAL_TICKS	 0x000f4240
+#define  MAX_STAT_COAL_TICKS		 0xd693d400
+#define  MIN_STAT_COAL_TICKS		 0x00000064
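+
+/*
+ * Illustrative note (unit interpretation assumed, not stated in this file):
+ * DEFAULT_STAT_COAL_TICKS is 0x000f4240, i.e. decimal 1,000,000; taking the
+ * tick unit as one microsecond, the default corresponds to roughly one
+ * statistics block update per second, bounded below by MIN_STAT_COAL_TICKS
+ * (0x64 == 100).
+ */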
+/* 0x3c2c --> 0x3c30 unused */
+#define HOSTCC_STATS_BLK_HOST_ADDR	0x00003c30 /* 64-bit */
+#define HOSTCC_STATUS_BLK_HOST_ADDR	0x00003c38 /* 64-bit */
+#define HOSTCC_STATS_BLK_NIC_ADDR	0x00003c40
+#define HOSTCC_STATUS_BLK_NIC_ADDR	0x00003c44
+#define HOSTCC_FLOW_ATTN		0x00003c48
+#define HOSTCC_FLOW_ATTN_MBUF_LWM	 0x00000040
+/* 0x3c4c --> 0x3c50 unused */
+#define HOSTCC_JUMBO_CON_IDX		0x00003c50
+#define HOSTCC_STD_CON_IDX		0x00003c54
+#define HOSTCC_MINI_CON_IDX		0x00003c58
+/* 0x3c5c --> 0x3c80 unused */
+#define HOSTCC_RET_PROD_IDX_0		0x00003c80
+#define HOSTCC_RET_PROD_IDX_1		0x00003c84
+#define HOSTCC_RET_PROD_IDX_2		0x00003c88
+#define HOSTCC_RET_PROD_IDX_3		0x00003c8c
+#define HOSTCC_RET_PROD_IDX_4		0x00003c90
+#define HOSTCC_RET_PROD_IDX_5		0x00003c94
+#define HOSTCC_RET_PROD_IDX_6		0x00003c98
+#define HOSTCC_RET_PROD_IDX_7		0x00003c9c
+#define HOSTCC_RET_PROD_IDX_8		0x00003ca0
+#define HOSTCC_RET_PROD_IDX_9		0x00003ca4
+#define HOSTCC_RET_PROD_IDX_10		0x00003ca8
+#define HOSTCC_RET_PROD_IDX_11		0x00003cac
+#define HOSTCC_RET_PROD_IDX_12		0x00003cb0
+#define HOSTCC_RET_PROD_IDX_13		0x00003cb4
+#define HOSTCC_RET_PROD_IDX_14		0x00003cb8
+#define HOSTCC_RET_PROD_IDX_15		0x00003cbc
+#define HOSTCC_SND_CON_IDX_0		0x00003cc0
+#define HOSTCC_SND_CON_IDX_1		0x00003cc4
+#define HOSTCC_SND_CON_IDX_2		0x00003cc8
+#define HOSTCC_SND_CON_IDX_3		0x00003ccc
+#define HOSTCC_SND_CON_IDX_4		0x00003cd0
+#define HOSTCC_SND_CON_IDX_5		0x00003cd4
+#define HOSTCC_SND_CON_IDX_6		0x00003cd8
+#define HOSTCC_SND_CON_IDX_7		0x00003cdc
+#define HOSTCC_SND_CON_IDX_8		0x00003ce0
+#define HOSTCC_SND_CON_IDX_9		0x00003ce4
+#define HOSTCC_SND_CON_IDX_10		0x00003ce8
+#define HOSTCC_SND_CON_IDX_11		0x00003cec
+#define HOSTCC_SND_CON_IDX_12		0x00003cf0
+#define HOSTCC_SND_CON_IDX_13		0x00003cf4
+#define HOSTCC_SND_CON_IDX_14		0x00003cf8
+#define HOSTCC_SND_CON_IDX_15		0x00003cfc
+#define HOSTCC_STATBLCK_RING1		0x00003d00
+/* 0x3d00 --> 0x3d80 unused */
+
+#define HOSTCC_RXCOL_TICKS_VEC1		0x00003d80
+#define HOSTCC_TXCOL_TICKS_VEC1		0x00003d84
+#define HOSTCC_RXMAX_FRAMES_VEC1	0x00003d88
+#define HOSTCC_TXMAX_FRAMES_VEC1	0x00003d8c
+#define HOSTCC_RXCOAL_MAXF_INT_VEC1	0x00003d90
+#define HOSTCC_TXCOAL_MAXF_INT_VEC1	0x00003d94
+/* 0x3d98 --> 0x4000 unused */
+
+/* Memory arbiter control registers */
+#define MEMARB_MODE			0x00004000
+#define  MEMARB_MODE_RESET		 0x00000001
+#define  MEMARB_MODE_ENABLE		 0x00000002
+#define MEMARB_STATUS			0x00004004
+#define MEMARB_TRAP_ADDR_LOW		0x00004008
+#define MEMARB_TRAP_ADDR_HIGH		0x0000400c
+/* 0x4010 --> 0x4400 unused */
+
+/* Buffer manager control registers */
+#define BUFMGR_MODE			0x00004400
+#define  BUFMGR_MODE_RESET		 0x00000001
+#define  BUFMGR_MODE_ENABLE		 0x00000002
+#define  BUFMGR_MODE_ATTN_ENABLE	 0x00000004
+#define  BUFMGR_MODE_BM_TEST		 0x00000008
+#define  BUFMGR_MODE_MBLOW_ATTN_ENAB	 0x00000010
+#define  BUFMGR_MODE_NO_TX_UNDERRUN	 0x80000000
+#define BUFMGR_STATUS			0x00004404
+#define  BUFMGR_STATUS_ERROR		 0x00000004
+#define  BUFMGR_STATUS_MBLOW		 0x00000010
+#define BUFMGR_MB_POOL_ADDR		0x00004408
+#define BUFMGR_MB_POOL_SIZE		0x0000440c
+#define BUFMGR_MB_RDMA_LOW_WATER	0x00004410
+#define  DEFAULT_MB_RDMA_LOW_WATER	 0x00000050
+#define  DEFAULT_MB_RDMA_LOW_WATER_5705	 0x00000000
+#define  DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130
+#define  DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000
+#define BUFMGR_MB_MACRX_LOW_WATER	0x00004414
+#define  DEFAULT_MB_MACRX_LOW_WATER	  0x00000020
+#define  DEFAULT_MB_MACRX_LOW_WATER_5705  0x00000010
+#define  DEFAULT_MB_MACRX_LOW_WATER_5906  0x00000004
+#define  DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
+#define BUFMGR_MB_HIGH_WATER		0x00004418
+#define  DEFAULT_MB_HIGH_WATER		 0x00000060
+#define  DEFAULT_MB_HIGH_WATER_5705	 0x00000060
+#define  DEFAULT_MB_HIGH_WATER_5906	 0x00000010
+#define  DEFAULT_MB_HIGH_WATER_57765	 0x000000a0
+#define  DEFAULT_MB_HIGH_WATER_JUMBO	 0x0000017c
+#define  DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define  DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
+#define BUFMGR_RX_MB_ALLOC_REQ		0x0000441c
+#define  BUFMGR_MB_ALLOC_BIT		 0x10000000
+#define BUFMGR_RX_MB_ALLOC_RESP		0x00004420
+#define BUFMGR_TX_MB_ALLOC_REQ		0x00004424
+#define BUFMGR_TX_MB_ALLOC_RESP		0x00004428
+#define BUFMGR_DMA_DESC_POOL_ADDR	0x0000442c
+#define BUFMGR_DMA_DESC_POOL_SIZE	0x00004430
+#define BUFMGR_DMA_LOW_WATER		0x00004434
+#define  DEFAULT_DMA_LOW_WATER		 0x00000005
+#define BUFMGR_DMA_HIGH_WATER		0x00004438
+#define  DEFAULT_DMA_HIGH_WATER		 0x0000000a
+#define BUFMGR_RX_DMA_ALLOC_REQ		0x0000443c
+#define BUFMGR_RX_DMA_ALLOC_RESP	0x00004440
+#define BUFMGR_TX_DMA_ALLOC_REQ		0x00004444
+#define BUFMGR_TX_DMA_ALLOC_RESP	0x00004448
+#define BUFMGR_HWDIAG_0			0x0000444c
+#define BUFMGR_HWDIAG_1			0x00004450
+#define BUFMGR_HWDIAG_2			0x00004454
+/* 0x4458 --> 0x4800 unused */
+
+/* Read DMA control registers */
+#define RDMAC_MODE			0x00004800
+#define  RDMAC_MODE_RESET		 0x00000001
+#define  RDMAC_MODE_ENABLE		 0x00000002
+#define  RDMAC_MODE_TGTABORT_ENAB	 0x00000004
+#define  RDMAC_MODE_MSTABORT_ENAB	 0x00000008
+#define  RDMAC_MODE_PARITYERR_ENAB	 0x00000010
+#define  RDMAC_MODE_ADDROFLOW_ENAB	 0x00000020
+#define  RDMAC_MODE_FIFOOFLOW_ENAB	 0x00000040
+#define  RDMAC_MODE_FIFOURUN_ENAB	 0x00000080
+#define  RDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
+#define  RDMAC_MODE_LNGREAD_ENAB	 0x00000200
+#define  RDMAC_MODE_SPLIT_ENABLE	 0x00000800
+#define  RDMAC_MODE_BD_SBD_CRPT_ENAB	 0x00000800
+#define  RDMAC_MODE_SPLIT_RESET		 0x00001000
+#define  RDMAC_MODE_MBUF_RBD_CRPT_ENAB	 0x00001000
+#define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB	 0x00002000
+#define  RDMAC_MODE_FIFO_SIZE_128	 0x00020000
+#define  RDMAC_MODE_FIFO_LONG_BURST	 0x00030000
+#define  RDMAC_MODE_JMB_2K_MMRR		 0x00800000
+#define  RDMAC_MODE_MULT_DMA_RD_DIS	 0x01000000
+#define  RDMAC_MODE_IPV4_LSO_EN		 0x08000000
+#define  RDMAC_MODE_IPV6_LSO_EN		 0x10000000
+#define  RDMAC_MODE_H2BNC_VLAN_DET	 0x20000000
+#define RDMAC_STATUS			0x00004804
+#define  RDMAC_STATUS_TGTABORT		 0x00000004
+#define  RDMAC_STATUS_MSTABORT		 0x00000008
+#define  RDMAC_STATUS_PARITYERR		 0x00000010
+#define  RDMAC_STATUS_ADDROFLOW		 0x00000020
+#define  RDMAC_STATUS_FIFOOFLOW		 0x00000040
+#define  RDMAC_STATUS_FIFOURUN		 0x00000080
+#define  RDMAC_STATUS_FIFOOREAD		 0x00000100
+#define  RDMAC_STATUS_LNGREAD		 0x00000200
+/* 0x4808 --> 0x4890 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG2		0x00004890
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL2	0x000048a0
+
+#define TG3_RDMA_RSRVCTRL_REG		0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX	 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K	 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK	 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K	 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK	 0x000ff000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B	 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK	 0xffe00000
+/* 0x4904 --> 0x4910 unused */
+
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL	0x00004910
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K	 0x00030000
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K	 0x000c0000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719	 0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720	 0x00200000
+/* 0x4914 --> 0x4be0 unused */
+
+#define TG3_NUM_RDMA_CHANNELS		4
+#define TG3_RDMA_LENGTH			0x00004be0
+
+/* Write DMA control registers */
+#define WDMAC_MODE			0x00004c00
+#define  WDMAC_MODE_RESET		 0x00000001
+#define  WDMAC_MODE_ENABLE		 0x00000002
+#define  WDMAC_MODE_TGTABORT_ENAB	 0x00000004
+#define  WDMAC_MODE_MSTABORT_ENAB	 0x00000008
+#define  WDMAC_MODE_PARITYERR_ENAB	 0x00000010
+#define  WDMAC_MODE_ADDROFLOW_ENAB	 0x00000020
+#define  WDMAC_MODE_FIFOOFLOW_ENAB	 0x00000040
+#define  WDMAC_MODE_FIFOURUN_ENAB	 0x00000080
+#define  WDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
+#define  WDMAC_MODE_LNGREAD_ENAB	 0x00000200
+#define  WDMAC_MODE_RX_ACCEL		 0x00000400
+#define  WDMAC_MODE_STATUS_TAG_FIX	 0x20000000
+#define  WDMAC_MODE_BURST_ALL_DATA	 0xc0000000
+#define WDMAC_STATUS			0x00004c04
+#define  WDMAC_STATUS_TGTABORT		 0x00000004
+#define  WDMAC_STATUS_MSTABORT		 0x00000008
+#define  WDMAC_STATUS_PARITYERR		 0x00000010
+#define  WDMAC_STATUS_ADDROFLOW		 0x00000020
+#define  WDMAC_STATUS_FIFOOFLOW		 0x00000040
+#define  WDMAC_STATUS_FIFOURUN		 0x00000080
+#define  WDMAC_STATUS_FIFOOREAD		 0x00000100
+#define  WDMAC_STATUS_LNGREAD		 0x00000200
+/* 0x4c08 --> 0x5000 unused */
+
+/* Per-cpu register offsets (arm9) */
+#define CPU_MODE			0x00000000
+#define  CPU_MODE_RESET			 0x00000001
+#define  CPU_MODE_HALT			 0x00000400
+#define CPU_STATE			0x00000004
+#define CPU_EVTMASK			0x00000008
+/* 0xc --> 0x1c reserved */
+#define CPU_PC				0x0000001c
+#define CPU_INSN			0x00000020
+#define CPU_SPAD_UFLOW			0x00000024
+#define CPU_WDOG_CLEAR			0x00000028
+#define CPU_WDOG_VECTOR			0x0000002c
+#define CPU_WDOG_PC			0x00000030
+#define CPU_HW_BP			0x00000034
+/* 0x38 --> 0x44 unused */
+#define CPU_WDOG_SAVED_STATE		0x00000044
+#define CPU_LAST_BRANCH_ADDR		0x00000048
+#define CPU_SPAD_UFLOW_SET		0x0000004c
+/* 0x50 --> 0x200 unused */
+#define CPU_R0				0x00000200
+#define CPU_R1				0x00000204
+#define CPU_R2				0x00000208
+#define CPU_R3				0x0000020c
+#define CPU_R4				0x00000210
+#define CPU_R5				0x00000214
+#define CPU_R6				0x00000218
+#define CPU_R7				0x0000021c
+#define CPU_R8				0x00000220
+#define CPU_R9				0x00000224
+#define CPU_R10				0x00000228
+#define CPU_R11				0x0000022c
+#define CPU_R12				0x00000230
+#define CPU_R13				0x00000234
+#define CPU_R14				0x00000238
+#define CPU_R15				0x0000023c
+#define CPU_R16				0x00000240
+#define CPU_R17				0x00000244
+#define CPU_R18				0x00000248
+#define CPU_R19				0x0000024c
+#define CPU_R20				0x00000250
+#define CPU_R21				0x00000254
+#define CPU_R22				0x00000258
+#define CPU_R23				0x0000025c
+#define CPU_R24				0x00000260
+#define CPU_R25				0x00000264
+#define CPU_R26				0x00000268
+#define CPU_R27				0x0000026c
+#define CPU_R28				0x00000270
+#define CPU_R29				0x00000274
+#define CPU_R30				0x00000278
+#define CPU_R31				0x0000027c
+/* 0x280 --> 0x400 unused */
+
+#define RX_CPU_BASE			0x00005000
+#define RX_CPU_MODE			0x00005000
+#define RX_CPU_STATE			0x00005004
+#define RX_CPU_PGMCTR			0x0000501c
+#define RX_CPU_HWBKPT			0x00005034
+#define TX_CPU_BASE			0x00005400
+#define TX_CPU_MODE			0x00005400
+#define TX_CPU_STATE			0x00005404
+#define TX_CPU_PGMCTR			0x0000541c
+
+#define VCPU_STATUS			0x00005100
+#define  VCPU_STATUS_INIT_DONE		 0x04000000
+#define  VCPU_STATUS_DRV_RESET		 0x08000000
+
+#define VCPU_CFGSHDW			0x00005104
+#define  VCPU_CFGSHDW_WOL_ENABLE	 0x00000001
+#define  VCPU_CFGSHDW_WOL_MAGPKT	 0x00000004
+#define  VCPU_CFGSHDW_ASPM_DBNC		 0x00001000
+
+/* Mailboxes */
+#define GRCMBOX_BASE			0x00005600
+#define GRCMBOX_INTERRUPT_0		0x00005800 /* 64-bit */
+#define GRCMBOX_INTERRUPT_1		0x00005808 /* 64-bit */
+#define GRCMBOX_INTERRUPT_2		0x00005810 /* 64-bit */
+#define GRCMBOX_INTERRUPT_3		0x00005818 /* 64-bit */
+#define GRCMBOX_GENERAL_0		0x00005820 /* 64-bit */
+#define GRCMBOX_GENERAL_1		0x00005828 /* 64-bit */
+#define GRCMBOX_GENERAL_2		0x00005830 /* 64-bit */
+#define GRCMBOX_GENERAL_3		0x00005838 /* 64-bit */
+#define GRCMBOX_GENERAL_4		0x00005840 /* 64-bit */
+#define GRCMBOX_GENERAL_5		0x00005848 /* 64-bit */
+#define GRCMBOX_GENERAL_6		0x00005850 /* 64-bit */
+#define GRCMBOX_GENERAL_7		0x00005858 /* 64-bit */
+#define GRCMBOX_RELOAD_STAT		0x00005860 /* 64-bit */
+#define GRCMBOX_RCVSTD_PROD_IDX		0x00005868 /* 64-bit */
+#define GRCMBOX_RCVJUMBO_PROD_IDX	0x00005870 /* 64-bit */
+#define GRCMBOX_RCVMINI_PROD_IDX	0x00005878 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_0	0x00005880 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_1	0x00005888 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_2	0x00005890 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_3	0x00005898 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_4	0x000058a0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_5	0x000058a8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_6	0x000058b0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_7	0x000058b8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_8	0x000058c0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_9	0x000058c8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_10	0x000058d0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_11	0x000058d8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_12	0x000058e0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_13	0x000058e8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_14	0x000058f0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_15	0x000058f8 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_0	0x00005900 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_1	0x00005908 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_2	0x00005910 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_3	0x00005918 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_4	0x00005920 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_5	0x00005928 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_6	0x00005930 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_7	0x00005938 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_8	0x00005940 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_9	0x00005948 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_10	0x00005950 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_11	0x00005958 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_12	0x00005960 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_13	0x00005968 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_14	0x00005970 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_15	0x00005978 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_0	0x00005980 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_1	0x00005988 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_2	0x00005990 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_3	0x00005998 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_4	0x000059a0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_5	0x000059a8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_6	0x000059b0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_7	0x000059b8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_8	0x000059c0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_9	0x000059c8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_10	0x000059d0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_11	0x000059d8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_12	0x000059e0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_13	0x000059e8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_14	0x000059f0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_15	0x000059f8 /* 64-bit */
+#define GRCMBOX_HIGH_PRIO_EV_VECTOR	0x00005a00
+#define GRCMBOX_HIGH_PRIO_EV_MASK	0x00005a04
+#define GRCMBOX_LOW_PRIO_EV_VEC		0x00005a08
+#define GRCMBOX_LOW_PRIO_EV_MASK	0x00005a0c
+/* 0x5a10 --> 0x5c00 */
+
+/* Flow Through queues */
+#define FTQ_RESET			0x00005c00
+/* 0x5c04 --> 0x5c10 unused */
+#define FTQ_DMA_NORM_READ_CTL		0x00005c10
+#define FTQ_DMA_NORM_READ_FULL_CNT	0x00005c14
+#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ	0x00005c18
+#define FTQ_DMA_NORM_READ_WRITE_PEEK	0x00005c1c
+#define FTQ_DMA_HIGH_READ_CTL		0x00005c20
+#define FTQ_DMA_HIGH_READ_FULL_CNT	0x00005c24
+#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ	0x00005c28
+#define FTQ_DMA_HIGH_READ_WRITE_PEEK	0x00005c2c
+#define FTQ_DMA_COMP_DISC_CTL		0x00005c30
+#define FTQ_DMA_COMP_DISC_FULL_CNT	0x00005c34
+#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ	0x00005c38
+#define FTQ_DMA_COMP_DISC_WRITE_PEEK	0x00005c3c
+#define FTQ_SEND_BD_COMP_CTL		0x00005c40
+#define FTQ_SEND_BD_COMP_FULL_CNT	0x00005c44
+#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ	0x00005c48
+#define FTQ_SEND_BD_COMP_WRITE_PEEK	0x00005c4c
+#define FTQ_SEND_DATA_INIT_CTL		0x00005c50
+#define FTQ_SEND_DATA_INIT_FULL_CNT	0x00005c54
+#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ	0x00005c58
+#define FTQ_SEND_DATA_INIT_WRITE_PEEK	0x00005c5c
+#define FTQ_DMA_NORM_WRITE_CTL		0x00005c60
+#define FTQ_DMA_NORM_WRITE_FULL_CNT	0x00005c64
+#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ	0x00005c68
+#define FTQ_DMA_NORM_WRITE_WRITE_PEEK	0x00005c6c
+#define FTQ_DMA_HIGH_WRITE_CTL		0x00005c70
+#define FTQ_DMA_HIGH_WRITE_FULL_CNT	0x00005c74
+#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ	0x00005c78
+#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK	0x00005c7c
+#define FTQ_SWTYPE1_CTL			0x00005c80
+#define FTQ_SWTYPE1_FULL_CNT		0x00005c84
+#define FTQ_SWTYPE1_FIFO_ENQDEQ		0x00005c88
+#define FTQ_SWTYPE1_WRITE_PEEK		0x00005c8c
+#define FTQ_SEND_DATA_COMP_CTL		0x00005c90
+#define FTQ_SEND_DATA_COMP_FULL_CNT	0x00005c94
+#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ	0x00005c98
+#define FTQ_SEND_DATA_COMP_WRITE_PEEK	0x00005c9c
+#define FTQ_HOST_COAL_CTL		0x00005ca0
+#define FTQ_HOST_COAL_FULL_CNT		0x00005ca4
+#define FTQ_HOST_COAL_FIFO_ENQDEQ	0x00005ca8
+#define FTQ_HOST_COAL_WRITE_PEEK	0x00005cac
+#define FTQ_MAC_TX_CTL			0x00005cb0
+#define FTQ_MAC_TX_FULL_CNT		0x00005cb4
+#define FTQ_MAC_TX_FIFO_ENQDEQ		0x00005cb8
+#define FTQ_MAC_TX_WRITE_PEEK		0x00005cbc
+#define FTQ_MB_FREE_CTL			0x00005cc0
+#define FTQ_MB_FREE_FULL_CNT		0x00005cc4
+#define FTQ_MB_FREE_FIFO_ENQDEQ		0x00005cc8
+#define FTQ_MB_FREE_WRITE_PEEK		0x00005ccc
+#define FTQ_RCVBD_COMP_CTL		0x00005cd0
+#define FTQ_RCVBD_COMP_FULL_CNT		0x00005cd4
+#define FTQ_RCVBD_COMP_FIFO_ENQDEQ	0x00005cd8
+#define FTQ_RCVBD_COMP_WRITE_PEEK	0x00005cdc
+#define FTQ_RCVLST_PLMT_CTL		0x00005ce0
+#define FTQ_RCVLST_PLMT_FULL_CNT	0x00005ce4
+#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ	0x00005ce8
+#define FTQ_RCVLST_PLMT_WRITE_PEEK	0x00005cec
+#define FTQ_RCVDATA_INI_CTL		0x00005cf0
+#define FTQ_RCVDATA_INI_FULL_CNT	0x00005cf4
+#define FTQ_RCVDATA_INI_FIFO_ENQDEQ	0x00005cf8
+#define FTQ_RCVDATA_INI_WRITE_PEEK	0x00005cfc
+#define FTQ_RCVDATA_COMP_CTL		0x00005d00
+#define FTQ_RCVDATA_COMP_FULL_CNT	0x00005d04
+#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ	0x00005d08
+#define FTQ_RCVDATA_COMP_WRITE_PEEK	0x00005d0c
+#define FTQ_SWTYPE2_CTL			0x00005d10
+#define FTQ_SWTYPE2_FULL_CNT		0x00005d14
+#define FTQ_SWTYPE2_FIFO_ENQDEQ		0x00005d18
+#define FTQ_SWTYPE2_WRITE_PEEK		0x00005d1c
+/* 0x5d20 --> 0x6000 unused */
+
+/* Message signaled interrupt registers */
+#define MSGINT_MODE			0x00006000
+#define  MSGINT_MODE_RESET		 0x00000001
+#define  MSGINT_MODE_ENABLE		 0x00000002
+#define  MSGINT_MODE_ONE_SHOT_DISABLE	 0x00000020
+#define  MSGINT_MODE_MULTIVEC_EN	 0x00000080
+#define MSGINT_STATUS			0x00006004
+#define  MSGINT_STATUS_MSI_REQ		 0x00000001
+#define MSGINT_FIFO			0x00006008
+/* 0x600c --> 0x6400 unused */
+
+/* DMA completion registers */
+#define DMAC_MODE			0x00006400
+#define  DMAC_MODE_RESET		 0x00000001
+#define  DMAC_MODE_ENABLE		 0x00000002
+/* 0x6404 --> 0x6800 unused */
+
+/* GRC registers */
+#define GRC_MODE			0x00006800
+#define  GRC_MODE_UPD_ON_COAL		0x00000001
+#define  GRC_MODE_BSWAP_NONFRM_DATA	0x00000002
+#define  GRC_MODE_WSWAP_NONFRM_DATA	0x00000004
+#define  GRC_MODE_BSWAP_DATA		0x00000010
+#define  GRC_MODE_WSWAP_DATA		0x00000020
+#define  GRC_MODE_BYTE_SWAP_B2HRX_DATA	0x00000040
+#define  GRC_MODE_WORD_SWAP_B2HRX_DATA	0x00000080
+#define  GRC_MODE_SPLITHDR		0x00000100
+#define  GRC_MODE_NOFRM_CRACKING	0x00000200
+#define  GRC_MODE_INCL_CRC		0x00000400
+#define  GRC_MODE_ALLOW_BAD_FRMS	0x00000800
+#define  GRC_MODE_NOIRQ_ON_SENDS	0x00002000
+#define  GRC_MODE_NOIRQ_ON_RCV		0x00004000
+#define  GRC_MODE_FORCE_PCI32BIT	0x00008000
+#define  GRC_MODE_B2HRX_ENABLE		0x00008000
+#define  GRC_MODE_HOST_STACKUP		0x00010000
+#define  GRC_MODE_HOST_SENDBDS		0x00020000
+#define  GRC_MODE_HTX2B_ENABLE		0x00040000
+#define  GRC_MODE_TIME_SYNC_ENABLE	0x00080000
+#define  GRC_MODE_NO_TX_PHDR_CSUM	0x00100000
+#define  GRC_MODE_NVRAM_WR_ENABLE	0x00200000
+#define  GRC_MODE_PCIE_TL_SEL		0x00000000
+#define  GRC_MODE_PCIE_PL_SEL		0x00400000
+#define  GRC_MODE_NO_RX_PHDR_CSUM	0x00800000
+#define  GRC_MODE_IRQ_ON_TX_CPU_ATTN	0x01000000
+#define  GRC_MODE_IRQ_ON_RX_CPU_ATTN	0x02000000
+#define  GRC_MODE_IRQ_ON_MAC_ATTN	0x04000000
+#define  GRC_MODE_IRQ_ON_DMA_ATTN	0x08000000
+#define  GRC_MODE_IRQ_ON_FLOW_ATTN	0x10000000
+#define  GRC_MODE_4X_NIC_SEND_RINGS	0x20000000
+#define  GRC_MODE_PCIE_DL_SEL		0x20000000
+#define  GRC_MODE_MCAST_FRM_ENABLE	0x40000000
+#define  GRC_MODE_PCIE_HI_1K_EN		0x80000000
+#define  GRC_MODE_PCIE_PORT_MASK	(GRC_MODE_PCIE_TL_SEL | \
+					 GRC_MODE_PCIE_PL_SEL | \
+					 GRC_MODE_PCIE_DL_SEL | \
+					 GRC_MODE_PCIE_HI_1K_EN)
+#define GRC_MISC_CFG			0x00006804
+#define  GRC_MISC_CFG_CORECLK_RESET	0x00000001
+#define  GRC_MISC_CFG_PRESCALAR_MASK	0x000000fe
+#define  GRC_MISC_CFG_PRESCALAR_SHIFT	1
+#define  GRC_MISC_CFG_BOARD_ID_MASK	0x0001e000
+#define  GRC_MISC_CFG_BOARD_ID_5700	0x0001e000
+#define  GRC_MISC_CFG_BOARD_ID_5701	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5702FE	0x00004000
+#define  GRC_MISC_CFG_BOARD_ID_5703	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5703S	0x00002000
+#define  GRC_MISC_CFG_BOARD_ID_5704	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000
+#define  GRC_MISC_CFG_BOARD_ID_5704_A2	0x00008000
+#define  GRC_MISC_CFG_BOARD_ID_5788	0x00010000
+#define  GRC_MISC_CFG_BOARD_ID_5788M	0x00018000
+#define  GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define  GRC_MISC_CFG_EPHY_IDDQ		0x00200000
+#define  GRC_MISC_CFG_KEEP_GPHY_POWER	0x04000000
+#define GRC_LOCAL_CTRL			0x00006808
+#define  GRC_LCLCTRL_INT_ACTIVE		0x00000001
+#define  GRC_LCLCTRL_CLEARINT		0x00000002
+#define  GRC_LCLCTRL_SETINT		0x00000004
+#define  GRC_LCLCTRL_INT_ON_ATTN	0x00000008
+#define  GRC_LCLCTRL_GPIO_UART_SEL	0x00000010	/* 5755 only */
+#define  GRC_LCLCTRL_USE_SIG_DETECT	0x00000010	/* 5714/5780 only */
+#define  GRC_LCLCTRL_USE_EXT_SIG_DETECT	0x00000020	/* 5714/5780 only */
+#define  GRC_LCLCTRL_GPIO_INPUT3	0x00000020
+#define  GRC_LCLCTRL_GPIO_OE3		0x00000040
+#define  GRC_LCLCTRL_GPIO_OUTPUT3	0x00000080
+#define  GRC_LCLCTRL_GPIO_INPUT0	0x00000100
+#define  GRC_LCLCTRL_GPIO_INPUT1	0x00000200
+#define  GRC_LCLCTRL_GPIO_INPUT2	0x00000400
+#define  GRC_LCLCTRL_GPIO_OE0		0x00000800
+#define  GRC_LCLCTRL_GPIO_OE1		0x00001000
+#define  GRC_LCLCTRL_GPIO_OE2		0x00002000
+#define  GRC_LCLCTRL_GPIO_OUTPUT0	0x00004000
+#define  GRC_LCLCTRL_GPIO_OUTPUT1	0x00008000
+#define  GRC_LCLCTRL_GPIO_OUTPUT2	0x00010000
+#define  GRC_LCLCTRL_EXTMEM_ENABLE	0x00020000
+#define  GRC_LCLCTRL_MEMSZ_MASK		0x001c0000
+#define  GRC_LCLCTRL_MEMSZ_256K		0x00000000
+#define  GRC_LCLCTRL_MEMSZ_512K		0x00040000
+#define  GRC_LCLCTRL_MEMSZ_1M		0x00080000
+#define  GRC_LCLCTRL_MEMSZ_2M		0x000c0000
+#define  GRC_LCLCTRL_MEMSZ_4M		0x00100000
+#define  GRC_LCLCTRL_MEMSZ_8M		0x00140000
+#define  GRC_LCLCTRL_MEMSZ_16M		0x00180000
+#define  GRC_LCLCTRL_BANK_SELECT	0x00200000
+#define  GRC_LCLCTRL_SSRAM_TYPE		0x00400000
+#define  GRC_LCLCTRL_AUTO_SEEPROM	0x01000000
+#define GRC_TIMER			0x0000680c
+#define GRC_RX_CPU_EVENT		0x00006810
+#define  GRC_RX_CPU_DRIVER_EVENT	0x00004000
+#define GRC_RX_TIMER_REF		0x00006814
+#define GRC_RX_CPU_SEM			0x00006818
+#define GRC_REMOTE_RX_CPU_ATTN		0x0000681c
+#define GRC_TX_CPU_EVENT		0x00006820
+#define GRC_TX_TIMER_REF		0x00006824
+#define GRC_TX_CPU_SEM			0x00006828
+#define GRC_REMOTE_TX_CPU_ATTN		0x0000682c
+#define GRC_MEM_POWER_UP		0x00006830 /* 64-bit */
+#define GRC_EEPROM_ADDR			0x00006838
+#define  EEPROM_ADDR_WRITE		0x00000000
+#define  EEPROM_ADDR_READ		0x80000000
+#define  EEPROM_ADDR_COMPLETE		0x40000000
+#define  EEPROM_ADDR_FSM_RESET		0x20000000
+#define  EEPROM_ADDR_DEVID_MASK		0x1c000000
+#define  EEPROM_ADDR_DEVID_SHIFT	26
+#define  EEPROM_ADDR_START		0x02000000
+#define  EEPROM_ADDR_CLKPERD_SHIFT	16
+#define  EEPROM_ADDR_ADDR_MASK		0x0000ffff
+#define  EEPROM_ADDR_ADDR_SHIFT		0
+#define  EEPROM_DEFAULT_CLOCK_PERIOD	0x60
+#define  EEPROM_CHIP_SIZE		(64 * 1024)
+#define GRC_EEPROM_DATA			0x0000683c
+#define GRC_EEPROM_CTRL			0x00006840
+#define GRC_MDI_CTRL			0x00006844
+#define GRC_SEEPROM_DELAY		0x00006848
+/* 0x684c --> 0x6890 unused */
+#define GRC_VCPU_EXT_CTRL		0x00006890
+#define GRC_VCPU_EXT_CTRL_HALT_CPU	 0x00400000
+#define GRC_VCPU_EXT_CTRL_DISABLE_WOL	 0x20000000
+#define GRC_FASTBOOT_PC			0x00006894	/* 5752, 5755, 5787 */
+
+#define TG3_EAV_REF_CLCK_LSB		0x00006900
+#define TG3_EAV_REF_CLCK_MSB		0x00006904
+#define TG3_EAV_REF_CLCK_CTL		0x00006908
+#define  TG3_EAV_REF_CLCK_CTL_STOP	 0x00000002
+#define  TG3_EAV_REF_CLCK_CTL_RESUME	 0x00000004
+#define  TG3_EAV_CTL_TSYNC_GPIO_MASK	 (0x3 << 16)
+#define  TG3_EAV_CTL_TSYNC_WDOG0	 (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB		0x00006918
+#define TG3_EAV_WATCHDOG0_MSB		0x0000691c
+#define  TG3_EAV_WATCHDOG0_EN		 (1 << 31)
+#define  TG3_EAV_WATCHDOG_MSB_MASK	0x7fffffff
+
+#define TG3_EAV_REF_CLK_CORRECT_CTL	0x00006928
+#define  TG3_EAV_REF_CLK_CORRECT_EN	 (1 << 31)
+#define  TG3_EAV_REF_CLK_CORRECT_NEG	 (1 << 30)
+
+#define TG3_EAV_REF_CLK_CORRECT_MASK	0xffffff
+
+/* 0x692c --> 0x7000 unused */
+
+/* NVRAM Control registers */
+#define NVRAM_CMD			0x00007000
+#define  NVRAM_CMD_RESET		 0x00000001
+#define  NVRAM_CMD_DONE			 0x00000008
+#define  NVRAM_CMD_GO			 0x00000010
+#define  NVRAM_CMD_WR			 0x00000020
+#define  NVRAM_CMD_RD			 0x00000000
+#define  NVRAM_CMD_ERASE		 0x00000040
+#define  NVRAM_CMD_FIRST		 0x00000080
+#define  NVRAM_CMD_LAST			 0x00000100
+#define  NVRAM_CMD_WREN			 0x00010000
+#define  NVRAM_CMD_WRDI			 0x00020000
+#define NVRAM_STAT			0x00007004
+#define NVRAM_WRDATA			0x00007008
+#define NVRAM_ADDR			0x0000700c
+#define  NVRAM_ADDR_MSK			0x00ffffff
+#define NVRAM_RDDATA			0x00007010
+#define NVRAM_CFG1			0x00007014
+#define  NVRAM_CFG1_FLASHIF_ENAB	 0x00000001
+#define  NVRAM_CFG1_BUFFERED_MODE	 0x00000002
+#define  NVRAM_CFG1_PASS_THRU		 0x00000004
+#define  NVRAM_CFG1_STATUS_BITS		 0x00000070
+#define  NVRAM_CFG1_BIT_BANG		 0x00000008
+#define  NVRAM_CFG1_FLASH_SIZE		 0x02000000
+#define  NVRAM_CFG1_COMPAT_BYPASS	 0x80000000
+#define  NVRAM_CFG1_VENDOR_MASK		 0x03000003
+#define  FLASH_VENDOR_ATMEL_EEPROM	 0x02000000
+#define  FLASH_VENDOR_ATMEL_FLASH_BUFFERED	 0x02000003
+#define  FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED	 0x00000003
+#define  FLASH_VENDOR_ST			 0x03000001
+#define  FLASH_VENDOR_SAIFUN		 0x01000003
+#define  FLASH_VENDOR_SST_SMALL		 0x00000001
+#define  FLASH_VENDOR_SST_LARGE		 0x02000001
+#define  NVRAM_CFG1_5752VENDOR_MASK	 0x03c00003
+#define  NVRAM_CFG1_5762VENDOR_MASK	 0x03e00003
+#define  FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ	 0x00000000
+#define  FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ	 0x02000000
+#define  FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED	 0x02000003
+#define  FLASH_5752VENDOR_ST_M45PE10	 0x02400000
+#define  FLASH_5752VENDOR_ST_M45PE20	 0x02400002
+#define  FLASH_5752VENDOR_ST_M45PE40	 0x02400001
+#define  FLASH_5755VENDOR_ATMEL_FLASH_1	 0x03400001
+#define  FLASH_5755VENDOR_ATMEL_FLASH_2	 0x03400002
+#define  FLASH_5755VENDOR_ATMEL_FLASH_3	 0x03400000
+#define  FLASH_5755VENDOR_ATMEL_FLASH_4	 0x00000003
+#define  FLASH_5755VENDOR_ATMEL_FLASH_5	 0x02000003
+#define  FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ	 0x03c00003
+#define  FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ	 0x03c00002
+#define  FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ	 0x03000003
+#define  FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ	 0x03000002
+#define  FLASH_5787VENDOR_MICRO_EEPROM_64KHZ	 0x03000000
+#define  FLASH_5787VENDOR_MICRO_EEPROM_376KHZ	 0x02000000
+#define  FLASH_5761VENDOR_ATMEL_MDB021D	 0x00800003
+#define  FLASH_5761VENDOR_ATMEL_MDB041D	 0x00800000
+#define  FLASH_5761VENDOR_ATMEL_MDB081D	 0x00800002
+#define  FLASH_5761VENDOR_ATMEL_MDB161D	 0x00800001
+#define  FLASH_5761VENDOR_ATMEL_ADB021D	 0x00000003
+#define  FLASH_5761VENDOR_ATMEL_ADB041D	 0x00000000
+#define  FLASH_5761VENDOR_ATMEL_ADB081D	 0x00000002
+#define  FLASH_5761VENDOR_ATMEL_ADB161D	 0x00000001
+#define  FLASH_5761VENDOR_ST_M_M45PE20	 0x02800001
+#define  FLASH_5761VENDOR_ST_M_M45PE40	 0x02800000
+#define  FLASH_5761VENDOR_ST_M_M45PE80	 0x02800002
+#define  FLASH_5761VENDOR_ST_M_M45PE16	 0x02800003
+#define  FLASH_5761VENDOR_ST_A_M45PE20	 0x02000001
+#define  FLASH_5761VENDOR_ST_A_M45PE40	 0x02000000
+#define  FLASH_5761VENDOR_ST_A_M45PE80	 0x02000002
+#define  FLASH_5761VENDOR_ST_A_M45PE16	 0x02000003
+#define  FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000
+#define  FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000
+#define  FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002
+#define  FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002
+#define  FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001
+#define  FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001
+#define  FLASH_5717VENDOR_ATMEL_EEPROM	 0x02000001
+#define  FLASH_5717VENDOR_MICRO_EEPROM	 0x02000003
+#define  FLASH_5717VENDOR_ATMEL_MDB011D	 0x01000001
+#define  FLASH_5717VENDOR_ATMEL_MDB021D	 0x01000003
+#define  FLASH_5717VENDOR_ST_M_M25PE10	 0x02000000
+#define  FLASH_5717VENDOR_ST_M_M25PE20	 0x02000002
+#define  FLASH_5717VENDOR_ST_M_M45PE10	 0x00000001
+#define  FLASH_5717VENDOR_ST_M_M45PE20	 0x00000003
+#define  FLASH_5717VENDOR_ATMEL_ADB011B	 0x01400000
+#define  FLASH_5717VENDOR_ATMEL_ADB021B	 0x01400002
+#define  FLASH_5717VENDOR_ATMEL_ADB011D	 0x01400001
+#define  FLASH_5717VENDOR_ATMEL_ADB021D	 0x01400003
+#define  FLASH_5717VENDOR_ST_A_M25PE10	 0x02400000
+#define  FLASH_5717VENDOR_ST_A_M25PE20	 0x02400002
+#define  FLASH_5717VENDOR_ST_A_M45PE10	 0x02400001
+#define  FLASH_5717VENDOR_ST_A_M45PE20	 0x02400003
+#define  FLASH_5717VENDOR_ATMEL_45USPT	 0x03400000
+#define  FLASH_5717VENDOR_ST_25USPT	 0x03400002
+#define  FLASH_5717VENDOR_ST_45USPT	 0x03400001
+#define  FLASH_5720_EEPROM_HD		 0x00000001
+#define  FLASH_5720_EEPROM_LD		 0x00000003
+#define  FLASH_5762_EEPROM_HD		 0x02000001
+#define  FLASH_5762_EEPROM_LD		 0x02000003
+#define  FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define  FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define  FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define  FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define  FLASH_5720VENDOR_M_ST_M25PE10	 0x02000000
+#define  FLASH_5720VENDOR_M_ST_M25PE20	 0x02000002
+#define  FLASH_5720VENDOR_M_ST_M25PE40	 0x02000001
+#define  FLASH_5720VENDOR_M_ST_M25PE80	 0x02000003
+#define  FLASH_5720VENDOR_M_ST_M45PE10	 0x03000000
+#define  FLASH_5720VENDOR_M_ST_M45PE20	 0x03000002
+#define  FLASH_5720VENDOR_M_ST_M45PE40	 0x03000001
+#define  FLASH_5720VENDOR_M_ST_M45PE80	 0x03000003
+#define  FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define  FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define  FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define  FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define  FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define  FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define  FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define  FLASH_5720VENDOR_A_ST_M25PE10	 0x02800000
+#define  FLASH_5720VENDOR_A_ST_M25PE20	 0x02800002
+#define  FLASH_5720VENDOR_A_ST_M25PE40	 0x02800001
+#define  FLASH_5720VENDOR_A_ST_M25PE80	 0x02800003
+#define  FLASH_5720VENDOR_A_ST_M45PE10	 0x02c00000
+#define  FLASH_5720VENDOR_A_ST_M45PE20	 0x02c00002
+#define  FLASH_5720VENDOR_A_ST_M45PE40	 0x02c00001
+#define  FLASH_5720VENDOR_A_ST_M45PE80	 0x02c00003
+#define  FLASH_5720VENDOR_ATMEL_45USPT	 0x03c00000
+#define  FLASH_5720VENDOR_ST_25USPT	 0x03c00002
+#define  FLASH_5720VENDOR_ST_45USPT	 0x03c00001
+#define  NVRAM_CFG1_5752PAGE_SIZE_MASK	 0x70000000
+#define  FLASH_5752PAGE_SIZE_256	 0x00000000
+#define  FLASH_5752PAGE_SIZE_512	 0x10000000
+#define  FLASH_5752PAGE_SIZE_1K		 0x20000000
+#define  FLASH_5752PAGE_SIZE_2K		 0x30000000
+#define  FLASH_5752PAGE_SIZE_4K		 0x40000000
+#define  FLASH_5752PAGE_SIZE_264	 0x50000000
+#define  FLASH_5752PAGE_SIZE_528	 0x60000000
+#define NVRAM_CFG2			0x00007018
+#define NVRAM_CFG3			0x0000701c
+#define NVRAM_SWARB			0x00007020
+#define  SWARB_REQ_SET0			 0x00000001
+#define  SWARB_REQ_SET1			 0x00000002
+#define  SWARB_REQ_SET2			 0x00000004
+#define  SWARB_REQ_SET3			 0x00000008
+#define  SWARB_REQ_CLR0			 0x00000010
+#define  SWARB_REQ_CLR1			 0x00000020
+#define  SWARB_REQ_CLR2			 0x00000040
+#define  SWARB_REQ_CLR3			 0x00000080
+#define  SWARB_GNT0			 0x00000100
+#define  SWARB_GNT1			 0x00000200
+#define  SWARB_GNT2			 0x00000400
+#define  SWARB_GNT3			 0x00000800
+#define  SWARB_REQ0			 0x00001000
+#define  SWARB_REQ1			 0x00002000
+#define  SWARB_REQ2			 0x00004000
+#define  SWARB_REQ3			 0x00008000
+#define NVRAM_ACCESS			0x00007024
+#define  ACCESS_ENABLE			 0x00000001
+#define  ACCESS_WR_ENABLE		 0x00000002
+#define NVRAM_WRITE1			0x00007028
+/* 0x702c unused */
+
+#define NVRAM_ADDR_LOCKOUT		0x00007030
+/* 0x7034 --> 0x7500 unused */
+
+#define OTP_MODE			0x00007500
+#define OTP_MODE_OTP_THRU_GRC		 0x00000001
+#define OTP_CTRL			0x00007504
+#define OTP_CTRL_OTP_PROG_ENABLE	 0x00200000
+#define OTP_CTRL_OTP_CMD_READ		 0x00000000
+#define OTP_CTRL_OTP_CMD_INIT		 0x00000008
+#define OTP_CTRL_OTP_CMD_START		 0x00000001
+#define OTP_STATUS			0x00007508
+#define OTP_STATUS_CMD_DONE		 0x00000001
+#define OTP_ADDRESS			0x0000750c
+#define OTP_ADDRESS_MAGIC1		 0x000000a0
+#define OTP_ADDRESS_MAGIC2		 0x00000080
+/* 0x7510 unused */
+
+#define OTP_READ_DATA			0x00007514
+/* 0x7518 --> 0x7c04 unused */
+
+#define PCIE_TRANSACTION_CFG		0x00007c04
+#define PCIE_TRANS_CFG_1SHOT_MSI	 0x20000000
+#define PCIE_TRANS_CFG_LOM		 0x00000020
+/* 0x7c08 --> 0x7d28 unused */
+
+#define PCIE_PWR_MGMT_THRESH		0x00007d28
+#define PCIE_PWR_MGMT_L1_THRESH_MSK	 0x0000ff00
+#define PCIE_PWR_MGMT_L1_THRESH_4MS	 0x0000ff00
+#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN	 0x01000000
+/* 0x7d2c --> 0x7d54 unused */
+
+#define TG3_PCIE_LNKCTL			0x00007d54
+#define  TG3_PCIE_LNKCTL_L1_PLL_PD_EN	 0x00000008
+#define  TG3_PCIE_LNKCTL_L1_PLL_PD_DIS	 0x00000080
+/* 0x7d58 --> 0x7e70 unused */
+
+#define TG3_PCIE_PHY_TSTCTL		0x00007e2c
+#define  TG3_PCIE_PHY_TSTCTL_PCIE10	 0x00000040
+#define  TG3_PCIE_PHY_TSTCTL_PSCRAM	 0x00000020
+
+#define TG3_PCIE_EIDLE_DELAY		0x00007e70
+#define  TG3_PCIE_EIDLE_DELAY_MASK	 0x0000001f
+#define  TG3_PCIE_EIDLE_DELAY_13_CLKS	 0x0000000c
+/* 0x7e74 --> 0x8000 unused */
+
+
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT		0x00007c00
+#define TG3_PCIE_DL_LO_FTSMAX		0x0000000c
+#define TG3_PCIE_DL_LO_FTSMAX_MSK	0x000000ff
+#define TG3_PCIE_DL_LO_FTSMAX_VAL	0x0000002c
+#define TG3_PCIE_PL_LO_PHYCTL1		 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN	  0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5		 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ	  0x80000000
+
+#define TG3_REG_BLK_SIZE		0x00008000
+
+/* OTP bit definitions */
+#define TG3_OTP_AGCTGT_MASK		0x000000e0
+#define TG3_OTP_AGCTGT_SHIFT		1
+#define TG3_OTP_HPFFLTR_MASK		0x00000300
+#define TG3_OTP_HPFFLTR_SHIFT		1
+#define TG3_OTP_HPFOVER_MASK		0x00000400
+#define TG3_OTP_HPFOVER_SHIFT		1
+#define TG3_OTP_LPFDIS_MASK		0x00000800
+#define TG3_OTP_LPFDIS_SHIFT		11
+#define TG3_OTP_VDAC_MASK		0xff000000
+#define TG3_OTP_VDAC_SHIFT		24
+#define TG3_OTP_10BTAMP_MASK		0x0000f000
+#define TG3_OTP_10BTAMP_SHIFT		8
+#define TG3_OTP_ROFF_MASK		0x00e00000
+#define TG3_OTP_ROFF_SHIFT		11
+#define TG3_OTP_RCOFF_MASK		0x001c0000
+#define TG3_OTP_RCOFF_SHIFT		16
+
+#define TG3_OTP_DEFAULT			0x286c1640
+
+
+/* Hardware Legacy NVRAM layout */
+#define TG3_NVM_VPD_OFF			0x100
+#define TG3_NVM_VPD_LEN			256
+
+/* Hardware Selfboot NVRAM layout */
+#define TG3_NVM_HWSB_CFG1		0x00000004
+#define  TG3_NVM_HWSB_CFG1_MAJMSK	0xf8000000
+#define  TG3_NVM_HWSB_CFG1_MAJSFT	27
+#define  TG3_NVM_HWSB_CFG1_MINMSK	0x07c00000
+#define  TG3_NVM_HWSB_CFG1_MINSFT	22
+
+#define TG3_EEPROM_MAGIC		0x669955aa
+#define TG3_EEPROM_MAGIC_FW		0xa5000000
+#define TG3_EEPROM_MAGIC_FW_MSK		0xff000000
+#define TG3_EEPROM_SB_FORMAT_MASK	0x00e00000
+#define TG3_EEPROM_SB_FORMAT_1		0x00200000
+#define TG3_EEPROM_SB_REVISION_MASK	0x001f0000
+#define TG3_EEPROM_SB_REVISION_0	0x00000000
+#define TG3_EEPROM_SB_REVISION_2	0x00020000
+#define TG3_EEPROM_SB_REVISION_3	0x00030000
+#define TG3_EEPROM_SB_REVISION_4	0x00040000
+#define TG3_EEPROM_SB_REVISION_5	0x00050000
+#define TG3_EEPROM_SB_REVISION_6	0x00060000
+#define TG3_EEPROM_MAGIC_HW		0xabcd
+#define TG3_EEPROM_MAGIC_HW_MSK		0xffff
+
+#define TG3_NVM_DIR_START		0x18
+#define TG3_NVM_DIR_END			0x78
+#define TG3_NVM_DIRENT_SIZE		0xc
+#define TG3_NVM_DIRTYPE_SHIFT		24
+#define TG3_NVM_DIRTYPE_LENMSK		0x003fffff
+#define TG3_NVM_DIRTYPE_ASFINI		1
+#define TG3_NVM_DIRTYPE_EXTVPD		20
+#define TG3_NVM_PTREV_BCVER		0x94
+#define TG3_NVM_BCVER_MAJMSK		0x0000ff00
+#define TG3_NVM_BCVER_MAJSFT		8
+#define TG3_NVM_BCVER_MINMSK		0x000000ff
+
+#define TG3_EEPROM_SB_F1R0_EDH_OFF	0x10
+#define TG3_EEPROM_SB_F1R2_EDH_OFF	0x14
+#define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
+#define TG3_EEPROM_SB_F1R3_EDH_OFF	0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF	0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF	0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF	0x4c
+#define TG3_EEPROM_SB_EDH_MAJ_MASK	0x00000700
+#define TG3_EEPROM_SB_EDH_MAJ_SHFT	8
+#define TG3_EEPROM_SB_EDH_MIN_MASK	0x000000ff
+#define TG3_EEPROM_SB_EDH_BLD_MASK	0x0000f800
+#define TG3_EEPROM_SB_EDH_BLD_SHFT	11
+
+
+/* 32K Window into NIC internal memory */
+#define NIC_SRAM_WIN_BASE		0x00008000
+
+/* Offsets into first 32k of NIC internal memory. */
+#define NIC_SRAM_PAGE_ZERO		0x00000000
+#define NIC_SRAM_SEND_RCB		0x00000100 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_RCV_RET_RCB		0x00000200 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_STATS_BLK		0x00000300
+#define NIC_SRAM_STATUS_BLK		0x00000b00
+
+#define NIC_SRAM_FIRMWARE_MBOX		0x00000b50
+#define  NIC_SRAM_FIRMWARE_MBOX_MAGIC1	 0x4B657654
+#define  NIC_SRAM_FIRMWARE_MBOX_MAGIC2	 0x4861764b /* !dma on linkchg */
+
+#define NIC_SRAM_DATA_SIG		0x00000b54
+#define  NIC_SRAM_DATA_SIG_MAGIC	 0x4b657654 /* ascii for 'KevT' */
+
+#define NIC_SRAM_DATA_CFG			0x00000b58
+#define  NIC_SRAM_DATA_CFG_LED_MODE_MASK	 0x0000000c
+#define  NIC_SRAM_DATA_CFG_LED_MODE_MAC		 0x00000000
+#define  NIC_SRAM_DATA_CFG_LED_MODE_PHY_1	 0x00000004
+#define  NIC_SRAM_DATA_CFG_LED_MODE_PHY_2	 0x00000008
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_MASK	 0x00000030
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN	 0x00000000
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER	 0x00000010
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER	 0x00000020
+#define  NIC_SRAM_DATA_CFG_WOL_ENABLE		 0x00000040
+#define  NIC_SRAM_DATA_CFG_ASF_ENABLE		 0x00000080
+#define  NIC_SRAM_DATA_CFG_EEPROM_WP		 0x00000100
+#define  NIC_SRAM_DATA_CFG_MINI_PCI		 0x00001000
+#define  NIC_SRAM_DATA_CFG_FIBER_WOL		 0x00004000
+#define  NIC_SRAM_DATA_CFG_NO_GPIO2		 0x00100000
+#define  NIC_SRAM_DATA_CFG_APE_ENABLE		 0x00200000
+
+#define NIC_SRAM_DATA_VER			0x00000b5c
+#define  NIC_SRAM_DATA_VER_SHIFT		 16
+
+#define NIC_SRAM_DATA_PHY_ID		0x00000b74
+#define  NIC_SRAM_DATA_PHY_ID1_MASK	 0xffff0000
+#define  NIC_SRAM_DATA_PHY_ID2_MASK	 0x0000ffff
+
+#define NIC_SRAM_FW_CMD_MBOX		0x00000b78
+#define  FWCMD_NICDRV_ALIVE		 0x00000001
+#define  FWCMD_NICDRV_PAUSE_FW		 0x00000002
+#define  FWCMD_NICDRV_IPV4ADDR_CHG	 0x00000003
+#define  FWCMD_NICDRV_IPV6ADDR_CHG	 0x00000004
+#define  FWCMD_NICDRV_FIX_DMAR		 0x00000005
+#define  FWCMD_NICDRV_FIX_DMAW		 0x00000006
+#define  FWCMD_NICDRV_LINK_UPDATE	 0x0000000c
+#define  FWCMD_NICDRV_ALIVE2		 0x0000000d
+#define  FWCMD_NICDRV_ALIVE3		 0x0000000e
+#define NIC_SRAM_FW_CMD_LEN_MBOX	0x00000b7c
+#define NIC_SRAM_FW_CMD_DATA_MBOX	0x00000b80
+#define NIC_SRAM_FW_ASF_STATUS_MBOX	0x00000c00
+#define NIC_SRAM_FW_DRV_STATE_MBOX	0x00000c04
+#define  DRV_STATE_START		 0x00000001
+#define  DRV_STATE_START_DONE		 0x80000001
+#define  DRV_STATE_UNLOAD		 0x00000002
+#define  DRV_STATE_UNLOAD_DONE		 0x80000002
+#define  DRV_STATE_WOL			 0x00000003
+#define  DRV_STATE_SUSPEND		 0x00000004
+
+#define NIC_SRAM_FW_RESET_TYPE_MBOX	0x00000c08
+
+#define NIC_SRAM_MAC_ADDR_HIGH_MBOX	0x00000c14
+#define NIC_SRAM_MAC_ADDR_LOW_MBOX	0x00000c18
+
+#define NIC_SRAM_WOL_MBOX		0x00000d30
+#define  WOL_SIGNATURE			 0x474c0000
+#define  WOL_DRV_STATE_SHUTDOWN		 0x00000001
+#define  WOL_DRV_WOL			 0x00000002
+#define  WOL_SET_MAGIC_PKT		 0x00000004
+
+#define NIC_SRAM_DATA_CFG_2		0x00000d38
+
+#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00004000
+#define  SHASTA_EXT_LED_MODE_MASK	 0x00018000
+#define  SHASTA_EXT_LED_LEGACY		 0x00000000
+#define  SHASTA_EXT_LED_SHARED		 0x00008000
+#define  SHASTA_EXT_LED_MAC		 0x00010000
+#define  SHASTA_EXT_LED_COMBO		 0x00018000
+
+#define NIC_SRAM_DATA_CFG_3		0x00000d3c
+#define  NIC_SRAM_ASPM_DEBOUNCE		 0x00000002
+#define  NIC_SRAM_LNK_FLAP_AVOID	 0x00400000
+#define  NIC_SRAM_1G_ON_VAUX_OK		 0x00800000
+
+#define NIC_SRAM_DATA_CFG_4		0x00000d60
+#define  NIC_SRAM_GMII_MODE		 0x00000002
+#define  NIC_SRAM_RGMII_INBAND_DISABLE	 0x00000004
+#define  NIC_SRAM_RGMII_EXT_IBND_RX_EN	 0x00000008
+#define  NIC_SRAM_RGMII_EXT_IBND_TX_EN	 0x00000010
+
+#define NIC_SRAM_CPMU_STATUS		0x00000e00
+#define  NIC_SRAM_CPMUSTAT_SIG		0x0000362c
+#define  NIC_SRAM_CPMUSTAT_SIG_MSK	0x0000ffff
+
+#define NIC_SRAM_DATA_CFG_5		0x00000e0c
+#define  NIC_SRAM_DISABLE_1G_HALF_ADV	0x00000002
+
+#define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000
+
+#define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000
+#define  NIC_SRAM_DMA_DESC_POOL_SIZE	 0x00002000
+#define NIC_SRAM_TX_BUFFER_DESC		0x00004000 /* 512 entries */
+#define NIC_SRAM_RX_BUFFER_DESC		0x00006000 /* 256 entries */
+#define NIC_SRAM_RX_JUMBO_BUFFER_DESC	0x00007000 /* 256 entries */
+#define NIC_SRAM_MBUF_POOL_BASE		0x00008000
+#define  NIC_SRAM_MBUF_POOL_SIZE96	 0x00018000
+#define  NIC_SRAM_MBUF_POOL_SIZE64	 0x00010000
+#define  NIC_SRAM_MBUF_POOL_BASE5705	0x00010000
+#define  NIC_SRAM_MBUF_POOL_SIZE5705	0x0000e000
+
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766	0x00030000
+#define  TG3_SRAM_RXCPU_SCRATCH_SIZE_57766	 0x00010000
+#define TG3_57766_FW_BASE_ADDR			0x00030000
+#define TG3_57766_FW_HANDSHAKE			0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP		0x51
+
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700	128
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755	64
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906	32
+
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700	64
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717	16
+
+
+/* Currently this is fixed. */
+#define TG3_PHY_MII_ADDR		0x01
+
+
+/*** Tigon3 specific PHY MII registers. ***/
+#define MII_TG3_MMD_CTRL		0x0d /* MMD Access Control register */
+#define MII_TG3_MMD_CTRL_DATA_NOINC	0x4000
+#define MII_TG3_MMD_ADDRESS		0x0e /* MMD Address Data register */
+
+#define MII_TG3_EXT_CTRL		0x10 /* Extended control register */
+#define  MII_TG3_EXT_CTRL_FIFO_ELASTIC	0x0001
+#define  MII_TG3_EXT_CTRL_LNK3_LED_MODE	0x0002
+#define  MII_TG3_EXT_CTRL_FORCE_LED_OFF	0x0008
+#define  MII_TG3_EXT_CTRL_TBI		0x8000
+
+#define MII_TG3_EXT_STAT		0x11 /* Extended status register */
+#define  MII_TG3_EXT_STAT_MDIX		0x2000
+#define  MII_TG3_EXT_STAT_LPASS		0x0100
+
+#define MII_TG3_RXR_COUNTERS		0x14 /* Local/Remote Receiver Counts */
+#define MII_TG3_DSP_RW_PORT		0x15 /* DSP coefficient read/write port */
+#define MII_TG3_DSP_CONTROL		0x16 /* DSP control register */
+#define MII_TG3_DSP_ADDRESS		0x17 /* DSP address register */
+
+#define MII_TG3_DSP_TAP1		0x0001
+#define  MII_TG3_DSP_TAP1_AGCTGT_DFLT	0x0007
+#define MII_TG3_DSP_TAP26		0x001a
+#define  MII_TG3_DSP_TAP26_ALNOKO	0x0001
+#define  MII_TG3_DSP_TAP26_RMRXSTO	0x0002
+#define  MII_TG3_DSP_TAP26_OPCSINPT	0x0004
+#define MII_TG3_DSP_AADJ1CH0		0x001f
+#define MII_TG3_DSP_CH34TP2		0x4022
+#define MII_TG3_DSP_CH34TP2_HIBW01	0x01ff
+#define MII_TG3_DSP_AADJ1CH3		0x601f
+#define  MII_TG3_DSP_AADJ1CH3_ADCCKADJ	0x0002
+#define MII_TG3_DSP_EXP1_INT_STAT	0x0f01
+#define MII_TG3_DSP_EXP8		0x0f08
+#define  MII_TG3_DSP_EXP8_REJ2MHz	0x0001
+#define  MII_TG3_DSP_EXP8_AEDW		0x0200
+#define MII_TG3_DSP_EXP75		0x0f75
+#define MII_TG3_DSP_EXP96		0x0f96
+#define MII_TG3_DSP_EXP97		0x0f97
+
+#define MII_TG3_AUX_CTRL		0x18 /* auxiliary control register */
+
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL	0x0000
+#define MII_TG3_AUXCTL_ACTL_TX_6DB	0x0400
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA	0x0800
+#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN	0x4000
+#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK	0x8000
+
+#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL	0x0002
+#define MII_TG3_AUXCTL_PCTL_WOL_EN	0x0008
+#define MII_TG3_AUXCTL_PCTL_100TX_LPWR	0x0010
+#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE	0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC	0x0040
+#define MII_TG3_AUXCTL_PCTL_VREG_11V	0x0180
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST	0x0004
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISC	0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN	0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX	0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT	12
+#define MII_TG3_AUXCTL_MISC_WREN	0x8000
+
+
+#define MII_TG3_AUX_STAT		0x19 /* auxiliary status register */
+#define MII_TG3_AUX_STAT_LPASS		0x0004
+#define MII_TG3_AUX_STAT_SPDMASK	0x0700
+#define MII_TG3_AUX_STAT_10HALF		0x0100
+#define MII_TG3_AUX_STAT_10FULL		0x0200
+#define MII_TG3_AUX_STAT_100HALF	0x0300
+#define MII_TG3_AUX_STAT_100_4		0x0400
+#define MII_TG3_AUX_STAT_100FULL	0x0500
+#define MII_TG3_AUX_STAT_1000HALF	0x0600
+#define MII_TG3_AUX_STAT_1000FULL	0x0700
+#define MII_TG3_AUX_STAT_100		0x0008
+#define MII_TG3_AUX_STAT_FULL		0x0001
+
+#define MII_TG3_ISTAT			0x1a /* IRQ status register */
+#define MII_TG3_IMASK			0x1b /* IRQ mask register */
+
+/* ISTAT/IMASK event bits */
+#define MII_TG3_INT_LINKCHG		0x0002
+#define MII_TG3_INT_SPEEDCHG		0x0004
+#define MII_TG3_INT_DUPLEXCHG		0x0008
+#define MII_TG3_INT_ANEG_PAGE_RX	0x0400
+
+#define MII_TG3_MISC_SHDW		0x1c
+#define MII_TG3_MISC_SHDW_WREN		0x8000
+
+#define MII_TG3_MISC_SHDW_APD_WKTM_84MS	0x0001
+#define MII_TG3_MISC_SHDW_APD_ENABLE	0x0020
+#define MII_TG3_MISC_SHDW_APD_SEL	0x2800
+
+#define MII_TG3_MISC_SHDW_SCR5_C125OE	0x0001
+#define MII_TG3_MISC_SHDW_SCR5_DLLAPD	0x0002
+#define MII_TG3_MISC_SHDW_SCR5_SDTL	0x0004
+#define MII_TG3_MISC_SHDW_SCR5_DLPTLM	0x0008
+#define MII_TG3_MISC_SHDW_SCR5_LPED	0x0010
+#define MII_TG3_MISC_SHDW_SCR5_SEL	0x1400
+
+#define MII_TG3_TEST1			0x1e
+#define MII_TG3_TEST1_TRIM_EN		0x0010
+#define MII_TG3_TEST1_CRC_EN		0x8000
+
+/* Clause 45 expansion registers */
+#define TG3_CL45_D7_EEERES_STAT		0x803e
+#define TG3_CL45_D7_EEERES_STAT_LP_100TX	0x0002
+#define TG3_CL45_D7_EEERES_STAT_LP_1000T	0x0004
+
+
+/* Fast Ethernet Transceiver definitions */
+#define MII_TG3_FET_PTEST		0x17
+#define  MII_TG3_FET_PTEST_TRIM_SEL	0x0010
+#define  MII_TG3_FET_PTEST_TRIM_2	0x0002
+#define  MII_TG3_FET_PTEST_FRC_TX_LINK	0x1000
+#define  MII_TG3_FET_PTEST_FRC_TX_LOCK	0x0800
+
+#define MII_TG3_FET_GEN_STAT		0x1c
+#define  MII_TG3_FET_GEN_STAT_MDIXSTAT	0x2000
+
+#define MII_TG3_FET_TEST		0x1f
+#define  MII_TG3_FET_SHADOW_EN		0x0080
+
+#define MII_TG3_FET_SHDW_MISCCTRL	0x10
+#define  MII_TG3_FET_SHDW_MISCCTRL_MDIX	0x4000
+
+#define MII_TG3_FET_SHDW_AUXMODE4	0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD	0x0008
+
+#define MII_TG3_FET_SHDW_AUXSTAT2	0x1b
+#define  MII_TG3_FET_SHDW_AUXSTAT2_APD	0x0020
+
+/* Serdes PHY Register Definitions */
+#define SERDES_TG3_1000X_STATUS		0x14
+#define  SERDES_TG3_SGMII_MODE		 0x0001
+#define  SERDES_TG3_LINK_UP		 0x0002
+#define  SERDES_TG3_FULL_DUPLEX		 0x0004
+#define  SERDES_TG3_SPEED_100		 0x0008
+#define  SERDES_TG3_SPEED_1000		 0x0010
+
+/* APE registers.  Accessible through BAR1 */
+#define TG3_APE_GPIO_MSG		0x0008
+#define TG3_APE_GPIO_MSG_SHIFT		4
+#define TG3_APE_EVENT			0x000c
+#define  APE_EVENT_1			 0x00000001
+#define TG3_APE_LOCK_REQ		0x002c
+#define  APE_LOCK_REQ_DRIVER		 0x00001000
+#define TG3_APE_LOCK_GRANT		0x004c
+#define  APE_LOCK_GRANT_DRIVER		 0x00001000
+#define TG3_APE_OTP_CTRL		0x00e8
+#define  APE_OTP_CTRL_PROG_EN		 0x200000
+#define  APE_OTP_CTRL_CMD_RD		 0x000000
+#define  APE_OTP_CTRL_START		 0x000001
+#define TG3_APE_OTP_STATUS		0x00ec
+#define  APE_OTP_STATUS_CMD_DONE	 0x000001
+#define TG3_APE_OTP_ADDR		0x00f0
+#define  APE_OTP_ADDR_CPU_ENABLE	 0x80000000
+#define TG3_APE_OTP_RD_DATA		0x00f8
+
+#define OTP_ADDRESS_MAGIC0		 0x00000050
+#define TG3_OTP_MAGIC0_VALID(val)		\
+	((((val) & 0xf0000000) == 0xa0000000) ||\
+	 (((val) & 0x0f000000) == 0x0a000000))
+
+/* APE shared memory.  Accessible through BAR1 */
+#define TG3_APE_SHMEM_BASE		0x4000
+#define TG3_APE_SEG_SIG			0x4000
+#define  APE_SEG_SIG_MAGIC		 0x41504521
+#define TG3_APE_FW_STATUS		0x400c
+#define  APE_FW_STATUS_READY		 0x00000100
+#define TG3_APE_FW_FEATURES		0x4010
+#define  TG3_APE_FW_FEATURE_NCSI	 0x00000002
+#define TG3_APE_FW_VERSION		0x4018
+#define  APE_FW_VERSION_MAJMSK		 0xff000000
+#define  APE_FW_VERSION_MAJSFT		 24
+#define  APE_FW_VERSION_MINMSK		 0x00ff0000
+#define  APE_FW_VERSION_MINSFT		 16
+#define  APE_FW_VERSION_REVMSK		 0x0000ff00
+#define  APE_FW_VERSION_REVSFT		 8
+#define  APE_FW_VERSION_BLDMSK		 0x000000ff
+#define TG3_APE_SEG_MSG_BUF_OFF		0x401c
+#define TG3_APE_SEG_MSG_BUF_LEN		0x4020
+#define TG3_APE_HOST_SEG_SIG		0x4200
+#define  APE_HOST_SEG_SIG_MAGIC		 0x484f5354
+#define TG3_APE_HOST_SEG_LEN		0x4204
+#define  APE_HOST_SEG_LEN_MAGIC		 0x00000020
+#define TG3_APE_HOST_INIT_COUNT		0x4208
+#define TG3_APE_HOST_DRIVER_ID		0x420c
+#define  APE_HOST_DRIVER_ID_LINUX	 0xf0000000
+#define  APE_HOST_DRIVER_ID_MAGIC(maj, min)	\
+	(APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)
+#define TG3_APE_HOST_BEHAVIOR		0x4210
+#define  APE_HOST_BEHAV_NO_PHYLOCK	 0x00000001
+#define TG3_APE_HOST_HEARTBEAT_INT_MS	0x4214
+#define  APE_HOST_HEARTBEAT_INT_DISABLE	 0
+#define  APE_HOST_HEARTBEAT_INT_5SEC	 5000
+#define TG3_APE_HOST_HEARTBEAT_COUNT	0x4218
+#define TG3_APE_HOST_DRVR_STATE		0x421c
+#define TG3_APE_HOST_DRVR_STATE_START	 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD	 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL	 0x00000003
+#define TG3_APE_HOST_WOL_SPEED		0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO	 0x00008000
+
+#define TG3_APE_EVENT_STATUS		0x4300
+
+#define  APE_EVENT_STATUS_DRIVER_EVNT	 0x00000010
+#define  APE_EVENT_STATUS_STATE_CHNGE	 0x00000500
+#define  APE_EVENT_STATUS_SCRTCHPD_READ	 0x00001600
+#define  APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
+#define  APE_EVENT_STATUS_STATE_START	 0x00010000
+#define  APE_EVENT_STATUS_STATE_UNLOAD	 0x00020000
+#define  APE_EVENT_STATUS_STATE_WOL	 0x00030000
+#define  APE_EVENT_STATUS_STATE_SUSPEND	 0x00040000
+#define  APE_EVENT_STATUS_EVENT_PENDING	 0x80000000
+
+#define TG3_APE_PER_LOCK_REQ		0x8400
+#define  APE_LOCK_PER_REQ_DRIVER	 0x00001000
+#define TG3_APE_PER_LOCK_GRANT		0x8420
+#define  APE_PER_LOCK_GRANT_DRIVER	 0x00001000
+
+/* APE convenience enumerations. */
+#define TG3_APE_LOCK_PHY0		0
+#define TG3_APE_LOCK_GRC		1
+#define TG3_APE_LOCK_PHY1		2
+#define TG3_APE_LOCK_PHY2		3
+#define TG3_APE_LOCK_MEM		4
+#define TG3_APE_LOCK_PHY3		5
+#define TG3_APE_LOCK_GPIO		7
+
+#define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
+
+
+/* There are two ways to manage the TX descriptors on the tigon3.
+ * Either the descriptors are in host DMA'able memory, or they
+ * exist only in the card's on-chip SRAM.  All 16 send BDs operate in
+ * the same mode; they may not be configured individually.
+ *
+ * This driver always uses host memory TX descriptors.
+ *
+ * To use host memory TX descriptors:
+ *	1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
+ *	   Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
+ *	2) Allocate DMA'able memory.
+ *	3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ *	   a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
+ *	      obtained in step 2
+ *	   b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
+ *	   c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
+ *            of TX descriptors.  Leave flags field clear.
+ *	4) Access TX descriptors via host memory.  The chip
+ *	   will refetch into local SRAM as needed when producer
+ *	   index mailboxes are updated.
+ *
+ * To use on-chip TX descriptors:
+ *	1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
+ *	   Make sure GRC_MODE_HOST_SENDBDS is clear.
+ *	2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ *	   a) Set TG3_BDINFO_HOST_ADDR to zero.
+ *	   b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
+ *	   c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
+ *	3) Access TX descriptors directly in on-chip SRAM
+ *	   using normal {read,write}l().  (and not using
+ *         pointer dereferencing of ioremap()'d memory like
+ *	   the broken Broadcom driver does)
+ *
+ * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of
+ * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices.
+ */
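+
+/* Illustrative sketch (not part of the original header) of steps 1 and
+ * 3 above for host-memory TX descriptors.  tr32()/tw32() and
+ * tg3_write_mem() stand in for the driver's register and SRAM write
+ * helpers, 'mapping' and 'tx_ring_size' are assumed local variables,
+ * and the field encodings shown (address high/low halves, len in the
+ * upper 16 bits of TG3_BDINFO_MAXLEN_FLAGS) are assumptions; the
+ * TG3_BDINFO_* offsets themselves are defined earlier in this file.
+ *
+ *	u32 grc_mode = tr32(GRC_MODE);
+ *
+ *	grc_mode &= ~GRC_MODE_4X_NIC_SEND_RINGS;
+ *	grc_mode |= GRC_MODE_HOST_SENDBDS;
+ *	tw32(GRC_MODE, grc_mode);
+ *
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR,
+ *		      upper_32_bits(mapping));
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR + 4,
+ *		      lower_32_bits(mapping));
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_NIC_ADDR,
+ *		      NIC_SRAM_TX_BUFFER_DESC);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_MAXLEN_FLAGS,
+ *		      tx_ring_size << 16);
+ */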
+struct tg3_tx_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+
+	u32				len_flags;
+#define TXD_FLAG_TCPUDP_CSUM		0x0001
+#define TXD_FLAG_IP_CSUM		0x0002
+#define TXD_FLAG_END			0x0004
+#define TXD_FLAG_IP_FRAG		0x0008
+#define TXD_FLAG_JMB_PKT		0x0008
+#define TXD_FLAG_IP_FRAG_END		0x0010
+#define TXD_FLAG_HWTSTAMP		0x0020
+#define TXD_FLAG_VLAN			0x0040
+#define TXD_FLAG_COAL_NOW		0x0080
+#define TXD_FLAG_CPU_PRE_DMA		0x0100
+#define TXD_FLAG_CPU_POST_DMA		0x0200
+#define TXD_FLAG_ADD_SRC_ADDR		0x1000
+#define TXD_FLAG_CHOOSE_SRC_ADDR	0x6000
+#define TXD_FLAG_NO_CRC			0x8000
+#define TXD_LEN_SHIFT			16
+
+	u32				vlan_tag;
+#define TXD_VLAN_TAG_SHIFT		0
+#define TXD_MSS_SHIFT			16
+};
+
+#define TXD_ADDR			0x00UL /* 64-bit */
+#define TXD_LEN_FLAGS			0x08UL /* 32-bit (upper 16-bits are len) */
+#define TXD_VLAN_TAG			0x0cUL /* 32-bit (upper 16-bits are tag) */
+#define TXD_SIZE			0x10UL
+
+struct tg3_rx_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+
+	u32				idx_len;
+#define RXD_IDX_MASK	0xffff0000
+#define RXD_IDX_SHIFT	16
+#define RXD_LEN_MASK	0x0000ffff
+#define RXD_LEN_SHIFT	0
+
+	u32				type_flags;
+#define RXD_TYPE_SHIFT	16
+#define RXD_FLAGS_SHIFT	0
+
+#define RXD_FLAG_END			0x0004
+#define RXD_FLAG_MINI			0x0800
+#define RXD_FLAG_JUMBO			0x0020
+#define RXD_FLAG_VLAN			0x0040
+#define RXD_FLAG_ERROR			0x0400
+#define RXD_FLAG_IP_CSUM		0x1000
+#define RXD_FLAG_TCPUDP_CSUM		0x2000
+#define RXD_FLAG_IS_TCP			0x4000
+#define RXD_FLAG_PTPSTAT_MASK		0x0210
+#define RXD_FLAG_PTPSTAT_PTPV1		0x0010
+#define RXD_FLAG_PTPSTAT_PTPV2		0x0200
+
+	u32				ip_tcp_csum;
+#define RXD_IPCSUM_MASK		0xffff0000
+#define RXD_IPCSUM_SHIFT	16
+#define RXD_TCPCSUM_MASK	0x0000ffff
+#define RXD_TCPCSUM_SHIFT	0
+
+	u32				err_vlan;
+
+#define RXD_VLAN_MASK			0x0000ffff
+
+#define RXD_ERR_BAD_CRC			0x00010000
+#define RXD_ERR_COLLISION		0x00020000
+#define RXD_ERR_LINK_LOST		0x00040000
+#define RXD_ERR_PHY_DECODE		0x00080000
+#define RXD_ERR_ODD_NIBBLE_RCVD_MII	0x00100000
+#define RXD_ERR_MAC_ABRT		0x00200000
+#define RXD_ERR_TOO_SMALL		0x00400000
+#define RXD_ERR_NO_RESOURCES		0x00800000
+#define RXD_ERR_HUGE_FRAME		0x01000000
+
+#define RXD_ERR_MASK	(RXD_ERR_BAD_CRC | RXD_ERR_COLLISION |		\
+			 RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE |	\
+			 RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL |		\
+			 RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
+
+	u32				reserved;
+	u32				opaque;
+#define RXD_OPAQUE_INDEX_MASK		0x0000ffff
+#define RXD_OPAQUE_INDEX_SHIFT		0
+#define RXD_OPAQUE_RING_STD		0x00010000
+#define RXD_OPAQUE_RING_JUMBO		0x00020000
+#define RXD_OPAQUE_RING_MINI		0x00040000
+#define RXD_OPAQUE_RING_MASK		0x00070000
+};
+
+struct tg3_ext_rx_buffer_desc {
+	struct {
+		u32			addr_hi;
+		u32			addr_lo;
+	}				addrlist[3];
+	u32				len2_len1;
+	u32				resv_len3;
+	struct tg3_rx_buffer_desc	std;
+};
+
+/* We only use this when testing out the DMA engine
+ * at probe time.  This is the internal format of buffer
+ * descriptors used by the chip at NIC_SRAM_DMA_DESCS.
+ */
+struct tg3_internal_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+	u32				nic_mbuf;
+	/* XXX FIX THIS */
+#ifdef __BIG_ENDIAN
+	u16				cqid_sqid;
+	u16				len;
+#else
+	u16				len;
+	u16				cqid_sqid;
+#endif
+	u32				flags;
+	u32				__cookie1;
+	u32				__cookie2;
+	u32				__cookie3;
+};
+
+#define TG3_HW_STATUS_SIZE		0x50
+struct tg3_hw_status {
+	u32				status;
+#define SD_STATUS_UPDATED		0x00000001
+#define SD_STATUS_LINK_CHG		0x00000002
+#define SD_STATUS_ERROR			0x00000004
+
+	u32				status_tag;
+
+#ifdef __BIG_ENDIAN
+	u16				rx_consumer;
+	u16				rx_jumbo_consumer;
+#else
+	u16				rx_jumbo_consumer;
+	u16				rx_consumer;
+#endif
+
+#ifdef __BIG_ENDIAN
+	u16				reserved;
+	u16				rx_mini_consumer;
+#else
+	u16				rx_mini_consumer;
+	u16				reserved;
+#endif
+	struct {
+#ifdef __BIG_ENDIAN
+		u16			tx_consumer;
+		u16			rx_producer;
+#else
+		u16			rx_producer;
+		u16			tx_consumer;
+#endif
+	}				idx[16];
+};
+
+typedef struct {
+	u32 high, low;
+} tg3_stat64_t;
+
+struct tg3_hw_stats {
+	u8				__reserved0[0x400-0x300];
+
+	/* Statistics maintained by Receive MAC. */
+	tg3_stat64_t			rx_octets;
+	u64				__reserved1;
+	tg3_stat64_t			rx_fragments;
+	tg3_stat64_t			rx_ucast_packets;
+	tg3_stat64_t			rx_mcast_packets;
+	tg3_stat64_t			rx_bcast_packets;
+	tg3_stat64_t			rx_fcs_errors;
+	tg3_stat64_t			rx_align_errors;
+	tg3_stat64_t			rx_xon_pause_rcvd;
+	tg3_stat64_t			rx_xoff_pause_rcvd;
+	tg3_stat64_t			rx_mac_ctrl_rcvd;
+	tg3_stat64_t			rx_xoff_entered;
+	tg3_stat64_t			rx_frame_too_long_errors;
+	tg3_stat64_t			rx_jabbers;
+	tg3_stat64_t			rx_undersize_packets;
+	tg3_stat64_t			rx_in_length_errors;
+	tg3_stat64_t			rx_out_length_errors;
+	tg3_stat64_t			rx_64_or_less_octet_packets;
+	tg3_stat64_t			rx_65_to_127_octet_packets;
+	tg3_stat64_t			rx_128_to_255_octet_packets;
+	tg3_stat64_t			rx_256_to_511_octet_packets;
+	tg3_stat64_t			rx_512_to_1023_octet_packets;
+	tg3_stat64_t			rx_1024_to_1522_octet_packets;
+	tg3_stat64_t			rx_1523_to_2047_octet_packets;
+	tg3_stat64_t			rx_2048_to_4095_octet_packets;
+	tg3_stat64_t			rx_4096_to_8191_octet_packets;
+	tg3_stat64_t			rx_8192_to_9022_octet_packets;
+
+	u64				__unused0[37];
+
+	/* Statistics maintained by Transmit MAC. */
+	tg3_stat64_t			tx_octets;
+	u64				__reserved2;
+	tg3_stat64_t			tx_collisions;
+	tg3_stat64_t			tx_xon_sent;
+	tg3_stat64_t			tx_xoff_sent;
+	tg3_stat64_t			tx_flow_control;
+	tg3_stat64_t			tx_mac_errors;
+	tg3_stat64_t			tx_single_collisions;
+	tg3_stat64_t			tx_mult_collisions;
+	tg3_stat64_t			tx_deferred;
+	u64				__reserved3;
+	tg3_stat64_t			tx_excessive_collisions;
+	tg3_stat64_t			tx_late_collisions;
+	tg3_stat64_t			tx_collide_2times;
+	tg3_stat64_t			tx_collide_3times;
+	tg3_stat64_t			tx_collide_4times;
+	tg3_stat64_t			tx_collide_5times;
+	tg3_stat64_t			tx_collide_6times;
+	tg3_stat64_t			tx_collide_7times;
+	tg3_stat64_t			tx_collide_8times;
+	tg3_stat64_t			tx_collide_9times;
+	tg3_stat64_t			tx_collide_10times;
+	tg3_stat64_t			tx_collide_11times;
+	tg3_stat64_t			tx_collide_12times;
+	tg3_stat64_t			tx_collide_13times;
+	tg3_stat64_t			tx_collide_14times;
+	tg3_stat64_t			tx_collide_15times;
+	tg3_stat64_t			tx_ucast_packets;
+	tg3_stat64_t			tx_mcast_packets;
+	tg3_stat64_t			tx_bcast_packets;
+	tg3_stat64_t			tx_carrier_sense_errors;
+	tg3_stat64_t			tx_discards;
+	tg3_stat64_t			tx_errors;
+
+	u64				__unused1[31];
+
+	/* Statistics maintained by Receive List Placement. */
+	tg3_stat64_t			COS_rx_packets[16];
+	tg3_stat64_t			COS_rx_filter_dropped;
+	tg3_stat64_t			dma_writeq_full;
+	tg3_stat64_t			dma_write_prioq_full;
+	tg3_stat64_t			rxbds_empty;
+	tg3_stat64_t			rx_discards;
+	tg3_stat64_t			rx_errors;
+	tg3_stat64_t			rx_threshold_hit;
+
+	u64				__unused2[9];
+
+	/* Statistics maintained by Send Data Initiator. */
+	tg3_stat64_t			COS_out_packets[16];
+	tg3_stat64_t			dma_readq_full;
+	tg3_stat64_t			dma_read_prioq_full;
+	tg3_stat64_t			tx_comp_queue_full;
+
+	/* Statistics maintained by Host Coalescing. */
+	tg3_stat64_t			ring_set_send_prod_index;
+	tg3_stat64_t			ring_status_update;
+	tg3_stat64_t			nic_irqs;
+	tg3_stat64_t			nic_avoided_irqs;
+	tg3_stat64_t			nic_tx_threshold_hit;
+
+	/* NOT a part of the hardware statistics block format.
+	 * These stats are here as storage for tg3_periodic_fetch_stats().
+	 */
+	tg3_stat64_t			mbuf_lwm_thresh_hit;
+
+	u8				__reserved4[0xb00-0x9c8];
+};
+
+#define TG3_SD_NUM_RECS			3
+#define TG3_OCIR_LEN			(sizeof(struct tg3_ocir))
+#define TG3_OCIR_SIG_MAGIC		0x5253434f
+#define TG3_OCIR_FLAG_ACTIVE		0x00000001
+
+#define TG3_TEMP_CAUTION_OFFSET		0xc8
+#define TG3_TEMP_MAX_OFFSET		0xcc
+#define TG3_TEMP_SENSOR_OFFSET		0xd4
+
+
+struct tg3_ocir {
+	u32				signature;
+	u16				version_flags;
+	u16				refresh_int;
+	u32				refresh_tmr;
+	u32				update_tmr;
+	u32				dst_base_addr;
+	u16				src_hdr_offset;
+	u16				src_hdr_length;
+	u16				src_data_offset;
+	u16				src_data_length;
+	u16				dst_hdr_offset;
+	u16				dst_data_offset;
+	u16				dst_reg_upd_offset;
+	u16				dst_sem_offset;
+	u32				reserved1[2];
+	u32				port0_flags;
+	u32				port1_flags;
+	u32				port2_flags;
+	u32				port3_flags;
+	u32				reserved2[1];
+};
+
+
+/* 'mapping' is superfluous as the chip does not write into
+ * the tx/rx post rings, so we could just fetch it from there.
+ * But the cache behavior is better the way we are doing it now.
+ *
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc()'d data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
+struct ring_info {
+	u8				*data;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
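+
+/* Illustrative sketch (not part of the original header) of how an RX
+ * completion might turn a ring_info entry into an skb with build_skb(),
+ * per the comment above.  'tp', 'prodring', 'idx', 'frag_size',
+ * 'rx_offset' and 'len' are assumed local state; build_skb(),
+ * skb_reserve(), skb_put() and the dma_unmap_*() helpers are standard
+ * kernel APIs.
+ *
+ *	struct ring_info *ri = &prodring->rx_std_buffers[idx];
+ *	struct sk_buff *skb;
+ *
+ *	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
+ *			 frag_size, DMA_FROM_DEVICE);
+ *	skb = build_skb(ri->data, frag_size);
+ *	if (skb) {
+ *		skb_reserve(skb, rx_offset);
+ *		skb_put(skb, len);
+ *	}
+ */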
+
+struct tg3_tx_ring_info {
+	struct sk_buff			*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	bool				fragmented;
+};
+
+struct tg3_link_config {
+	/* Describes what we're trying to get. */
+	u32				advertising;
+	u16				speed;
+	u8				duplex;
+	u8				autoneg;
+	u8				flowctrl;
+
+	/* Describes what we actually have. */
+	u8				active_flowctrl;
+
+	u8				active_duplex;
+	u16				active_speed;
+	u32				rmt_adv;
+};
+
+struct tg3_bufmgr_config {
+	u32		mbuf_read_dma_low_water;
+	u32		mbuf_mac_rx_low_water;
+	u32		mbuf_high_water;
+
+	u32		mbuf_read_dma_low_water_jumbo;
+	u32		mbuf_mac_rx_low_water_jumbo;
+	u32		mbuf_high_water_jumbo;
+
+	u32		dma_low_water;
+	u32		dma_high_water;
+};
+
+struct tg3_ethtool_stats {
+	/* Statistics maintained by Receive MAC. */
+	u64		rx_octets;
+	u64		rx_fragments;
+	u64		rx_ucast_packets;
+	u64		rx_mcast_packets;
+	u64		rx_bcast_packets;
+	u64		rx_fcs_errors;
+	u64		rx_align_errors;
+	u64		rx_xon_pause_rcvd;
+	u64		rx_xoff_pause_rcvd;
+	u64		rx_mac_ctrl_rcvd;
+	u64		rx_xoff_entered;
+	u64		rx_frame_too_long_errors;
+	u64		rx_jabbers;
+	u64		rx_undersize_packets;
+	u64		rx_in_length_errors;
+	u64		rx_out_length_errors;
+	u64		rx_64_or_less_octet_packets;
+	u64		rx_65_to_127_octet_packets;
+	u64		rx_128_to_255_octet_packets;
+	u64		rx_256_to_511_octet_packets;
+	u64		rx_512_to_1023_octet_packets;
+	u64		rx_1024_to_1522_octet_packets;
+	u64		rx_1523_to_2047_octet_packets;
+	u64		rx_2048_to_4095_octet_packets;
+	u64		rx_4096_to_8191_octet_packets;
+	u64		rx_8192_to_9022_octet_packets;
+
+	/* Statistics maintained by Transmit MAC. */
+	u64		tx_octets;
+	u64		tx_collisions;
+	u64		tx_xon_sent;
+	u64		tx_xoff_sent;
+	u64		tx_flow_control;
+	u64		tx_mac_errors;
+	u64		tx_single_collisions;
+	u64		tx_mult_collisions;
+	u64		tx_deferred;
+	u64		tx_excessive_collisions;
+	u64		tx_late_collisions;
+	u64		tx_collide_2times;
+	u64		tx_collide_3times;
+	u64		tx_collide_4times;
+	u64		tx_collide_5times;
+	u64		tx_collide_6times;
+	u64		tx_collide_7times;
+	u64		tx_collide_8times;
+	u64		tx_collide_9times;
+	u64		tx_collide_10times;
+	u64		tx_collide_11times;
+	u64		tx_collide_12times;
+	u64		tx_collide_13times;
+	u64		tx_collide_14times;
+	u64		tx_collide_15times;
+	u64		tx_ucast_packets;
+	u64		tx_mcast_packets;
+	u64		tx_bcast_packets;
+	u64		tx_carrier_sense_errors;
+	u64		tx_discards;
+	u64		tx_errors;
+
+	/* Statistics maintained by Receive List Placement. */
+	u64		dma_writeq_full;
+	u64		dma_write_prioq_full;
+	u64		rxbds_empty;
+	u64		rx_discards;
+	u64		rx_errors;
+	u64		rx_threshold_hit;
+
+	/* Statistics maintained by Send Data Initiator. */
+	u64		dma_readq_full;
+	u64		dma_read_prioq_full;
+	u64		tx_comp_queue_full;
+
+	/* Statistics maintained by Host Coalescing. */
+	u64		ring_set_send_prod_index;
+	u64		ring_status_update;
+	u64		nic_irqs;
+	u64		nic_avoided_irqs;
+	u64		nic_tx_threshold_hit;
+
+	u64		mbuf_lwm_thresh_hit;
+};
+
+struct tg3_rx_prodring_set {
+	u32				rx_std_prod_idx;
+	u32				rx_std_cons_idx;
+	u32				rx_jmb_prod_idx;
+	u32				rx_jmb_cons_idx;
+	struct tg3_rx_buffer_desc	*rx_std;
+	struct tg3_ext_rx_buffer_desc	*rx_jmb;
+	struct ring_info		*rx_std_buffers;
+	struct ring_info		*rx_jmb_buffers;
+	dma_addr_t			rx_std_mapping;
+	dma_addr_t			rx_jmb_mapping;
+};
+
+#define TG3_RSS_MAX_NUM_QS		4
+#define TG3_IRQ_MAX_VECS_RSS		(TG3_RSS_MAX_NUM_QS + 1)
+#define TG3_IRQ_MAX_VECS		TG3_IRQ_MAX_VECS_RSS
+
+struct tg3_napi {
+	struct napi_struct		napi	____cacheline_aligned;
+	struct tg3			*tp;
+	struct tg3_hw_status		*hw_status;
+
+	u32				chk_msi_cnt;
+	u32				last_tag;
+	u32				last_irq_tag;
+	u32				int_mbox;
+	u32				coal_now;
+
+	u32				consmbox ____cacheline_aligned;
+	u32				rx_rcb_ptr;
+	u32				last_rx_cons;
+	u16				*rx_rcb_prod_idx;
+	struct tg3_rx_prodring_set	prodring;
+	struct tg3_rx_buffer_desc	*rx_rcb;
+
+	u32				tx_prod	____cacheline_aligned;
+	u32				tx_cons;
+	u32				tx_pending;
+	u32				last_tx_cons;
+	u32				prodmbox;
+	struct tg3_tx_buffer_desc	*tx_ring;
+	struct tg3_tx_ring_info		*tx_buffers;
+
+	dma_addr_t			status_mapping;
+	dma_addr_t			rx_rcb_mapping;
+	dma_addr_t			tx_desc_mapping;
+
+	char				irq_lbl[IFNAMSIZ];
+	unsigned int			irq_vec;
+};
+
+enum TG3_FLAGS {
+	TG3_FLAG_TAGGED_STATUS = 0,
+	TG3_FLAG_TXD_MBOX_HWBUG,
+	TG3_FLAG_USE_LINKCHG_REG,
+	TG3_FLAG_ERROR_PROCESSED,
+	TG3_FLAG_ENABLE_ASF,
+	TG3_FLAG_ASPM_WORKAROUND,
+	TG3_FLAG_POLL_SERDES,
+	TG3_FLAG_POLL_CPMU_LINK,
+	TG3_FLAG_MBOX_WRITE_REORDER,
+	TG3_FLAG_PCIX_TARGET_HWBUG,
+	TG3_FLAG_WOL_SPEED_100MB,
+	TG3_FLAG_WOL_ENABLE,
+	TG3_FLAG_EEPROM_WRITE_PROT,
+	TG3_FLAG_NVRAM,
+	TG3_FLAG_NVRAM_BUFFERED,
+	TG3_FLAG_SUPPORT_MSI,
+	TG3_FLAG_SUPPORT_MSIX,
+	TG3_FLAG_USING_MSI,
+	TG3_FLAG_USING_MSIX,
+	TG3_FLAG_PCIX_MODE,
+	TG3_FLAG_PCI_HIGH_SPEED,
+	TG3_FLAG_PCI_32BIT,
+	TG3_FLAG_SRAM_USE_CONFIG,
+	TG3_FLAG_TX_RECOVERY_PENDING,
+	TG3_FLAG_WOL_CAP,
+	TG3_FLAG_JUMBO_RING_ENABLE,
+	TG3_FLAG_PAUSE_AUTONEG,
+	TG3_FLAG_CPMU_PRESENT,
+	TG3_FLAG_40BIT_DMA_BUG,
+	TG3_FLAG_BROKEN_CHECKSUMS,
+	TG3_FLAG_JUMBO_CAPABLE,
+	TG3_FLAG_CHIP_RESETTING,
+	TG3_FLAG_INIT_COMPLETE,
+	TG3_FLAG_MAX_RXPEND_64,
+	TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
+	TG3_FLAG_ASF_NEW_HANDSHAKE,
+	TG3_FLAG_HW_AUTONEG,
+	TG3_FLAG_IS_NIC,
+	TG3_FLAG_FLASH,
+	TG3_FLAG_FW_TSO,
+	TG3_FLAG_HW_TSO_1,
+	TG3_FLAG_HW_TSO_2,
+	TG3_FLAG_HW_TSO_3,
+	TG3_FLAG_TSO_CAPABLE,
+	TG3_FLAG_TSO_BUG,
+	TG3_FLAG_ICH_WORKAROUND,
+	TG3_FLAG_1SHOT_MSI,
+	TG3_FLAG_NO_FWARE_REPORTED,
+	TG3_FLAG_NO_NVRAM_ADDR_TRANS,
+	TG3_FLAG_ENABLE_APE,
+	TG3_FLAG_PROTECTED_NVRAM,
+	TG3_FLAG_5701_DMA_BUG,
+	TG3_FLAG_USE_PHYLIB,
+	TG3_FLAG_MDIOBUS_INITED,
+	TG3_FLAG_LRG_PROD_RING_CAP,
+	TG3_FLAG_RGMII_INBAND_DISABLE,
+	TG3_FLAG_RGMII_EXT_IBND_RX_EN,
+	TG3_FLAG_RGMII_EXT_IBND_TX_EN,
+	TG3_FLAG_CLKREQ_BUG,
+	TG3_FLAG_NO_NVRAM,
+	TG3_FLAG_ENABLE_RSS,
+	TG3_FLAG_ENABLE_TSS,
+	TG3_FLAG_SHORT_DMA_BUG,
+	TG3_FLAG_USE_JUMBO_BDFLAG,
+	TG3_FLAG_L1PLLPD_EN,
+	TG3_FLAG_APE_HAS_NCSI,
+	TG3_FLAG_TX_TSTAMP_EN,
+	TG3_FLAG_4K_FIFO_LIMIT,
+	TG3_FLAG_5719_5720_RDMA_BUG,
+	TG3_FLAG_RESET_TASK_PENDING,
+	TG3_FLAG_PTP_CAPABLE,
+	TG3_FLAG_5705_PLUS,
+	TG3_FLAG_IS_5788,
+	TG3_FLAG_5750_PLUS,
+	TG3_FLAG_5780_CLASS,
+	TG3_FLAG_5755_PLUS,
+	TG3_FLAG_57765_PLUS,
+	TG3_FLAG_57765_CLASS,
+	TG3_FLAG_5717_PLUS,
+	TG3_FLAG_IS_SSB_CORE,
+	TG3_FLAG_FLUSH_POSTED_WRITES,
+	TG3_FLAG_ROBOSWITCH,
+	TG3_FLAG_ONE_DMA_AT_ONCE,
+	TG3_FLAG_RGMII_MODE,
+
+	/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+	TG3_FLAG_NUMBER_OF_FLAGS,	/* Last entry in enum TG3_FLAGS */
+};
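+
+/* These flag numbers index the tg3_flags bitmap in struct tg3 below; tg3.c
+ * accesses them through its tg3_flag(), tg3_flag_set() and tg3_flag_clear()
+ * helpers, which expand to test_bit()/set_bit()/clear_bit() on that bitmap,
+ * e.g. tg3_flag(tp, ENABLE_RSS).
+ */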
+
+struct tg3_firmware_hdr {
+	__be32 version; /* unused for fragments */
+	__be32 base_addr;
+	__be32 len;
+};
+#define TG3_FW_HDR_LEN         (sizeof(struct tg3_firmware_hdr))
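+
+/* A firmware image handed to the driver via request_firmware() starts with
+ * one of these headers, with all fields big-endian.  A minimal parse,
+ * roughly as done in tg3.c, looks like:
+ *
+ *	const struct tg3_firmware_hdr *fw_hdr =
+ *		(const struct tg3_firmware_hdr *)tp->fw->data;
+ *	u32 base = be32_to_cpu(fw_hdr->base_addr);
+ *	u32 len  = be32_to_cpu(fw_hdr->len);
+ */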
+
+struct tg3 {
+	/* begin "general, frequently-used members" cacheline section */
+
+	/* If the IRQ handler (which runs lockless) needs to be
+	 * quiesced, the following bitmask state is used.  The
+	 * SYNC flag is set by non-IRQ context code to initiate
+	 * the quiescence.
+	 *
+	 * When the IRQ handler notices that SYNC is set, it
+	 * disables interrupts and returns.
+	 *
+	 * When all outstanding IRQ handlers have returned after
+	 * the SYNC flag has been set, the setter can be assured
+	 * that interrupts will no longer get run.
+	 *
+	 * In this way all SMP driver locks are never acquired
+	 * in hw IRQ context, only sw IRQ context or lower.
+	 */
+	unsigned int			irq_sync;
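+	/* Roughly how this plays out in tg3.c: tg3_irq_quiesce() sets
+	 * irq_sync, issues a memory barrier and synchronize_irq()s every
+	 * vector, while the interrupt handlers bail out early (leaving
+	 * interrupts disabled) whenever tg3_irq_sync() reports the flag
+	 * as set.
+	 */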
+
+	/* SMP locking strategy:
+	 *
+	 * lock: Held during reset, PHY access, timer, and when
+	 *       updating tg3_flags.
+	 *
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
+	 *
+	 * Both of these locks are to be held with BH safety.
+	 *
+	 * Because the IRQ handler, tg3_poll, and tg3_start_xmit
+	 * are running lockless, it is necessary to completely
+	 * quiesce the chip with tg3_netif_stop and tg3_full_lock
+	 * before reconfiguring the device.
+	 *
+	 * indirect_lock: Held when accessing registers indirectly
+	 *                with IRQ disabling.
+	 */
+	spinlock_t			lock;
+	spinlock_t			indirect_lock;
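+
+	/* The reconfiguration pattern described above, roughly as it appears
+	 * in tg3.c (a non-zero second argument to tg3_full_lock() also
+	 * quiesces the interrupt handlers via irq_sync):
+	 *
+	 *	tg3_netif_stop(tp);
+	 *	tg3_full_lock(tp, 1);
+	 *	... reprogram the chip ...
+	 *	tg3_full_unlock(tp);
+	 *	tg3_netif_start(tp);
+	 */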
+
+	u32				(*read32) (struct tg3 *, u32);
+	void				(*write32) (struct tg3 *, u32, u32);
+	u32				(*read32_mbox) (struct tg3 *, u32);
+	void				(*write32_mbox) (struct tg3 *, u32,
+							 u32);
+	void __iomem			*regs;
+	void __iomem			*aperegs;
+	struct net_device		*dev;
+	struct pci_dev			*pdev;
+
+	u32				coal_now;
+	u32				msg_enable;
+
+	struct ptp_clock_info		ptp_info;
+	struct ptp_clock		*ptp_clock;
+	s64				ptp_adjust;
+
+	/* begin "tx thread" cacheline section */
+	void				(*write32_tx_mbox) (struct tg3 *, u32,
+							    u32);
+	u32				dma_limit;
+	u32				txq_req;
+	u32				txq_cnt;
+	u32				txq_max;
+
+	/* begin "rx thread" cacheline section */
+	struct tg3_napi			napi[TG3_IRQ_MAX_VECS];
+	void				(*write32_rx_mbox) (struct tg3 *, u32,
+							    u32);
+	u32				rx_copy_thresh;
+	u32				rx_std_ring_mask;
+	u32				rx_jmb_ring_mask;
+	u32				rx_ret_ring_mask;
+	u32				rx_pending;
+	u32				rx_jumbo_pending;
+	u32				rx_std_max_post;
+	u32				rx_offset;
+	u32				rx_pkt_map_sz;
+	u32				rxq_req;
+	u32				rxq_cnt;
+	u32				rxq_max;
+	bool				rx_refill;
+
+
+	/* begin "everything else" cacheline(s) section */
+	unsigned long			rx_dropped;
+	unsigned long			tx_dropped;
+	struct rtnl_link_stats64	net_stats_prev;
+	struct tg3_ethtool_stats	estats_prev;
+
+	DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
+
+	union {
+	unsigned long			phy_crc_errors;
+	unsigned long			last_event_jiffies;
+	};
+
+	struct timer_list		timer;
+	u16				timer_counter;
+	u16				timer_multiplier;
+	u32				timer_offset;
+	u16				asf_counter;
+	u16				asf_multiplier;
+
+	/* 1 second counter for transient serdes link events */
+	u32				serdes_counter;
+#define SERDES_AN_TIMEOUT_5704S		2
+#define SERDES_PARALLEL_DET_TIMEOUT	1
+#define SERDES_AN_TIMEOUT_5714S		1
+
+	struct tg3_link_config		link_config;
+	struct tg3_bufmgr_config	bufmgr_config;
+
+	/* cache h/w values, often passed straight to h/w */
+	u32				rx_mode;
+	u32				tx_mode;
+	u32				mac_mode;
+	u32				mi_mode;
+	u32				misc_host_ctrl;
+	u32				grc_mode;
+	u32				grc_local_ctrl;
+	u32				dma_rwctrl;
+	u32				coalesce_mode;
+	u32				pwrmgmt_thresh;
+	u32				rxptpctl;
+
+	/* PCI block */
+	u32				pci_chip_rev_id;
+	u16				pci_cmd;
+	u8				pci_cacheline_sz;
+	u8				pci_lat_timer;
+
+	int				pci_fn;
+	int				msi_cap;
+	int				pcix_cap;
+	int				pcie_readrq;
+
+	struct mii_bus			*mdio_bus;
+	int				mdio_irq[PHY_MAX_ADDR];
+	int				old_link;
+
+	u8				phy_addr;
+	u8				phy_ape_lock;
+
+	/* PHY info */
+	u32				phy_id;
+#define TG3_PHY_ID_MASK			0xfffffff0
+#define TG3_PHY_ID_BCM5400		0x60008040
+#define TG3_PHY_ID_BCM5401		0x60008050
+#define TG3_PHY_ID_BCM5411		0x60008070
+#define TG3_PHY_ID_BCM5701		0x60008110
+#define TG3_PHY_ID_BCM5703		0x60008160
+#define TG3_PHY_ID_BCM5704		0x60008190
+#define TG3_PHY_ID_BCM5705		0x600081a0
+#define TG3_PHY_ID_BCM5750		0x60008180
+#define TG3_PHY_ID_BCM5752		0x60008100
+#define TG3_PHY_ID_BCM5714		0x60008340
+#define TG3_PHY_ID_BCM5780		0x60008350
+#define TG3_PHY_ID_BCM5755		0xbc050cc0
+#define TG3_PHY_ID_BCM5787		0xbc050ce0
+#define TG3_PHY_ID_BCM5756		0xbc050ed0
+#define TG3_PHY_ID_BCM5784		0xbc050fa0
+#define TG3_PHY_ID_BCM5761		0xbc050fd0
+#define TG3_PHY_ID_BCM5718C		0x5c0d8a00
+#define TG3_PHY_ID_BCM5718S		0xbc050ff0
+#define TG3_PHY_ID_BCM57765		0x5c0d8a40
+#define TG3_PHY_ID_BCM5719C		0x5c0d8a20
+#define TG3_PHY_ID_BCM5720C		0x5c0d8b60
+#define TG3_PHY_ID_BCM5762		0x85803780
+#define TG3_PHY_ID_BCM5906		0xdc00ac40
+#define TG3_PHY_ID_BCM8002		0x60010140
+#define TG3_PHY_ID_INVALID		0xffffffff
+
+#define PHY_ID_RTL8211C			0x001cc910
+#define PHY_ID_RTL8201E			0x00008200
+
+#define TG3_PHY_ID_REV_MASK		0x0000000f
+#define TG3_PHY_REV_BCM5401_B0		0x1
+
+	/* This macro assumes the passed PHY ID is
+	 * already masked with TG3_PHY_ID_MASK.
+	 */
+#define TG3_KNOWN_PHY_ID(X)		\
+	((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
+	 (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
+	 (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
+	 (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
+	 (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
+	 (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
+	 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
+	 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
+	 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
+	 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+	 (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \
+	 (X) == TG3_PHY_ID_BCM8002)
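+
+	/* Illustrative use, mirroring the PHY probe path in tg3.c: for a raw
+	 * PHY ID of, say, 0x60008051, (id & TG3_PHY_ID_MASK) yields
+	 * TG3_PHY_ID_BCM5401 and (id & TG3_PHY_ID_REV_MASK) yields
+	 * TG3_PHY_REV_BCM5401_B0, so the check is written as
+	 *
+	 *	if (TG3_KNOWN_PHY_ID(id & TG3_PHY_ID_MASK))
+	 *		...
+	 *
+	 * and never sees the revision bits.
+	 */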
+
+	u32				phy_flags;
+#define TG3_PHYFLG_IS_LOW_POWER		0x00000001
+#define TG3_PHYFLG_IS_CONNECTED		0x00000002
+#define TG3_PHYFLG_USE_MI_INTERRUPT	0x00000004
+#define TG3_PHYFLG_USER_CONFIGURED	0x00000008
+#define TG3_PHYFLG_PHY_SERDES		0x00000010
+#define TG3_PHYFLG_MII_SERDES		0x00000020
+#define TG3_PHYFLG_ANY_SERDES		(TG3_PHYFLG_PHY_SERDES |	\
+					TG3_PHYFLG_MII_SERDES)
+#define TG3_PHYFLG_IS_FET		0x00000040
+#define TG3_PHYFLG_10_100_ONLY		0x00000080
+#define TG3_PHYFLG_ENABLE_APD		0x00000100
+#define TG3_PHYFLG_CAPACITIVE_COUPLING	0x00000200
+#define TG3_PHYFLG_NO_ETH_WIRE_SPEED	0x00000400
+#define TG3_PHYFLG_JITTER_BUG		0x00000800
+#define TG3_PHYFLG_ADJUST_TRIM		0x00001000
+#define TG3_PHYFLG_ADC_BUG		0x00002000
+#define TG3_PHYFLG_5704_A0_BUG		0x00004000
+#define TG3_PHYFLG_BER_BUG		0x00008000
+#define TG3_PHYFLG_SERDES_PREEMPHASIS	0x00010000
+#define TG3_PHYFLG_PARALLEL_DETECT	0x00020000
+#define TG3_PHYFLG_EEE_CAP		0x00040000
+#define TG3_PHYFLG_1G_ON_VAUX_OK	0x00080000
+#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN	0x00100000
+#define TG3_PHYFLG_MDIX_STATE		0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV	0x00400000
+
+	u32				led_ctrl;
+	u32				phy_otp;
+	u32				setlpicnt;
+	u8				rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
+
+#define TG3_BPN_SIZE			24
+	char				board_part_number[TG3_BPN_SIZE];
+#define TG3_VER_SIZE			ETHTOOL_FWVERS_LEN
+	char				fw_ver[TG3_VER_SIZE];
+	u32				nic_sram_data_cfg;
+	u32				pci_clock_ctrl;
+	struct pci_dev			*pdev_peer;
+
+	struct tg3_hw_stats		*hw_stats;
+	dma_addr_t			stats_mapping;
+	struct work_struct		reset_task;
+
+	int				nvram_lock_cnt;
+	u32				nvram_size;
+#define TG3_NVRAM_SIZE_2KB		0x00000800
+#define TG3_NVRAM_SIZE_64KB		0x00010000
+#define TG3_NVRAM_SIZE_128KB		0x00020000
+#define TG3_NVRAM_SIZE_256KB		0x00040000
+#define TG3_NVRAM_SIZE_512KB		0x00080000
+#define TG3_NVRAM_SIZE_1MB		0x00100000
+#define TG3_NVRAM_SIZE_2MB		0x00200000
+
+	u32				nvram_pagesize;
+	u32				nvram_jedecnum;
+
+#define JEDEC_ATMEL			0x1f
+#define JEDEC_ST			0x20
+#define JEDEC_SAIFUN			0x4f
+#define JEDEC_SST			0xbf
+
+#define ATMEL_AT24C02_CHIP_SIZE		TG3_NVRAM_SIZE_2KB
+#define ATMEL_AT24C02_PAGE_SIZE		(8)
+
+#define ATMEL_AT24C64_CHIP_SIZE		TG3_NVRAM_SIZE_64KB
+#define ATMEL_AT24C64_PAGE_SIZE		(32)
+
+#define ATMEL_AT24C512_CHIP_SIZE	TG3_NVRAM_SIZE_512KB
+#define ATMEL_AT24C512_PAGE_SIZE	(128)
+
+#define ATMEL_AT45DB0X1B_PAGE_POS	9
+#define ATMEL_AT45DB0X1B_PAGE_SIZE	264
+
+#define ATMEL_AT25F512_PAGE_SIZE	256
+
+#define ST_M45PEX0_PAGE_SIZE		256
+
+#define SAIFUN_SA25F0XX_PAGE_SIZE	256
+
+#define SST_25VF0X0_PAGE_SIZE		4098
+
+	unsigned int			irq_max;
+	unsigned int			irq_cnt;
+
+	struct ethtool_coalesce		coal;
+	struct ethtool_eee		eee;
+
+	/* firmware info */
+	const char			*fw_needed;
+	const struct firmware		*fw;
+	u32				fw_len; /* includes BSS */
+
+	struct device			*hwmon_dev;
+	bool				link_up;
+	bool				pcierr_recovery;
+};
+
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ *     larger object code with gcc 4.7.
+ *     Using statement expression macros to check tp with
+ *     typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp)					\
+	((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp)					\
+	((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp)					\
+	((tp)->pci_chip_rev_id >> 8)
+
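+
+/* For an illustrative pci_chip_rev_id value of 0x05719000:
+ *	tg3_chip_rev_id(tp) == 0x05719000	(the full chip revision id)
+ *	tg3_asic_rev(tp)    == 0x5719		(compared against the ASIC_REV_* values)
+ *	tg3_chip_rev(tp)    == 0x57190		(compared against the CHIPREV_* values)
+ */
+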
+#endif /* !(_T3_H) */