File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
new file mode 100644
index 0000000..a79813a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -0,0 +1,128 @@
+#
+# Chelsio device configuration
+#
+
+config NET_VENDOR_CHELSIO
+	bool "Chelsio devices"
+	default y
+	depends on PCI
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Chelsio devices. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_CHELSIO
+
+config CHELSIO_T1
+	tristate "Chelsio 10Gb Ethernet support"
+	depends on PCI
+	select CRC32
+	select MDIO
+	---help---
+	  This driver supports Chelsio gigabit and 10-gigabit
+	  Ethernet cards. More information about adapter features and
+	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb.
+
+config CHELSIO_T1_1G
+	bool "Chelsio gigabit Ethernet support"
+	depends on CHELSIO_T1
+	---help---
+	  Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
+	  are using only 10G cards, say 'N' here.
+
+config CHELSIO_T3
+	tristate "Chelsio Communications T3 10Gb Ethernet support"
+	depends on PCI && INET
+	select FW_LOADER
+	select MDIO
+	---help---
+	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+	  adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb3.
+
+config CHELSIO_T4
+	tristate "Chelsio Communications T4/T5 Ethernet support"
+	depends on PCI && (IPV6 || IPV6=n)
+	select FW_LOADER
+	select MDIO
+	---help---
+	  This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+	  adapters and T5 based 40Gb Ethernet adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb4.
+
+config CHELSIO_T4_DCB
+	bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards"
+	default n
+	depends on CHELSIO_T4 && DCB
+	---help---
+	  Enable DCB support through the rtnetlink interface.
+	  Say Y here if you want to enable Data Center Bridging (DCB) support
+	  in the driver.
+
+	  If unsure, say N.
+
+config CHELSIO_T4_FCOE
+	bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
+	default n
+	depends on CHELSIO_T4 && CHELSIO_T4_DCB && FCOE
+	---help---
+	  Enable FCoE offload features.
+	  Say Y here if you want to enable Fibre Channel over Ethernet (FCoE) support
+	  in the driver.
+
+	  If unsure, say N.
+
+config CHELSIO_T4VF
+	tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+	  adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
+	  Functions.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb4vf.
+
+endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
new file mode 100644
index 0000000..390510b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Chelsio network device drivers.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb/
+obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+obj-$(CONFIG_CHELSIO_T4) += cxgb4/
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
diff --git a/drivers/net/ethernet/chelsio/cxgb/Makefile b/drivers/net/ethernet/chelsio/cxgb/Makefile
new file mode 100644
index 0000000..57a4b26
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/Makefile
@@ -0,0 +1,9 @@
+#
+# Chelsio T1 driver
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o
+cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
+	     mv88x201x.o my3126.o $(cxgb-y)
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
new file mode 100644
index 0000000..53b1f94
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -0,0 +1,351 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: common.h                                                            *
+ * $Revision: 1.21 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#define pr_fmt(fmt) "cxgb: " fmt
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.2"
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+struct t1_rx_mode {
+       struct net_device *dev;
+};
+
+#define t1_rx_mode_promisc(rm)	(rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm)	(rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)	(netdev_mc_count(rm->dev))
+#define t1_get_netdev(rm)	(rm->dev)
+
+#define	MAX_NPORTS 4
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+#define NMTUS      8
+#define TCB_SIZE   128
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+	CHBT_BOARD_N110,
+	CHBT_BOARD_N210,
+	CHBT_BOARD_7500,
+	CHBT_BOARD_8000,
+	CHBT_BOARD_CHT101,
+	CHBT_BOARD_CHT110,
+	CHBT_BOARD_CHT210,
+	CHBT_BOARD_CHT204,
+	CHBT_BOARD_CHT204V,
+	CHBT_BOARD_CHT204E,
+	CHBT_BOARD_CHN204,
+	CHBT_BOARD_COUGAR,
+	CHBT_BOARD_6800,
+	CHBT_BOARD_SIMUL,
+};
+
+enum {
+	CHBT_TERM_FPGA,
+	CHBT_TERM_T1,
+	CHBT_TERM_T2,
+	CHBT_TERM_T3
+};
+
+enum {
+	CHBT_MAC_CHELSIO_A,
+	CHBT_MAC_IXF1010,
+	CHBT_MAC_PM3393,
+	CHBT_MAC_VSC7321,
+	CHBT_MAC_DUMMY
+};
+
+enum {
+	CHBT_PHY_88E1041,
+	CHBT_PHY_88E1111,
+	CHBT_PHY_88X2010,
+	CHBT_PHY_XPAK,
+	CHBT_PHY_MY3126,
+	CHBT_PHY_8244,
+	CHBT_PHY_DUMMY
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+	TERM_T1A   = 0,
+	TERM_T1B   = 1,
+	TERM_T2    = 3
+};
+
+struct sge_params {
+	unsigned int cmdQ_size[2];
+	unsigned int freelQ_size[2];
+	unsigned int large_buf_capacity;
+	unsigned int rx_coalesce_usecs;
+	unsigned int last_rx_coalesce_raw;
+	unsigned int default_rx_coalesce_usecs;
+	unsigned int sample_interval_usecs;
+	unsigned int coalesce_enable;
+	unsigned int polling;
+};
+
+struct chelsio_pci_params {
+	unsigned short speed;
+	unsigned char  width;
+	unsigned char  is_pcix;
+};
+
+struct tp_params {
+	unsigned int pm_size;
+	unsigned int cm_size;
+	unsigned int pm_rx_base;
+	unsigned int pm_tx_base;
+	unsigned int pm_rx_pg_size;
+	unsigned int pm_tx_pg_size;
+	unsigned int pm_rx_num_pgs;
+	unsigned int pm_tx_num_pgs;
+	unsigned int rx_coalescing_size;
+	unsigned int use_5tuple_mode;
+};
+
+struct mc5_params {
+	unsigned int mode;       /* selects MC5 width */
+	unsigned int nservers;   /* size of server region */
+	unsigned int nroutes;    /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+#define DEFAULT_SERVER_REGION_LEN 256
+#define DEFAULT_RT_REGION_LEN 1024
+
+struct adapter_params {
+	struct sge_params sge;
+	struct mc5_params mc5;
+	struct tp_params  tp;
+	struct chelsio_pci_params pci;
+
+	const struct board_info *brd_info;
+
+	unsigned short mtus[NMTUS];
+	unsigned int   nports;          /* # of ethernet ports */
+	unsigned int   stats_update_period;
+	unsigned short chip_revision;
+	unsigned char  chip_version;
+	unsigned char  is_asic;
+	unsigned char  has_msi;
+};
+
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_duplex; /* duplex user has requested */
+	unsigned char  duplex;           /* actual link duplex */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+	struct net_device *dev;
+	struct cmac *mac;
+	struct cphy *phy;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+	u8 __iomem *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	u32 mmio_len;
+
+	struct work_struct ext_intr_handler_task;
+	struct adapter_params params;
+
+	/* Terminator modules. */
+	struct sge    *sge;
+	struct peespi *espi;
+	struct petp   *tp;
+
+	struct napi_struct napi;
+	struct port_info port[MAX_NPORTS];
+	struct delayed_work stats_update_task;
+	struct timer_list stats_update_timer;
+
+	spinlock_t tpi_lock;
+	spinlock_t work_lock;
+	spinlock_t mac_lock;
+
+	/* guards async operations */
+	spinlock_t async_lock ____cacheline_aligned;
+	u32 slow_intr_mask;
+	int t1powersave;
+};
+
+enum {                                           /* adapter flags */
+	FULL_INIT_DONE        = 1 << 0,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+	unsigned char           board;
+	unsigned char           port_number;
+	unsigned long           caps;
+	unsigned char           chip_term;
+	unsigned char           chip_mac;
+	unsigned char           chip_phy;
+	unsigned int            clock_core;
+	unsigned int            clock_mc3;
+	unsigned int            clock_mc4;
+	unsigned int            espi_nports;
+	unsigned int            clock_elmer0;
+	unsigned char           mdio_mdien;
+	unsigned char           mdio_mdiinv;
+	unsigned char           mdio_mdc;
+	unsigned char           mdio_phybaseaddr;
+	const struct gmac      *gmac;
+	const struct gphy      *gphy;
+	const struct mdio_ops  *mdio_ops;
+	const char             *desc;
+};
+
+static inline int t1_is_asic(const adapter_t *adapter)
+{
+	return adapter->params.is_asic;
+}
+
+extern const struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+				       int version, int revision)
+{
+	return adapter->params.chip_version == version &&
+	       adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+	return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+	return board_info(adap)->clock_core / 1000000;
+}
+
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+						    unsigned short ssid);
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+		     struct adapter_params *p);
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+			    int speed, int duplex, int pause);
+#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
new file mode 100644
index 0000000..a4d2a4c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -0,0 +1,174 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cphy.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+	void (*init)(adapter_t *adapter, const struct board_info *bi);
+	int  (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+		     u16 reg_addr);
+	int  (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+		      u16 reg_addr, u16 val);
+	unsigned mode_support;
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 0x1,
+	cphy_cause_error = 0x2,
+	cphy_cause_fifo_error = 0x3
+};
+
+enum {
+	PHY_LINK_UP = 0x1,
+	PHY_AUTONEG_RDY = 0x2,
+	PHY_AUTONEG_EN = 0x4
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+	void (*destroy)(struct cphy *);
+	int (*reset)(struct cphy *, int wait);
+
+	int (*interrupt_enable)(struct cphy *);
+	int (*interrupt_disable)(struct cphy *);
+	int (*interrupt_clear)(struct cphy *);
+	int (*interrupt_handler)(struct cphy *);
+
+	int (*autoneg_enable)(struct cphy *);
+	int (*autoneg_disable)(struct cphy *);
+	int (*autoneg_restart)(struct cphy *);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *, int on);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+
+	u32 mmds;
+};
+
+/* A PHY instance */
+struct cphy {
+	int state;	/* Link status state machine */
+	adapter_t *adapter;                  /* associated adapter */
+
+	struct delayed_work phy_update;
+
+	u16 bmsr;
+	int count;
+	int act_count;
+	int act_on;
+
+	u32 elmer_gpo;
+
+	const struct cphy_ops *ops;            /* PHY operations */
+	struct mdio_if_info mdio;
+	struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg,
+				 unsigned int *valp)
+{
+	int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+				      reg);
+	*valp = (rc >= 0) ? rc : -1;
+	return (rc >= 0) ? 0 : rc;
+}
+
+static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg,
+				  unsigned int val)
+{
+	return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+				     reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+				   unsigned int *valp)
+{
+	return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+				    unsigned int val)
+{
+	return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct net_device *dev,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     const struct mdio_ops *mdio_ops)
+{
+	struct adapter *adapter = netdev_priv(dev);
+	phy->adapter = adapter;
+	phy->ops     = phy_ops;
+	if (mdio_ops) {
+		phy->mdio.prtad = phy_addr;
+		phy->mdio.mmds = phy_ops->mmds;
+		phy->mdio.mode_support = mdio_ops->mode_support;
+		phy->mdio.mdio_read = mdio_ops->read;
+		phy->mdio.mdio_write = mdio_ops->write;
+	}
+	phy->mdio.dev = dev;
+}
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+	/* Construct a PHY instance with the given PHY address */
+	struct cphy *(*create)(struct net_device *dev, int phy_addr,
+			       const struct mdio_ops *mdio_ops);
+
+	/*
+	 * Reset the PHY chip.  This resets the whole PHY chip, not individual
+	 * ports.
+	 */
+	int (*reset)(adapter_t *adapter);
+};
+
+extern const struct gphy t1_my3126_ops;
+extern const struct gphy t1_mv88e1xxx_ops;
+extern const struct gphy t1_vsc8244_ops;
+extern const struct gphy t1_mv88x201x_ops;
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
new file mode 100644
index 0000000..5249686
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -0,0 +1,638 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cpl5_cmd.h                                                          *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+	CPL_PASS_OPEN_REQ     = 0x1,
+	CPL_PASS_OPEN_RPL     = 0x2,
+	CPL_PASS_ESTABLISH    = 0x3,
+	CPL_PASS_ACCEPT_REQ   = 0xE,
+	CPL_PASS_ACCEPT_RPL   = 0x4,
+	CPL_ACT_OPEN_REQ      = 0x5,
+	CPL_ACT_OPEN_RPL      = 0x6,
+	CPL_CLOSE_CON_REQ     = 0x7,
+	CPL_CLOSE_CON_RPL     = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_CLOSE_LISTSRV_RPL = 0xA,
+	CPL_ABORT_REQ         = 0xB,
+	CPL_ABORT_RPL         = 0xC,
+	CPL_PEER_CLOSE        = 0xD,
+	CPL_ACT_ESTABLISH     = 0x17,
+
+	CPL_GET_TCB           = 0x24,
+	CPL_GET_TCB_RPL       = 0x25,
+	CPL_SET_TCB           = 0x26,
+	CPL_SET_TCB_FIELD     = 0x27,
+	CPL_SET_TCB_RPL       = 0x28,
+	CPL_PCMD              = 0x29,
+
+	CPL_PCMD_READ         = 0x31,
+	CPL_PCMD_READ_RPL     = 0x32,
+
+
+	CPL_RX_DATA           = 0xA0,
+	CPL_RX_DATA_DDP       = 0xA1,
+	CPL_RX_DATA_ACK       = 0xA3,
+	CPL_RX_PKT            = 0xAD,
+	CPL_RX_ISCSI_HDR      = 0xAF,
+	CPL_TX_DATA_ACK       = 0xB0,
+	CPL_TX_DATA           = 0xB1,
+	CPL_TX_PKT            = 0xB2,
+	CPL_TX_PKT_LSO        = 0xB6,
+
+	CPL_RTE_DELETE_REQ    = 0xC0,
+	CPL_RTE_DELETE_RPL    = 0xC1,
+	CPL_RTE_WRITE_REQ     = 0xC2,
+	CPL_RTE_WRITE_RPL     = 0xD3,
+	CPL_RTE_READ_REQ      = 0xC3,
+	CPL_RTE_READ_RPL      = 0xC4,
+	CPL_L2T_WRITE_REQ     = 0xC5,
+	CPL_L2T_WRITE_RPL     = 0xD4,
+	CPL_L2T_READ_REQ      = 0xC6,
+	CPL_L2T_READ_RPL      = 0xC7,
+	CPL_SMT_WRITE_REQ     = 0xC8,
+	CPL_SMT_WRITE_RPL     = 0xD5,
+	CPL_SMT_READ_REQ      = 0xC9,
+	CPL_SMT_READ_RPL      = 0xCA,
+	CPL_ARP_MISS_REQ      = 0xCD,
+	CPL_ARP_MISS_RPL      = 0xCE,
+	CPL_MIGRATE_C2T_REQ   = 0xDC,
+	CPL_MIGRATE_C2T_RPL   = 0xDD,
+	CPL_ERROR             = 0xD7,
+
+	/* internal: driver -> TOM */
+	CPL_MSS_CHANGE        = 0xE1
+};
+
+#define NUM_CPL_CMDS 256
+
+enum CPL_error {
+	CPL_ERR_NONE               = 0,
+	CPL_ERR_TCAM_PARITY        = 1,
+	CPL_ERR_TCAM_FULL          = 3,
+	CPL_ERR_CONN_RESET         = 20,
+	CPL_ERR_CONN_EXIST         = 22,
+	CPL_ERR_ARP_MISS           = 23,
+	CPL_ERR_BAD_SYN            = 24,
+	CPL_ERR_CONN_TIMEDOUT      = 30,
+	CPL_ERR_XMIT_TIMEDOUT      = 31,
+	CPL_ERR_PERSIST_TIMEDOUT   = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT  = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_ABORT_FAILED       = 42,
+	CPL_ERR_GENERAL            = 99
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK  = 1,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE   = 0,
+	ULP_MODE_TCPDDP = 1,
+	ULP_MODE_ISCSI  = 2,
+	ULP_MODE_IWARP  = 3,
+	ULP_MODE_SSL    = 4
+};
+
+enum {
+	CPL_PASS_OPEN_ACCEPT,
+	CPL_PASS_OPEN_REJECT
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+	CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum {                // TX_PKT_LSO ethernet types
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+union opcode_tid {
+	u32 opcode_tid;
+	u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+
+struct tcp_options {
+	u16 mss;
+	u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd:4;
+	u8 ecn:1;
+	u8 sack:1;
+	u8 tstamp:1;
+#else
+	u8 tstamp:1;
+	u8 sack:1;
+	u8 ecn:1;
+	u8 rsvd:4;
+#endif
+};
+
+struct cpl_pass_open_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 opt0h;
+	u32 opt0l;
+	u32 peer_netmask;
+	u32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u8 resvd[7];
+	u8 status;
+};
+
+struct cpl_pass_establish {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	u8  l2t_idx;
+	u8  rsvd[3];
+	u32 snd_isn;
+	u32 rcv_isn;
+};
+
+struct cpl_pass_accept_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	struct tcp_options tcp_options;
+	u8  dst_mac[6];
+	u16 vlan_tag;
+	u8  src_mac[6];
+	u8  rsvd[2];
+	u32 rcv_isn;
+	u32 unknown_tcp_options;
+};
+
+struct cpl_pass_accept_rpl {
+	union opcode_tid ot;
+	u32 rsvd0;
+	u32 rsvd1;
+	u32 peer_ip;
+	u32 opt0h;
+	union {
+		u32 opt0l;
+		struct {
+		    u8 rsvd[3];
+		    u8 status;
+		};
+	};
+};
+
+struct cpl_act_open_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 opt0h;
+	u32 opt0l;
+	u32 iff_vlantag;
+	u32 rsvd;
+};
+
+struct cpl_act_open_rpl {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 new_tid;
+	u8  rsvd[3];
+	u8  status;
+};
+
+struct cpl_act_establish {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	u32 rsvd;
+	u32 snd_isn;
+	u32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+	union opcode_tid ot;
+	u16 len;
+	u8 rsvd;
+	u8 status;
+};
+
+struct cpl_set_tcb {
+	union opcode_tid ot;
+	u16 len;
+	u16 rsvd;
+};
+
+struct cpl_set_tcb_field {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 offset;
+	u32 mask;
+	u32 val;
+};
+
+struct cpl_set_tcb_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_pcmd {
+	union opcode_tid ot;
+	u16 dlen_in;
+	u16 dlen_out;
+	u32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_read {
+	union opcode_tid ot;
+	u32 rsvd1;
+	u16 rsvd2;
+	u32 addr;
+	u16 len;
+};
+
+struct cpl_pcmd_read_rpl {
+	union opcode_tid ot;
+	u16 len;
+};
+
+struct cpl_close_con_req {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+	u32 snd_nxt;
+	u32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_close_listserv_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req {
+	union opcode_tid ot;
+	u32 rsvd0;
+	u8  rsvd1;
+	u8  cmd;
+	u8  rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+	union opcode_tid ot;
+	u32 rsvd0;
+	u8  rsvd1;
+	u8  status;
+	u8  rsvd2[6];
+};
+
+struct cpl_peer_close {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_tx_data {
+	union opcode_tid ot;
+	u32 len;
+	u32 rsvd0;
+	u16 urg;
+	u16 flags;
+};
+
+struct cpl_tx_data_ack {
+	union opcode_tid ot;
+	u32 ack_seq;
+};
+
+struct cpl_rx_data {
+	union opcode_tid ot;
+	u32 len;
+	u32 seq;
+	u16 urg;
+	u8  rsvd;
+	u8  status;
+};
+
+struct cpl_rx_data_ack {
+	union opcode_tid ot;
+	u32 credit;
+};
+
+struct cpl_rx_data_ddp {
+	union opcode_tid ot;
+	u32 len;
+	u32 seq;
+	u32 nxt_seq;
+	u32 ulp_crc;
+	u16 ddp_status;
+	u8  rsvd;
+	u8  status;
+};
+
+/*
+ * We want this header's alignment to be no more stringent than 2-byte aligned.
+ * All fields are u8 or u16 except for the length.  However that field is not
+ * used so we break it into 2 16-bit parts to easily meet our alignment needs.
+ */
+struct cpl_tx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u16 len_hi;
+	u16 len_lo;
+};
+
+struct cpl_tx_pkt_lso {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 :1;
+#else
+	u8 :1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	__be32 len;
+
+	u8 rsvd[5];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
+#else
+	u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
+#endif
+	__be16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_valid:1;
+	u8 bad_pkt:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 bad_pkt:1;
+	u8 csum_valid:1;
+	u8 iff:4;
+#endif
+	u16 csum;
+	u16 vlan;
+	u16 len;
+};
+
+struct cpl_l2t_write_req {
+	union opcode_tid ot;
+	u32 params;
+	u8 rsvd1[2];
+	u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+	union opcode_tid ot;
+	u32 params;
+	u8 rsvd1[2];
+	u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+	union opcode_tid ot;
+	u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:1;
+	u8 mtu_idx:3;
+	u8 iff:4;
+#else
+	u8 iff:4;
+	u8 mtu_idx:3;
+	u8 rsvd1:1;
+#endif
+	u16 rsvd2;
+	u16 rsvd3;
+	u8  src_mac1[6];
+	u16 rsvd4;
+	u8  src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+	union opcode_tid ot;
+	u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:4;
+	u8 iff:4;
+#else
+	u8 iff:4;
+	u8 rsvd1:4;
+#endif
+	u16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+	union opcode_tid ot;
+	u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:1;
+	u8 mtu_idx:3;
+	u8 rsvd0:4;
+#else
+	u8 rsvd0:4;
+	u8 mtu_idx:3;
+	u8 rsvd1:1;
+#endif
+	u16 rsvd2;
+	u16 rsvd3;
+	u8  src_mac1[6];
+	u16 rsvd4;
+	u8  src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+	union opcode_tid ot;
+	u32 params;
+};
+
+struct cpl_rte_delete_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+	union opcode_tid ot;
+	u32 params;
+	u32 netmask;
+	u32 faddr;
+};
+
+struct cpl_rte_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+	union opcode_tid ot;
+	u32 params;
+};
+
+struct cpl_rte_read_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd0[2];
+	u8 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:7;
+	u8 select:1;
+#else
+	u8 select:1;
+	u8 rsvd1:7;
+#endif
+	u8 rsvd2[3];
+	u32 addr;
+};
+
+struct cpl_mss_change {
+	union opcode_tid ot;
+	u32 mss;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
+
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
new file mode 100644
index 0000000..f5f1b0b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -0,0 +1,1357 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cxgb2.c                                                             *
+ * $Revision: 1.25 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  Chelsio 10Gb Ethernet Driver.                                            *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "tp.h"
+#include "espi.h"
+#include "elmer0.h"
+
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+
+#define MAX_CMDQ_ENTRIES	16384
+#define MAX_CMDQ1_ENTRIES	1024
+#define MAX_RX_BUFFERS		16384
+#define MAX_RX_JUMBO_BUFFERS	16384
+#define MAX_TX_BUFFERS_HIGH	16384U
+#define MAX_TX_BUFFERS_LOW	1536U
+#define MAX_TX_BUFFERS		1460U
+#define MIN_FL_ENTRIES		32
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+
+#define HCLOCK 0x0
+#define LCLOCK 0x1
+
+/* T1 cards powersave mode */
+static int t1_clock(struct adapter *adapter, int mode);
+static int t1powersave = 1;	/* HW default is powersave mode. */
+
+module_param(t1powersave, int, 0);
+MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static const char pci_speed[][4] = {
+	"33", "66", "100", "133"
+};
+
+/*
+ * Setup MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct t1_rx_mode rm;
+
+	rm.dev = dev;
+	mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+	if (!netif_carrier_ok(p->dev))
+		netdev_info(p->dev, "link down\n");
+	else {
+		const char *s = "10Mbps";
+
+		switch (p->link_config.speed) {
+			case SPEED_10000: s = "10Gbps"; break;
+			case SPEED_1000:  s = "1000Mbps"; break;
+			case SPEED_100:   s = "100Mbps"; break;
+		}
+
+		netdev_info(p->dev, "link up, %s, %s-duplex\n",
+			    s, p->link_config.duplex == DUPLEX_FULL
+			    ? "full" : "half");
+	}
+}
+
+void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct port_info *p = &adapter->port[port_id];
+
+	if (link_stat != netif_carrier_ok(p->dev)) {
+		if (link_stat)
+			netif_carrier_on(p->dev);
+		else
+			netif_carrier_off(p->dev);
+		link_report(p);
+
+		/* multi-ports: inform toe */
+		if ((speed > 0) && (adapter->params.nports > 1)) {
+			unsigned int sched_speed = 10;
+			switch (speed) {
+			case SPEED_1000:
+				sched_speed = 1000;
+				break;
+			case SPEED_100:
+				sched_speed = 100;
+				break;
+			case SPEED_10:
+				sched_speed = 10;
+				break;
+			}
+			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+		}
+	}
+}
+
+static void link_start(struct port_info *p)
+{
+	struct cmac *mac = p->mac;
+
+	mac->ops->reset(mac);
+	if (mac->ops->macaddress_set)
+		mac->ops->macaddress_set(mac, p->dev->dev_addr);
+	t1_set_rxmode(p->dev);
+	t1_link_start(p->phy, mac, &p->link_config);
+	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
+		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
+	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = t1_init_hw_modules(adapter);
+		if (err)
+			goto out_err;
+
+		enable_hw_csum(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	t1_interrupts_clear(adapter);
+
+	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
+	err = request_irq(adapter->pdev->irq, t1_interrupt,
+			  adapter->params.has_msi ? 0 : IRQF_SHARED,
+			  adapter->name, adapter);
+	if (err) {
+		if (adapter->params.has_msi)
+			pci_disable_msi(adapter->pdev);
+
+		goto out_err;
+	}
+
+	t1_sge_start(adapter->sge);
+	t1_interrupts_enable(adapter);
+out_err:
+	return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+	t1_sge_stop(adapter->sge);
+	t1_interrupts_disable(adapter);
+	free_irq(adapter->pdev->irq, adapter);
+	if (adapter->params.has_msi)
+		pci_disable_msi(adapter->pdev);
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct adapter *adapter = dev->ml_priv;
+	int other_ports = adapter->open_device_map & PORT_MASK;
+
+	napi_enable(&adapter->napi);
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
+		napi_disable(&adapter->napi);
+		return err;
+	}
+
+	__set_bit(dev->if_port, &adapter->open_device_map);
+	link_start(&adapter->port[dev->if_port]);
+	netif_start_queue(dev);
+	if (!other_ports && adapter->params.stats_update_period)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+
+	t1_vlan_mode(adapter, dev->features);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct cmac *mac = p->mac;
+
+	netif_stop_queue(dev);
+	napi_disable(&adapter->napi);
+	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+	netif_carrier_off(dev);
+
+	clear_bit(dev->if_port, &adapter->open_device_map);
+	if (adapter->params.stats_update_period &&
+	    !(adapter->open_device_map & PORT_MASK)) {
+		/* Stop statistics accumulation. */
+		smp_mb__after_atomic();
+		spin_lock(&adapter->work_lock);   /* sync with update task */
+		spin_unlock(&adapter->work_lock);
+		cancel_mac_stats_update(adapter);
+	}
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+	return 0;
+}
+
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct net_device_stats *ns = &p->netstats;
+	const struct cmac_statistics *pstats;
+
+	/* Do a full update of the MAC stats */
+	pstats = p->mac->ops->statistics_update(p->mac,
+						MAC_STATS_UPDATE_FULL);
+
+	ns->tx_packets = pstats->TxUnicastFramesOK +
+		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+	ns->rx_packets = pstats->RxUnicastFramesOK +
+		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+	ns->tx_bytes = pstats->TxOctetsOK;
+	ns->rx_bytes = pstats->RxOctetsOK;
+
+	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+		pstats->RxFCSErrors + pstats->RxAlignErrors +
+		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+		pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+	ns->multicast  = pstats->RxMulticastFramesOK;
+	ns->collisions = pstats->TxTotalCollisions;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+		pstats->RxJabberErrors;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = pstats->RxFCSErrors;
+	ns->rx_frame_errors  = pstats->RxAlignErrors;
+	ns->rx_fifo_errors   = 0;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = pstats->TxUnderrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = pstats->TxLateCollisions;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	adapter->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK",
+	"TxOctetsBad",
+	"TxUnicastFramesOK",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames",
+	"TxFramesWithDeferredXmissions",
+	"TxLateCollisions",
+	"TxTotalCollisions",
+	"TxFramesAbortedDueToXSCollisions",
+	"TxUnderrun",
+	"TxLengthErrors",
+	"TxInternalMACXmitError",
+	"TxFramesWithExcessiveDeferral",
+	"TxFCSErrors",
+	"TxJumboFramesOk",
+	"TxJumboOctetsOk",
+	
+	"RxOctetsOK",
+	"RxOctetsBad",
+	"RxUnicastFramesOK",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames",
+	"RxFCSErrors",
+	"RxAlignErrors",
+	"RxSymbolErrors",
+	"RxDataErrors",
+	"RxSequenceErrors",
+	"RxRuntErrors",
+	"RxJabberErrors",
+	"RxInternalMACRcvError",
+	"RxInRangeLengthErrors",
+	"RxOutOfRangeLengthField",
+	"RxFrameTooLongErrors",
+	"RxJumboFramesOk",
+	"RxJumboOctetsOk",
+
+	/* Port stats */
+	"RxCsumGood",
+	"TxCsumOffload",
+	"TxTso",
+	"RxVlan",
+	"TxVlan",
+	"TxNeedHeadroom", 
+	
+	/* Interrupt stats */
+	"rx drops",
+	"pure_rsps",
+	"unhandled irqs",
+	"respQ_empty",
+	"respQ_overflow",
+	"freelistQ_empty",
+	"pkt_too_big",
+	"pkt_mismatch",
+	"cmdQ_full0",
+	"cmdQ_full1",
+
+	"espi_DIP2ParityErr",
+	"espi_DIP4Err",
+	"espi_RxDrops",
+	"espi_TxDrops",
+	"espi_RxOvfl",
+	"espi_ParityErr"
+};
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	const struct cmac_statistics *s;
+	const struct sge_intr_counts *t;
+	struct sge_port_stats ss;
+
+	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+	t = t1_sge_get_intr_counts(adapter->sge);
+	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+
+	*data++ = s->TxOctetsOK;
+	*data++ = s->TxOctetsBad;
+	*data++ = s->TxUnicastFramesOK;
+	*data++ = s->TxMulticastFramesOK;
+	*data++ = s->TxBroadcastFramesOK;
+	*data++ = s->TxPauseFrames;
+	*data++ = s->TxFramesWithDeferredXmissions;
+	*data++ = s->TxLateCollisions;
+	*data++ = s->TxTotalCollisions;
+	*data++ = s->TxFramesAbortedDueToXSCollisions;
+	*data++ = s->TxUnderrun;
+	*data++ = s->TxLengthErrors;
+	*data++ = s->TxInternalMACXmitError;
+	*data++ = s->TxFramesWithExcessiveDeferral;
+	*data++ = s->TxFCSErrors;
+	*data++ = s->TxJumboFramesOK;
+	*data++ = s->TxJumboOctetsOK;
+
+	*data++ = s->RxOctetsOK;
+	*data++ = s->RxOctetsBad;
+	*data++ = s->RxUnicastFramesOK;
+	*data++ = s->RxMulticastFramesOK;
+	*data++ = s->RxBroadcastFramesOK;
+	*data++ = s->RxPauseFrames;
+	*data++ = s->RxFCSErrors;
+	*data++ = s->RxAlignErrors;
+	*data++ = s->RxSymbolErrors;
+	*data++ = s->RxDataErrors;
+	*data++ = s->RxSequenceErrors;
+	*data++ = s->RxRuntErrors;
+	*data++ = s->RxJabberErrors;
+	*data++ = s->RxInternalMACRcvError;
+	*data++ = s->RxInRangeLengthErrors;
+	*data++ = s->RxOutOfRangeLengthField;
+	*data++ = s->RxFrameTooLongErrors;
+	*data++ = s->RxJumboFramesOK;
+	*data++ = s->RxJumboOctetsOK;
+
+	*data++ = ss.rx_cso_good;
+	*data++ = ss.tx_cso;
+	*data++ = ss.tx_tso;
+	*data++ = ss.vlan_xtract;
+	*data++ = ss.vlan_insert;
+	*data++ = ss.tx_need_hdrroom;
+	
+	*data++ = t->rx_drops;
+	*data++ = t->pure_rsps;
+	*data++ = t->unhandled_irqs;
+	*data++ = t->respQ_empty;
+	*data++ = t->respQ_overflow;
+	*data++ = t->freelistQ_empty;
+	*data++ = t->pkt_too_big;
+	*data++ = t->pkt_mismatch;
+	*data++ = t->cmdQ_full[0];
+	*data++ = t->cmdQ_full[1];
+
+	if (adapter->espi) {
+		const struct espi_intr_counts *e;
+
+		e = t1_espi_get_intr_counts(adapter->espi);
+		*data++ = e->DIP2_parity_err;
+		*data++ = e->DIP4_err;
+		*data++ = e->rx_drops;
+		*data++ = e->tx_drops;
+		*data++ = e->rx_ovflw;
+		*data++ = e->parity_err;
+	}
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->ml_priv;
+
+	/*
+	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+	 */
+	regs->version = 2;
+
+	memset(buf, 0, T2_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		ethtool_cmd_speed_set(cmd, p->link_config.speed);
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->mdio.prtad;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg))
+		return -EOPNOTSUPP;             /* can't change speed/duplex */
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		u32 speed = ethtool_cmd_speed(cmd);
+		int cap = speed_duplex_to_caps(speed, cmd->duplex);
+
+		if (!(lc->supported & cap) || (speed == SPEED_1000))
+			return -EINVAL;
+		lc->requested_speed = speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		if (cmd->advertising & (cmd->advertising - 1))
+			cmd->advertising = lc->supported;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t1_link_start(p->phy, p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
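+/*
+ * Update the requested flow-control settings.  When autonegotiation is in
+ * use the link is restarted to renegotiate; otherwise the new settings are
+ * programmed directly into the MAC.
+ */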
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t1_link_start(p->phy, p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+							 lc->fc);
+	}
+	return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->ml_priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+	e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
+
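+/*
+ * Change the free-list and command queue sizes.  The new sizes must be
+ * within the hardware limits and cannot be changed once the adapter has
+ * been fully initialized.
+ */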
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->ml_priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_CMDQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+		MAX_CMDQ1_ENTRIES : e->tx_pending;
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+	return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+
+	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
+}
+
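+/* EEPROM magic: Chelsio PCI vendor ID in the low 16 bits, chip version above. */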
+#define EEPROM_MAGIC(ap) \
+	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i;
+	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+	struct adapter *adapter = dev->ml_priv;
+
+	e->magic = EEPROM_MAGIC(adapter);
+	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
+	memcpy(data, buf + e->offset, e->len);
+	return 0;
+}
+
+static const struct ethtool_ops t1_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.get_sset_count	   = get_sset_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+};
+
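+/* Forward MII ioctls to the PHY's MDIO interface. */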
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
+
+	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct adapter *adapter = dev->ml_priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+
+	if (!mac->ops->set_mtu)
+		return -EOPNOTSUPP;
+	if (new_mtu < 68)
+		return -EINVAL;
+	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct sockaddr *addr = p;
+
+	if (!mac->ops->macaddress_set)
+		return -EOPNOTSUPP;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	mac->ops->macaddress_set(mac, dev->dev_addr);
+	return 0;
+}
+
+static netdev_features_t t1_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable, make sure the tx flag is always in the same
+	 * state as the rx flag.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+	return features;
+}
+
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
+{
+	netdev_features_t changed = dev->features ^ features;
+	struct adapter *adapter = dev->ml_priv;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		t1_vlan_mode(adapter, features);
+
+	return 0;
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+	unsigned long flags;
+	struct adapter *adapter = dev->ml_priv;
+
+	local_irq_save(flags);
+	t1_interrupt(adapter->pdev->irq, adapter);
+	local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.  This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(struct work_struct *work)
+{
+	int i;
+	struct adapter *adapter =
+		container_of(work, struct adapter, stats_update_task.work);
+
+	for_each_port(adapter, i) {
+		struct port_info *p = &adapter->port[i];
+
+		if (netif_running(p->dev))
+			p->mac->ops->statistics_update(p->mac,
+						       MAC_STATS_UPDATE_FAST);
+	}
+
+	/* Schedule the next statistics update if any port is active. */
+	spin_lock(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+	struct adapter *adapter =
+		container_of(work, struct adapter, ext_intr_handler_task);
+
+	t1_elmer0_ext_intr_handler(adapter);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+		   adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as we require
+	 * a process context.  We disable EXT interrupts in the interim
+	 * and let the task reenable them when it's done.
+	 */
+	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+		   adapter->regs + A_PL_ENABLE);
+	schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+	if (adapter->flags & FULL_INIT_DONE) {
+		t1_sge_stop(adapter->sge);
+		t1_interrupts_disable(adapter);
+	}
+	pr_alert("%s: encountered fatal error, operation suspended\n",
+		 adapter->name);
+}
+
+static const struct net_device_ops cxgb_netdev_ops = {
+	.ndo_open		= cxgb_open,
+	.ndo_stop		= cxgb_close,
+	.ndo_start_xmit		= t1_start_xmit,
+	.ndo_get_stats		= t1_get_stats,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= t1_set_rxmode,
+	.ndo_do_ioctl		= t1_ioctl,
+	.ndo_change_mtu		= t1_change_mtu,
+	.ndo_set_mac_address	= t1_set_mac_addr,
+	.ndo_fix_features	= t1_fix_features,
+	.ndo_set_features	= t1_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= t1_netpoll,
+#endif
+};
+
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int i, err, pci_using_dac = 0;
+	unsigned long mmio_start, mmio_len;
+	const struct board_info *bi;
+	struct adapter *adapter = NULL;
+	struct port_info *pi;
+
+	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		pr_err("%s: cannot find PCI device memory base address\n",
+		       pci_name(pdev));
+		err = -ENODEV;
+		goto out_disable_pdev;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+			pr_err("%s: unable to obtain 64-bit DMA for "
+			       "consistent allocations\n", pci_name(pdev));
+			err = -ENODEV;
+			goto out_disable_pdev;
+		}
+
+	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bi = t1_get_board_info(ent->driver_data);
+
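+	/*
+	 * Allocate one net_device per port.  The adapter structure is
+	 * allocated as part of the first net_device's private area.
+	 */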
+	for (i = 0; i < bi->port_number; ++i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		if (!adapter) {
+			adapter = netdev_priv(netdev);
+			adapter->pdev = pdev;
+			adapter->port[0].dev = netdev;  /* so we don't leak it */
+
+			adapter->regs = ioremap(mmio_start, mmio_len);
+			if (!adapter->regs) {
+				pr_err("%s: cannot map device registers\n",
+				       pci_name(pdev));
+				err = -ENOMEM;
+				goto out_free_dev;
+			}
+
+			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
+				err = -ENODEV;	  /* Can't handle this chip rev */
+				goto out_free_dev;
+			}
+
+			adapter->name = pci_name(pdev);
+			adapter->msg_enable = dflt_msg_enable;
+			adapter->mmio_len = mmio_len;
+
+			spin_lock_init(&adapter->tpi_lock);
+			spin_lock_init(&adapter->work_lock);
+			spin_lock_init(&adapter->async_lock);
+			spin_lock_init(&adapter->mac_lock);
+
+			INIT_WORK(&adapter->ext_intr_handler_task,
+				  ext_intr_task);
+			INIT_DELAYED_WORK(&adapter->stats_update_task,
+					  mac_stats_task);
+
+			pci_set_drvdata(pdev, netdev);
+		}
+
+		pi = &adapter->port[i];
+		pi->dev = netdev;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+		netdev->if_port = i;
+		netdev->mem_start = mmio_start;
+		netdev->mem_end = mmio_start + mmio_len - 1;
+		netdev->ml_priv = adapter;
+		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+			NETIF_F_RXCSUM;
+		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+			NETIF_F_RXCSUM | NETIF_F_LLTX;
+
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+		if (vlan_tso_capable(adapter)) {
+			netdev->features |=
+				NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+			/* T204: disable TSO */
+			if (!(is_T2(adapter)) || bi->port_number != 4) {
+				netdev->hw_features |= NETIF_F_TSO;
+				netdev->features |= NETIF_F_TSO;
+			}
+		}
+
+		netdev->netdev_ops = &cxgb_netdev_ops;
+		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
+			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
+
+		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+
+		netdev->ethtool_ops = &t1_ethtool_ops;
+	}
+
+	if (t1_init_sw_modules(adapter, bi) < 0) {
+		err = -ENODEV;
+		goto out_free_dev;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for (i = 0; i < bi->port_number; ++i) {
+		err = register_netdev(adapter->port[i].dev);
+		if (err)
+			pr_warn("%s: cannot register net device %s, skipping\n",
+				pci_name(pdev), adapter->port[i].dev->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i].dev->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+		}
+	}
+	if (!adapter->registered_device_map) {
+		pr_err("%s: could not register any net devices\n",
+		       pci_name(pdev));
+		goto out_release_adapter_res;
+	}
+
+	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
+		adapter->name, bi->desc, adapter->params.chip_revision,
+		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+		adapter->params.pci.speed, adapter->params.pci.width);
+
+	/*
+	 * Set the T1B ASIC and memory clocks.
+	 */
+	if (t1powersave)
+		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
+	else
+		adapter->t1powersave = HCLOCK;
+	if (t1_is_T1B(adapter))
+		t1_clock(adapter, t1powersave);
+
+	return 0;
+
+out_release_adapter_res:
+	t1_free_sw_modules(adapter);
+out_free_dev:
+	if (adapter) {
+		if (adapter->regs)
+			iounmap(adapter->regs);
+		for (i = bi->port_number - 1; i >= 0; --i)
+			if (adapter->port[i].dev)
+				free_netdev(adapter->port[i].dev);
+	}
+	pci_release_regions(pdev);
+out_disable_pdev:
+	pci_disable_device(pdev);
+	return err;
+}
+
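+/*
+ * Bit-bang 'bitdata' out on the ELMER0 GPO data line, most significant bit
+ * first, pulsing the serial clock low and then high for each bit.
+ */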
+static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+{
+	int data;
+	int i;
+	u32 val;
+
+	enum {
+		S_CLOCK = 1 << 3,
+		S_DATA = 1 << 4
+	};
+
+	for (i = (nbits - 1); i > -1; i--) {
+
+		udelay(50);
+
+		data = ((bitdata >> i) & 0x1);
+		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+
+		if (data)
+			val |= S_DATA;
+		else
+			val &= ~S_DATA;
+
+		udelay(50);
+
+		/* Set SCLOCK low */
+		val &= ~S_CLOCK;
+		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		udelay(50);
+
+		/* Write SCLOCK high */
+		val |= S_CLOCK;
+		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	}
+}
+
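+/*
+ * Switch the T1B ASIC core and memory clocks between the high-speed and
+ * power-save settings by serially programming the clock synthesizers
+ * through the ELMER0 GPO pins.
+ */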
+static int t1_clock(struct adapter *adapter, int mode)
+{
+	u32 val;
+	int M_CORE_VAL;
+	int M_MEM_VAL;
+
+	enum {
+		M_CORE_BITS	= 9,
+		T_CORE_VAL	= 0,
+		T_CORE_BITS	= 2,
+		N_CORE_VAL	= 0,
+		N_CORE_BITS	= 2,
+		M_MEM_BITS	= 9,
+		T_MEM_VAL	= 0,
+		T_MEM_BITS	= 2,
+		N_MEM_VAL	= 0,
+		N_MEM_BITS	= 2,
+		NP_LOAD		= 1 << 17,
+		S_LOAD_MEM	= 1 << 5,
+		S_LOAD_CORE	= 1 << 6,
+		S_CLOCK		= 1 << 3
+	};
+
+	if (!t1_is_T1B(adapter))
+		return -ENODEV;	/* Can't re-clock this chip. */
+
+	if (mode & 2)
+		return 0;	/* show current mode. */
+
+	if ((adapter->t1powersave & 1) == (mode & 1))
+		return -EALREADY;	/* ASIC already running in the requested mode. */
+
+	if ((mode & 1) == HCLOCK) {
+		M_CORE_VAL = 0x14;
+		M_MEM_VAL = 0x18;
+		adapter->t1powersave = HCLOCK;	/* overclock */
+	} else {
+		M_CORE_VAL = 0xe;
+		M_MEM_VAL = 0x10;
+		adapter->t1powersave = LCLOCK;	/* underclock */
+	}
+
+	/* Don't interrupt this serial stream! */
+	spin_lock(&adapter->tpi_lock);
+
+	/* Initialize for ASIC core */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= NP_LOAD;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_CORE;
+	val &= ~S_CLOCK;
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Serial program the ASIC clock synthesizer */
+	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+	udelay(50);
+
+	/* Finish ASIC core */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= S_LOAD_CORE;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_CORE;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Initialize for memory */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= NP_LOAD;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_MEM;
+	val &= ~S_CLOCK;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Serial program the memory clock synthesizer */
+	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+	udelay(50);
+
+	/* Finish memory */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= S_LOAD_MEM;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_MEM;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	spin_unlock(&adapter->tpi_lock);
+
+	return 0;
+}
+
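+/*
+ * Reset the chip by cycling it through PCI power states D3hot and D0 via
+ * the power management control/status register.
+ */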
+static inline void t1_sw_reset(struct pci_dev *pdev)
+{
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct adapter *adapter = dev->ml_priv;
+	int i;
+
+	for_each_port(adapter, i) {
+		if (test_bit(i, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[i].dev);
+	}
+
+	t1_free_sw_modules(adapter);
+	iounmap(adapter->regs);
+
+	while (--i >= 0) {
+		if (adapter->port[i].dev)
+			free_netdev(adapter->port[i].dev);
+	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	t1_sw_reset(pdev);
+}
+
+static struct pci_driver cxgb_pci_driver = {
+	.name     = DRV_NAME,
+	.id_table = t1_pci_tbl,
+	.probe    = init_one,
+	.remove   = remove_one,
+};
+
+module_pci_driver(cxgb_pci_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
new file mode 100644
index 0000000..81526ad
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -0,0 +1,157 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: elmer0.h                                                            *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 22:49:43 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ELMER0_H_
+#define _CXGB_ELMER0_H_
+
+/* ELMER0 flavors */
+enum {
+	ELMER0_XC2S300E_6FT256_C,
+	ELMER0_XC2S100E_6TQ144_C
+};
+
+/* ELMER0 registers */
+#define A_ELMER0_VERSION	0x100000
+#define A_ELMER0_PHY_CFG	0x100004
+#define A_ELMER0_INT_ENABLE	0x100008
+#define A_ELMER0_INT_CAUSE	0x10000c
+#define A_ELMER0_GPI_CFG	0x100010
+#define A_ELMER0_GPI_STAT	0x100014
+#define A_ELMER0_GPO		0x100018
+#define A_ELMER0_PORT0_MI1_CFG	0x400000
+
+#define S_MI1_MDI_ENABLE    0
+#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
+#define F_MI1_MDI_ENABLE    V_MI1_MDI_ENABLE(1U)
+
+#define S_MI1_MDI_INVERT    1
+#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
+#define F_MI1_MDI_INVERT    V_MI1_MDI_INVERT(1U)
+
+#define S_MI1_PREAMBLE_ENABLE    2
+#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
+#define F_MI1_PREAMBLE_ENABLE    V_MI1_PREAMBLE_ENABLE(1U)
+
+#define S_MI1_SOF    3
+#define M_MI1_SOF    0x3
+#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
+#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
+
+#define S_MI1_CLK_DIV    5
+#define M_MI1_CLK_DIV    0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
+
+#define A_ELMER0_PORT0_MI1_ADDR 0x400004
+
+#define S_MI1_REG_ADDR    0
+#define M_MI1_REG_ADDR    0x1f
+#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
+#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
+
+#define S_MI1_PHY_ADDR    5
+#define M_MI1_PHY_ADDR    0x1f
+#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
+#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
+
+#define A_ELMER0_PORT0_MI1_DATA 0x400008
+
+#define S_MI1_DATA    0
+#define M_MI1_DATA    0xffff
+#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
+#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
+
+#define A_ELMER0_PORT0_MI1_OP 0x40000c
+
+#define S_MI1_OP    0
+#define M_MI1_OP    0x3
+#define V_MI1_OP(x) ((x) << S_MI1_OP)
+#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
+
+#define S_MI1_ADDR_AUTOINC    2
+#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
+#define F_MI1_ADDR_AUTOINC    V_MI1_ADDR_AUTOINC(1U)
+
+#define S_MI1_OP_BUSY    31
+#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
+#define F_MI1_OP_BUSY    V_MI1_OP_BUSY(1U)
+
+#define A_ELMER0_PORT1_MI1_CFG	0x500000
+#define A_ELMER0_PORT1_MI1_ADDR	0x500004
+#define A_ELMER0_PORT1_MI1_DATA	0x500008
+#define A_ELMER0_PORT1_MI1_OP	0x50000c
+#define A_ELMER0_PORT2_MI1_CFG	0x600000
+#define A_ELMER0_PORT2_MI1_ADDR	0x600004
+#define A_ELMER0_PORT2_MI1_DATA	0x600008
+#define A_ELMER0_PORT2_MI1_OP	0x60000c
+#define A_ELMER0_PORT3_MI1_CFG	0x700000
+#define A_ELMER0_PORT3_MI1_ADDR	0x700004
+#define A_ELMER0_PORT3_MI1_DATA	0x700008
+#define A_ELMER0_PORT3_MI1_OP	0x70000c
+
+/* Simple bit definitions for the GPI and GPO registers. */
+#define     ELMER0_GP_BIT0              0x0001
+#define     ELMER0_GP_BIT1              0x0002
+#define     ELMER0_GP_BIT2              0x0004
+#define     ELMER0_GP_BIT3              0x0008
+#define     ELMER0_GP_BIT4              0x0010
+#define     ELMER0_GP_BIT5              0x0020
+#define     ELMER0_GP_BIT6              0x0040
+#define     ELMER0_GP_BIT7              0x0080
+#define     ELMER0_GP_BIT8              0x0100
+#define     ELMER0_GP_BIT9              0x0200
+#define     ELMER0_GP_BIT10             0x0400
+#define     ELMER0_GP_BIT11             0x0800
+#define     ELMER0_GP_BIT12             0x1000
+#define     ELMER0_GP_BIT13             0x2000
+#define     ELMER0_GP_BIT14             0x4000
+#define     ELMER0_GP_BIT15             0x8000
+#define     ELMER0_GP_BIT16             0x10000
+#define     ELMER0_GP_BIT17             0x20000
+#define     ELMER0_GP_BIT18             0x40000
+#define     ELMER0_GP_BIT19             0x80000
+
+#define MI1_OP_DIRECT_WRITE 1
+#define MI1_OP_DIRECT_READ  2
+
+#define MI1_OP_INDIRECT_ADDRESS  0
+#define MI1_OP_INDIRECT_WRITE    1
+#define MI1_OP_INDIRECT_READ_INC 2
+#define MI1_OP_INDIRECT_READ     3
+
+#endif /* _CXGB_ELMER0_H_ */
+
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
new file mode 100644
index 0000000..3e182ee
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -0,0 +1,372 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.c                                                              *
+ * $Revision: 1.14 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  Ethernet SPI functionality.                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "espi.h"
+
+struct peespi {
+	adapter_t *adapter;
+	struct espi_intr_counts intr_cnt;
+	u32 misc_ctrl;
+	spinlock_t lock;
+};
+
+#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
+			F_RAMPARITYERR | F_DIP2PARITYERR)
+#define MON_MASK  (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
+		   | F_MONITORED_INTERFACE)
+
+#define TRICN_CNFG 14
+#define TRICN_CMD_READ  0x11
+#define TRICN_CMD_WRITE 0x21
+#define TRICN_CMD_ATTEMPTS 10
+
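+/*
+ * Issue a single TRICN register write through the ESPI command interface
+ * and poll the busy bit until the command completes or the attempt limit
+ * is reached.
+ */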
+static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
+		       int ch_addr, int reg_offset, u32 wr_data)
+{
+	int busy, attempts = TRICN_CMD_ATTEMPTS;
+
+	writel(V_WRITE_DATA(wr_data) |
+	       V_REGISTER_OFFSET(reg_offset) |
+	       V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
+	       V_BUNDLE_ADDR(bundle_addr) |
+	       V_SPI4_COMMAND(TRICN_CMD_WRITE),
+	       adapter->regs + A_ESPI_CMD_ADDR);
+	writel(0, adapter->regs + A_ESPI_GOSTAT);
+
+	do {
+		busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
+	} while (busy && --attempts);
+
+	if (busy)
+		pr_err("%s: TRICN write timed out\n", adapter->name);
+
+	return busy;
+}
+
+static int tricn_init(adapter_t *adapter)
+{
+	int i, sme = 1;
+
+	if (!(readl(adapter->regs + A_ESPI_RX_RESET)  & F_RX_CLK_STATUS)) {
+		pr_err("%s: ESPI clock not ready\n", adapter->name);
+		return -1;
+	}
+
+	writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
+
+	if (sme) {
+		tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+	}
+	for (i = 1; i <= 8; i++)
+		tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 2; i++)
+		tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 3; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
+	tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
+
+	writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
+	       adapter->regs + A_ESPI_RX_RESET);
+
+	return 0;
+}
+
+void t1_espi_intr_enable(struct peespi *espi)
+{
+	u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	/*
+	 * Cannot enable ESPI interrupts on T1B because HW asserts the
+	 * interrupt incorrectly, namely the driver gets ESPI interrupts
+	 * but no data is actually dropped (this can be verified by reading
+	 * the ESPI drop registers).  Also, once the ESPI interrupt is
+	 * asserted it cannot be cleared (HW bug).
+	 */
+	enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
+	writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+void t1_espi_intr_clear(struct peespi *espi)
+{
+	readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+	writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
+}
+
+void t1_espi_intr_disable(struct peespi *espi)
+{
+	u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+int t1_espi_intr_handler(struct peespi *espi)
+{
+	u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+	if (status & F_DIP4ERR)
+		espi->intr_cnt.DIP4_err++;
+	if (status & F_RXDROP)
+		espi->intr_cnt.rx_drops++;
+	if (status & F_TXDROP)
+		espi->intr_cnt.tx_drops++;
+	if (status & F_RXOVERFLOW)
+		espi->intr_cnt.rx_ovflw++;
+	if (status & F_RAMPARITYERR)
+		espi->intr_cnt.parity_err++;
+	if (status & F_DIP2PARITYERR) {
+		espi->intr_cnt.DIP2_parity_err++;
+
+		/*
+		 * Must read the error count to clear the interrupt
+		 * that it causes.
+		 */
+		readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+	}
+
+	/*
+	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+	 * write the status as is.
+	 */
+	if (status && t1_is_T1B(espi->adapter))
+		status = 1;
+	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	return 0;
+}
+
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
+{
+	return &espi->intr_cnt;
+}
+
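+/*
+ * Program the ESPI scheduler tokens, RX FIFO watermarks, calendar length
+ * and port configuration for the PM3393 MAC.
+ */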
+static void espi_setup_for_pm3393(adapter_t *adapter)
+{
+	u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
+
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
+	writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+	writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+	writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
+}
+
+static void espi_setup_for_vsc7321(adapter_t *adapter)
+{
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+	writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
+
+	writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+}
+
+/*
+ * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
+ */
+static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
+{
+	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	if (nports == 4) {
+		if (is_T2(adapter)) {
+			writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+			writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+		} else {
+			writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+			writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+		}
+	} else {
+		writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+		writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	}
+	writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
+}
+
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
+	u32 status_enable_extra = 0;
+	adapter_t *adapter = espi->adapter;
+
+	/* Disable ESPI training.  MACs that can handle it enable it below. */
+	writel(0, adapter->regs + A_ESPI_TRAIN);
+
+	if (is_T2(adapter)) {
+		writel(V_OUT_OF_SYNC_COUNT(4) |
+		       V_DIP2_PARITY_ERR_THRES(3) |
+		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
+		writel(nports == 4 ? 0x200040 : 0x1000080,
+		       adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+	} else
+		writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+
+	if (mac_type == CHBT_MAC_PM3393)
+		espi_setup_for_pm3393(adapter);
+	else if (mac_type == CHBT_MAC_VSC7321)
+		espi_setup_for_vsc7321(adapter);
+	else if (mac_type == CHBT_MAC_IXF1010) {
+		status_enable_extra = F_INTEL1010MODE;
+		espi_setup_for_ixf1010(adapter, nports);
+	} else
+		return -1;
+
+	writel(status_enable_extra | F_RXSTATUSENABLE,
+	       adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
+
+	if (is_T2(adapter)) {
+		tricn_init(adapter);
+		/*
+		 * Always position the control at the 1st port egress IN
+		 * (sop,eop) counter to reduce PIOs for the T/N210 workaround.
+		 */
+		espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
+		espi->misc_ctrl &= ~MON_MASK;
+		espi->misc_ctrl |= F_MONITORED_DIRECTION;
+		if (adapter->params.nports == 1)
+			espi->misc_ctrl |= F_MONITORED_INTERFACE;
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+		spin_lock_init(&espi->lock);
+	}
+
+	return 0;
+}
+
+void t1_espi_destroy(struct peespi *espi)
+{
+	kfree(espi);
+}
+
+struct peespi *t1_espi_create(adapter_t *adapter)
+{
+	struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL);
+
+	if (espi)
+		espi->adapter = adapter;
+	return espi;
+}
+
+#if 0
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
+{
+	struct peespi *espi = adapter->espi;
+
+	if (!is_T2(adapter))
+		return;
+	spin_lock(&espi->lock);
+	espi->misc_ctrl = (val & ~MON_MASK) |
+			  (espi->misc_ctrl & MON_MASK);
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+}
+#endif  /*  0  */
+
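+/*
+ * Read the monitor counter for the port selected by 'addr', temporarily
+ * switching the monitored port if it differs from the current selection.
+ * With 'wait' clear the function returns 0 instead of spinning on the lock.
+ */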
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
+{
+	struct peespi *espi = adapter->espi;
+	u32 sel;
+
+	if (!is_T2(adapter))
+		return 0;
+
+	sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return 0;
+	} else
+		spin_lock(&espi->lock);
+
+	if ((sel != (espi->misc_ctrl & MON_MASK))) {
+		writel(((espi->misc_ctrl & ~MON_MASK) | sel),
+		       adapter->regs + A_ESPI_MISC_CONTROL);
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	} else
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	spin_unlock(&espi->lock);
+	return sel;
+}
+
+/*
+ * This function is for the T204 only.
+ * Unlike t1_espi_get_mon(), it reads espiInTxSop[0..3] in one shot,
+ * since there is no per-port counter on the out side.
+ */
+int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+{
+	struct peespi *espi = adapter->espi;
+	u8 i, nport = (u8)adapter->params.nports;
+
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return -1;
+	} else
+		spin_lock(&espi->lock);
+
+	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
+		espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
+					F_MONITORED_DIRECTION;
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	}
+	for (i = 0 ; i < nport; i++, valp++) {
+		if (i) {
+			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
+			       adapter->regs + A_ESPI_MISC_CONTROL);
+		}
+		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	}
+
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
new file mode 100644
index 0000000..162de52
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -0,0 +1,67 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ESPI_H_
+#define _CXGB_ESPI_H_
+
+#include "common.h"
+
+struct espi_intr_counts {
+	unsigned int DIP4_err;
+	unsigned int rx_drops;
+	unsigned int tx_drops;
+	unsigned int rx_ovflw;
+	unsigned int parity_err;
+	unsigned int DIP2_parity_err;
+};
+
+struct peespi;
+
+struct peespi *t1_espi_create(adapter_t *adapter);
+void t1_espi_destroy(struct peespi *espi);
+int t1_espi_init(struct peespi *espi, int mac_type, int nports);
+
+void t1_espi_intr_enable(struct peespi *);
+void t1_espi_intr_clear(struct peespi *);
+void t1_espi_intr_disable(struct peespi *);
+int t1_espi_intr_handler(struct peespi *);
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
+
+#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
new file mode 100644
index 0000000..ccdb2bc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
@@ -0,0 +1,232 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
+
+/*
+ * FPGA specific definitions
+ */
+
+#ifndef __CHELSIO_FPGA_DEFS_H__
+#define __CHELSIO_FPGA_DEFS_H__
+
+#define FPGA_PCIX_ADDR_VERSION               0xA08
+#define FPGA_PCIX_ADDR_STAT                  0xA0C
+
+/* FPGA master interrupt Cause/Enable bits */
+#define FPGA_PCIX_INTERRUPT_SGE_ERROR        0x1
+#define FPGA_PCIX_INTERRUPT_SGE_DATA         0x2
+#define FPGA_PCIX_INTERRUPT_TP               0x4
+#define FPGA_PCIX_INTERRUPT_MC3              0x8
+#define FPGA_PCIX_INTERRUPT_GMAC             0x10
+#define FPGA_PCIX_INTERRUPT_PCIX             0x20
+
+/* TP interrupt register addresses */
+#define FPGA_TP_ADDR_INTERRUPT_ENABLE        0xA10
+#define FPGA_TP_ADDR_INTERRUPT_CAUSE         0xA14
+#define FPGA_TP_ADDR_VERSION                 0xA18
+
+/* TP interrupt Cause/Enable bits */
+#define FPGA_TP_INTERRUPT_MC4                0x1
+#define FPGA_TP_INTERRUPT_MC5                0x2
+
+/*
+ * PM interrupt register addresses
+ */
+#define FPGA_MC3_REG_INTRENABLE              0xA20
+#define FPGA_MC3_REG_INTRCAUSE               0xA24
+#define FPGA_MC3_REG_VERSION                 0xA28
+
+/*
+ * GMAC interrupt register addresses
+ */
+#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE      0xA30
+#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE       0xA34
+#define FPGA_GMAC_ADDR_VERSION               0xA38
+
+/* GMAC Cause/Enable bits */
+#define FPGA_GMAC_INTERRUPT_PORT0            0x1
+#define FPGA_GMAC_INTERRUPT_PORT1            0x2
+#define FPGA_GMAC_INTERRUPT_PORT2            0x4
+#define FPGA_GMAC_INTERRUPT_PORT3            0x8
+
+/* MI0 registers */
+#define A_MI0_CLK 0xb00
+
+#define S_MI0_CLK_DIV    0
+#define M_MI0_CLK_DIV    0xff
+#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
+#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
+
+#define S_MI0_CLK_CNT    8
+#define M_MI0_CLK_CNT    0xff
+#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
+#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
+
+#define A_MI0_CSR 0xb04
+
+#define S_MI0_CSR_POLL    0
+#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
+#define F_MI0_CSR_POLL    V_MI0_CSR_POLL(1U)
+
+#define S_MI0_PREAMBLE    1
+#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
+#define F_MI0_PREAMBLE    V_MI0_PREAMBLE(1U)
+
+#define S_MI0_INTR_ENABLE    2
+#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
+#define F_MI0_INTR_ENABLE    V_MI0_INTR_ENABLE(1U)
+
+#define S_MI0_BUSY    3
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
+#define F_MI0_BUSY    V_MI0_BUSY(1U)
+
+#define S_MI0_MDIO    4
+#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
+#define F_MI0_MDIO    V_MI0_MDIO(1U)
+
+#define A_MI0_ADDR 0xb08
+
+#define S_MI0_PHY_REG_ADDR    0
+#define M_MI0_PHY_REG_ADDR    0x1f
+#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
+#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
+
+#define S_MI0_PHY_ADDR    5
+#define M_MI0_PHY_ADDR    0x1f
+#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
+#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
+
+#define A_MI0_DATA_EXT 0xb0c
+#define A_MI0_DATA_INT 0xb10
+
+/* GMAC registers */
+#define A_GMAC_MACID_LO	0x28
+#define A_GMAC_MACID_HI	0x2c
+#define A_GMAC_CSR	0x30
+
+#define S_INTERFACE    0
+#define M_INTERFACE    0x3
+#define V_INTERFACE(x) ((x) << S_INTERFACE)
+#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
+
+#define S_MAC_TX_ENABLE    2
+#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
+#define F_MAC_TX_ENABLE    V_MAC_TX_ENABLE(1U)
+
+#define S_MAC_RX_ENABLE    3
+#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
+#define F_MAC_RX_ENABLE    V_MAC_RX_ENABLE(1U)
+
+#define S_MAC_LB_ENABLE    4
+#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
+#define F_MAC_LB_ENABLE    V_MAC_LB_ENABLE(1U)
+
+#define S_MAC_SPEED    5
+#define M_MAC_SPEED    0x3
+#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
+#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
+
+#define S_MAC_HD_FC_ENABLE    7
+#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
+#define F_MAC_HD_FC_ENABLE    V_MAC_HD_FC_ENABLE(1U)
+
+#define S_MAC_HALF_DUPLEX    8
+#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
+#define F_MAC_HALF_DUPLEX    V_MAC_HALF_DUPLEX(1U)
+
+#define S_MAC_PROMISC    9
+#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
+#define F_MAC_PROMISC    V_MAC_PROMISC(1U)
+
+#define S_MAC_MC_ENABLE    10
+#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
+#define F_MAC_MC_ENABLE    V_MAC_MC_ENABLE(1U)
+
+#define S_MAC_RESET    11
+#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
+#define F_MAC_RESET    V_MAC_RESET(1U)
+
+#define S_MAC_RX_PAUSE_ENABLE    12
+#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
+#define F_MAC_RX_PAUSE_ENABLE    V_MAC_RX_PAUSE_ENABLE(1U)
+
+#define S_MAC_TX_PAUSE_ENABLE    13
+#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
+#define F_MAC_TX_PAUSE_ENABLE    V_MAC_TX_PAUSE_ENABLE(1U)
+
+#define S_MAC_LWM_ENABLE    14
+#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
+#define F_MAC_LWM_ENABLE    V_MAC_LWM_ENABLE(1U)
+
+#define S_MAC_MAGIC_PKT_ENABLE    15
+#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
+#define F_MAC_MAGIC_PKT_ENABLE    V_MAC_MAGIC_PKT_ENABLE(1U)
+
+#define S_MAC_ISL_ENABLE    16
+#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
+#define F_MAC_ISL_ENABLE    V_MAC_ISL_ENABLE(1U)
+
+#define S_MAC_JUMBO_ENABLE    17
+#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
+#define F_MAC_JUMBO_ENABLE    V_MAC_JUMBO_ENABLE(1U)
+
+#define S_MAC_RX_PAD_ENABLE    18
+#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
+#define F_MAC_RX_PAD_ENABLE    V_MAC_RX_PAD_ENABLE(1U)
+
+#define S_MAC_RX_CRC_ENABLE    19
+#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
+#define F_MAC_RX_CRC_ENABLE    V_MAC_RX_CRC_ENABLE(1U)
+
+#define A_GMAC_IFS 0x34
+
+#define S_MAC_IFS2    0
+#define M_MAC_IFS2    0x3f
+#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
+#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
+
+#define S_MAC_IFS1    8
+#define M_MAC_IFS1    0x7f
+#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
+#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
+
+#define A_GMAC_JUMBO_FRAME_LEN 0x38
+#define A_GMAC_LNK_DLY 0x3c
+#define A_GMAC_PAUSETIME 0x40
+#define A_GMAC_MCAST_LO 0x44
+#define A_GMAC_MCAST_HI 0x48
+#define A_GMAC_MCAST_MASK_LO 0x4c
+#define A_GMAC_MCAST_MASK_HI 0x50
+#define A_GMAC_RMT_CNT 0x54
+#define A_GMAC_RMT_DATA 0x58
+#define A_GMAC_BACKOFF_SEED 0x5c
+#define A_GMAC_TXF_THRES 0x60
+
+#define S_TXF_READ_THRESHOLD    0
+#define M_TXF_READ_THRESHOLD    0xff
+#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
+#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
+
+#define S_TXF_WRITE_THRESHOLD    16
+#define M_TXF_WRITE_THRESHOLD    0xff
+#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
+#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
+
+#define MAC_REG_BASE 0x600
+#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
+
+#define MAC_REG_IDLO(idx)              MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
+#define MAC_REG_IDHI(idx)              MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
+#define MAC_REG_CSR(idx)               MAC_REG_ADDR(idx, A_GMAC_CSR)
+#define MAC_REG_IFS(idx)               MAC_REG_ADDR(idx, A_GMAC_IFS)
+#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
+#define MAC_REG_LINKDLY(idx)           MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
+#define MAC_REG_PAUSETIME(idx)         MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
+#define MAC_REG_CASTLO(idx)            MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
+#define MAC_REG_MCASTHI(idx)           MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
+#define MAC_REG_CASTMASKLO(idx)        MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
+#define MAC_REG_MCASTMASKHI(idx)       MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
+#define MAC_REG_RMCNT(idx)             MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
+#define MAC_REG_RMDATA(idx)            MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
+#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
+#define MAC_REG_TXFTHRESHOLDS(idx)     MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
new file mode 100644
index 0000000..dfa7749
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -0,0 +1,141 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: gmac.h                                                              *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  Generic MAC functionality.                                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_GMAC_H_
+#define _CXGB_GMAC_H_
+
+#include "common.h"
+
+enum {
+	MAC_STATS_UPDATE_FAST,
+	MAC_STATS_UPDATE_FULL
+};
+
+enum {
+	MAC_DIRECTION_RX = 1,
+	MAC_DIRECTION_TX = 2
+};
+
+struct cmac_statistics {
+	/* Transmit */
+	u64 TxOctetsOK;
+	u64 TxOctetsBad;
+	u64 TxUnicastFramesOK;
+	u64 TxMulticastFramesOK;
+	u64 TxBroadcastFramesOK;
+	u64 TxPauseFrames;
+	u64 TxFramesWithDeferredXmissions;
+	u64 TxLateCollisions;
+	u64 TxTotalCollisions;
+	u64 TxFramesAbortedDueToXSCollisions;
+	u64 TxUnderrun;
+	u64 TxLengthErrors;
+	u64 TxInternalMACXmitError;
+	u64 TxFramesWithExcessiveDeferral;
+	u64 TxFCSErrors;
+	u64 TxJumboFramesOK;
+	u64 TxJumboOctetsOK;
+
+	/* Receive */
+	u64 RxOctetsOK;
+	u64 RxOctetsBad;
+	u64 RxUnicastFramesOK;
+	u64 RxMulticastFramesOK;
+	u64 RxBroadcastFramesOK;
+	u64 RxPauseFrames;
+	u64 RxFCSErrors;
+	u64 RxAlignErrors;
+	u64 RxSymbolErrors;
+	u64 RxDataErrors;
+	u64 RxSequenceErrors;
+	u64 RxRuntErrors;
+	u64 RxJabberErrors;
+	u64 RxInternalMACRcvError;
+	u64 RxInRangeLengthErrors;
+	u64 RxOutOfRangeLengthField;
+	u64 RxFrameTooLongErrors;
+	u64 RxJumboFramesOK;
+	u64 RxJumboOctetsOK;
+};
+
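+/* Hardware-specific MAC operations implemented by each supported MAC. */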
+struct cmac_ops {
+	void (*destroy)(struct cmac *);
+	int (*reset)(struct cmac *);
+	int (*interrupt_enable)(struct cmac *);
+	int (*interrupt_disable)(struct cmac *);
+	int (*interrupt_clear)(struct cmac *);
+	int (*interrupt_handler)(struct cmac *);
+
+	int (*enable)(struct cmac *, int);
+	int (*disable)(struct cmac *, int);
+
+	int (*loopback_enable)(struct cmac *);
+	int (*loopback_disable)(struct cmac *);
+
+	int (*set_mtu)(struct cmac *, int mtu);
+	int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
+
+	int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
+	int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
+				   int *fc);
+
+	const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
+
+	int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
+	int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+};
+
+typedef struct _cmac_instance cmac_instance;
+
+struct cmac {
+	struct cmac_statistics stats;
+	adapter_t *adapter;
+	const struct cmac_ops *ops;
+	cmac_instance *instance;
+};
+
+struct gmac {
+	unsigned int stats_update_period;
+	struct cmac *(*create)(adapter_t *adapter, int index);
+	int (*reset)(adapter_t *);
+};
+
+extern const struct gmac t1_pm3393_ops;
+extern const struct gmac t1_vsc7326_ops;
+
+#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
new file mode 100644
index 0000000..71018a4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
@@ -0,0 +1,397 @@
+/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
+#include "common.h"
+#include "mv88e1xxx.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI   0
+#define CROSSOVER_MDIX  1
+#define CROSSOVER_AUTO  3
+
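+/*
+ * Interrupt causes we enable: downshift, FIFO over/underflow, link change,
+ * autoneg done, duplex change and speed change (see mv88e1xxx.h).
+ */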
+#define INTR_ENABLE_MASK 0x6CA0
+
+/*
+ * Set the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy, reg, &val);
+	(void) simple_mdio_write(cphy, reg, val | bitval);
+}
+
+/*
+ * Clear the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy, reg, &val);
+	(void) simple_mdio_write(cphy, reg, val & ~bitval);
+}
+
+/*
+ * NAME:   mv88e1xxx_reset
+ *
+ * DESC:   Reset the given PHY's port. NOTE: This is not a global
+ *         chip reset.
+ *
+ * PARAMS: cphy     - Pointer to PHY instance data.
+ *
+ * RETURN:  0 - Successful reset.
+ *         -1 - Timeout.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+	u32 ctl;
+	int time_out = 1000;
+
+	mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
+
+	do {
+		(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+		ctl &= BMCR_RESET;
+		if (ctl)
+			udelay(1);
+	} while (ctl && --time_out);
+
+	return ctl ? -1 : 0;
+}
+
+static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
+{
+	/* Enable PHY interrupts. */
+	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
+		   INTR_ENABLE_MASK);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter))
+		    elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
+{
+	/* Disable all phy interrupts. */
+	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter))
+		    elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Clear PHY interrupts by reading the register. */
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer);
+
+	/* Clear Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter))
+		    elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	}
+	return 0;
+}
+
+/*
+ * Set the PHY speed and duplex.  This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	u32 ctl;
+
+	(void) simple_mdio_read(phy, MII_BMCR, &ctl);
+	if (speed >= 0) {
+		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+		if (speed == SPEED_100)
+			ctl |= BMCR_SPEED100;
+		else if (speed == SPEED_1000)
+			ctl |= BMCR_SPEED1000;
+	}
+	if (duplex >= 0) {
+		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+		if (duplex == DUPLEX_FULL)
+			ctl |= BMCR_FULLDPLX;
+	}
+	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for 1Gb/s */
+		ctl |= BMCR_ANENABLE;
+	(void) simple_mdio_write(phy, MII_BMCR, ctl);
+	return 0;
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+	u32 data32;
+
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32);
+	data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
+	data32 |= V_PSCR_MDI_XOVER_MODE(crossover);
+	(void) simple_mdio_write(cphy,
+			MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+	u32 ctl;
+
+	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+	(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+	/* restart autoneg for change to take effect */
+	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+	(void) simple_mdio_write(cphy, MII_BMCR, ctl);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
+{
+	u32 ctl;
+
+	/*
+	 * Crossover *must* be set to manual in order to disable auto-neg.
+	 * The Alaska FAQs document highlights this point.
+	 */
+	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);
+
+	/*
+	 * Must include autoneg reset when disabling auto-neg. This
+	 * is described in the Alaska FAQ document.
+	 */
+	(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+	ctl &= ~BMCR_ANENABLE;
+	(void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+	mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
+	return 0;
+}
+
+static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+	u32 val = 0;
+
+	if (advertise_map &
+	    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
+		(void) simple_mdio_read(phy, MII_GBCR, &val);
+		val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
+		if (advertise_map & ADVERTISED_1000baseT_Half)
+			val |= GBCR_ADV_1000HALF;
+		if (advertise_map & ADVERTISED_1000baseT_Full)
+			val |= GBCR_ADV_1000FULL;
+	}
+	(void) simple_mdio_write(phy, MII_GBCR, val);
+
+	val = 1;
+	if (advertise_map & ADVERTISED_10baseT_Half)
+		val |= ADVERTISE_10HALF;
+	if (advertise_map & ADVERTISED_10baseT_Full)
+		val |= ADVERTISE_10FULL;
+	if (advertise_map & ADVERTISED_100baseT_Half)
+		val |= ADVERTISE_100HALF;
+	if (advertise_map & ADVERTISED_100baseT_Full)
+		val |= ADVERTISE_100FULL;
+	if (advertise_map & ADVERTISED_PAUSE)
+		val |= ADVERTISE_PAUSE;
+	if (advertise_map & ADVERTISED_ASYM_PAUSE)
+		val |= ADVERTISE_PAUSE_ASYM;
+	(void) simple_mdio_write(phy, MII_ADVERTISE, val);
+	return 0;
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
+{
+	if (on)
+		mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+	else
+		mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+	return 0;
+}
+
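+/*
+ * Report the current link state and, once negotiation has resolved, the
+ * speed, duplex and pause settings from the PHY specific status register.
+ */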
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	u32 status;
+	int sp = -1, dplx = -1, pause = 0;
+
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+	if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+		if (status & V_PSSR_RX_PAUSE)
+			pause |= PAUSE_RX;
+		if (status & V_PSSR_TX_PAUSE)
+			pause |= PAUSE_TX;
+		dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+		sp = G_PSSR_SPEED(status);
+		if (sp == 0)
+			sp = SPEED_10;
+		else if (sp == 1)
+			sp = SPEED_100;
+		else
+			sp = SPEED_1000;
+	}
+	if (link_ok)
+		*link_ok = (status & V_PSSR_LINK) != 0;
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy,
+		MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val);
+
+	/*
+	 * Set the downshift counter to 2 so we try to establish Gb link
+	 * twice before downshifting.
+	 */
+	val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
+
+	if (downshift_enable)
+		val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
+	(void) simple_mdio_write(cphy,
+			MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val);
+	return 0;
+}
+
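+/*
+ * Service PHY interrupts: keep re-reading the interrupt status register
+ * until no enabled cause remains, updating the link and autoneg state
+ * tracked in cphy->state along the way.
+ */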
+static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
+{
+	int cphy_cause = 0;
+	u32 status;
+
+	/*
+	 * Loop until cause reads zero. Need to handle bouncing interrupts.
+	 */
+	while (1) {
+		u32 cause;
+
+		(void) simple_mdio_read(cphy,
+				MV88E1XXX_INTERRUPT_STATUS_REGISTER,
+				&cause);
+		cause &= INTR_ENABLE_MASK;
+		if (!cause)
+			break;
+
+		if (cause & MV88E1XXX_INTR_LINK_CHNG) {
+			(void) simple_mdio_read(cphy,
+				MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+
+			if (status & MV88E1XXX_INTR_LINK_CHNG)
+				cphy->state |= PHY_LINK_UP;
+			else {
+				cphy->state &= ~PHY_LINK_UP;
+				if (cphy->state & PHY_AUTONEG_EN)
+					cphy->state &= ~PHY_AUTONEG_RDY;
+				cphy_cause |= cphy_cause_link_change;
+			}
+		}
+
+		if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
+			cphy->state |= PHY_AUTONEG_RDY;
+
+		if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
+			(PHY_LINK_UP | PHY_AUTONEG_RDY))
+				cphy_cause |= cphy_cause_link_change;
+	}
+	return cphy_cause;
+}
+
+static void mv88e1xxx_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops mv88e1xxx_ops = {
+	.destroy              = mv88e1xxx_destroy,
+	.reset                = mv88e1xxx_reset,
+	.interrupt_enable     = mv88e1xxx_interrupt_enable,
+	.interrupt_disable    = mv88e1xxx_interrupt_disable,
+	.interrupt_clear      = mv88e1xxx_interrupt_clear,
+	.interrupt_handler    = mv88e1xxx_interrupt_handler,
+	.autoneg_enable       = mv88e1xxx_autoneg_enable,
+	.autoneg_disable      = mv88e1xxx_autoneg_disable,
+	.autoneg_restart      = mv88e1xxx_autoneg_restart,
+	.advertise            = mv88e1xxx_advertise,
+	.set_loopback         = mv88e1xxx_set_loopback,
+	.set_speed_duplex     = mv88e1xxx_set_speed_duplex,
+	.get_link_status      = mv88e1xxx_get_link_status,
+};
+
+static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr,
+					 const struct mdio_ops *mdio_ops)
+{
+	struct adapter *adapter = netdev_priv(dev);
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+
+	cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops);
+
+	/* Configure particular PHYs to run in a different mode. */
+	if ((board_info(adapter)->caps & SUPPORTED_TP) &&
+	    board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
+		/*
+		 * Configure the PHY transmitter as class A to reduce EMI.
+		 */
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB);
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_EXTENDED_REGISTER, 0x8004);
+	}
+	(void) mv88e1xxx_downshift_set(cphy, 1);   /* Enable downshift */
+
+	/* LED */
+	if (is_T2(adapter)) {
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
+	}
+
+	return cphy;
+}
+
+static int mv88e1xxx_phy_reset(adapter_t* adapter)
+{
+	return 0;
+}
+
+const struct gphy t1_mv88e1xxx_ops = {
+	.create = mv88e1xxx_phy_create,
+	.reset =  mv88e1xxx_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
new file mode 100644
index 0000000..967cc42
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
@@ -0,0 +1,127 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
+#ifndef CHELSIO_MV8E1XXX_H
+#define CHELSIO_MV8E1XXX_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBCR 9       /* 1000Base-T control register */
+#define MII_GBSR 10      /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF         0x100
+#define GBCR_ADV_1000FULL         0x200
+#define GBCR_PREFER_MASTER        0x400
+#define GBCR_MANUAL_AS_MASTER     0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF  0x400
+#define GBSR_LP_1000FULL  0x800
+#define GBSR_REMOTE_OK    0x1000
+#define GBSR_LOCAL_OK     0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Marvell PHY interrupt status bits. */
+#define MV88E1XXX_INTR_JABBER          0x0001
+#define MV88E1XXX_INTR_POLARITY_CHNG   0x0002
+#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
+#define MV88E1XXX_INTR_DOWNSHIFT       0x0020
+#define MV88E1XXX_INTR_MDI_XOVER_CHNG  0x0040
+#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
+#define MV88E1XXX_INTR_FALSE_CARRIER   0x0100
+#define MV88E1XXX_INTR_SYMBOL_ERROR    0x0200
+#define MV88E1XXX_INTR_LINK_CHNG       0x0400
+#define MV88E1XXX_INTR_AUTONEG_DONE    0x0800
+#define MV88E1XXX_INTR_PAGE_RECV       0x1000
+#define MV88E1XXX_INTR_DUPLEX_CHNG     0x2000
+#define MV88E1XXX_INTR_SPEED_CHNG      0x4000
+#define MV88E1XXX_INTR_AUTONEG_ERR     0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER               16
+#define MV88E1XXX_SPECIFIC_STATUS_REGISTER              17
+#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER             18
+#define MV88E1XXX_INTERRUPT_STATUS_REGISTER             19
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER       20
+#define MV88E1XXX_RECV_ERR_CNTR_REGISTER                21
+#define MV88E1XXX_RES_REGISTER                          22
+#define MV88E1XXX_GLOBAL_STATUS_REGISTER                23
+#define MV88E1XXX_LED_CONTROL_REGISTER                  24
+#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER          25
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER     26
+#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER      27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER         28
+#define MV88E1XXX_EXTENDED_ADDR_REGISTER                29
+#define MV88E1XXX_EXTENDED_REGISTER                     30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE    5
+#define M_PSCR_MDI_XOVER_MODE    0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT    9
+#define M_DOWNSHIFT_CNT    0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN    7
+#define M_PSSR_CABLE_LEN    0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED    14
+#define M_PSSR_SPEED    0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
new file mode 100644
index 0000000..d0cf611
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -0,0 +1,259 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: mv88x201x.c                                                         *
+ * $Revision: 1.12 $                                                         *
+ * $Date: 2005/04/15 19:27:14 $                                              *
+ * Description:                                                              *
+ *  Marvell PHY (mv88x201x) functionality.                                   *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values. Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS    1
+
+static int led_init(struct cphy *cphy)
+{
+	/* Set up the LED registers so we can turn the LEDs on/off.
+	 * Writing these bits maps control to another
+	 * register. mmd(0x1) addr(0x7)
+	 */
+	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
+	return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+	u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
+
+	if (do_enable & LINK_ENABLE_BIT) {
+		led |= LINK_ENABLE_BIT;
+		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+	} else {
+		led &= ~LINK_ENABLE_BIT;
+		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+	}
+	return 0;
+}
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+	/* This can be done through registers.  It is not required since
+	 * a full chip reset is used.
+	 */
+	return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+	/* Enable PHY LASI interrupts. */
+	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+			MDIO_PMA_LASI_LSALARM);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+	/* Disable PHY LASI interrupts. */
+	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+	u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* The register must be read twice before the clear takes effect. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+	/* Read this register after the others above it, otherwise
+	 * it doesn't clear correctly.
+	 */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+#endif
+
+	/* Clear link status. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+	/* Clear PHY LASI interrupts. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Do it again. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+#endif
+
+	/* Clear Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+		elmer |= ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	}
+	return 0;
+}
+
+static int mv88x201x_interrupt_handler(struct cphy *cphy)
+{
+	/* Clear interrupts */
+	mv88x201x_interrupt_clear(cphy);
+
+	/* We have only enabled link change interrupts and so
+	 * cphy_cause must be a link change interrupt.
+	 */
+	return cphy_cause_link_change;
+}
+
+static int mv88x201x_set_loopback(struct cphy *cphy, int on)
+{
+	return 0;
+}
+
+static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	u32 val = 0;
+
+	if (link_ok) {
+		/* Read link status. */
+		cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+		val &= MDIO_STAT1_LSTATUS;
+		*link_ok = (val == MDIO_STAT1_LSTATUS);
+		/* Turn on/off Link LED */
+		led_link(cphy, *link_ok);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+	return 0;
+}
+
+static void mv88x201x_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops mv88x201x_ops = {
+	.destroy           = mv88x201x_destroy,
+	.reset             = mv88x201x_reset,
+	.interrupt_enable  = mv88x201x_interrupt_enable,
+	.interrupt_disable = mv88x201x_interrupt_disable,
+	.interrupt_clear   = mv88x201x_interrupt_clear,
+	.interrupt_handler = mv88x201x_interrupt_handler,
+	.get_link_status   = mv88x201x_get_link_status,
+	.set_loopback      = mv88x201x_set_loopback,
+	.mmds              = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+			      MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
+};
+
+static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
+					 const struct mdio_ops *mdio_ops)
+{
+	u32 val;
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+
+	cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);
+
+	/* Commands the PHY to enable XFP's clock. */
+	cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
+	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);
+
+	/* Clear link status. Required because of a bug in the PHY.  */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);
+
+	/* Allows for Link,Ack LED turn on/off */
+	led_init(cphy);
+	return cphy;
+}
+
+/* Chip Reset */
+static int mv88x201x_phy_reset(adapter_t *adapter)
+{
+	u32 val;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return 0;
+}
+
+const struct gphy t1_mv88x201x_ops = {
+	.create = mv88x201x_phy_create,
+	.reset = mv88x201x_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
new file mode 100644
index 0000000..a683fd3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -0,0 +1,209 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
+#include "cphy.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* Port Reset */
+static int my3126_reset(struct cphy *cphy, int wait)
+{
+	/*
+	 * This can be done through registers.  It is not required since
+	 * a full chip reset is used.
+	 */
+	return 0;
+}
+
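+/*
+ * No hardware interrupt is enabled here; link status is polled instead
+ * via the phy_update delayed work.
+ */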
+static int my3126_interrupt_enable(struct cphy *cphy)
+{
+	schedule_delayed_work(&cphy->phy_update, HZ/30);
+	t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
+	return 0;
+}
+
+static int my3126_interrupt_disable(struct cphy *cphy)
+{
+	cancel_delayed_work_sync(&cphy->phy_update);
+	return 0;
+}
+
+static int my3126_interrupt_clear(struct cphy *cphy)
+{
+	return 0;
+}
+
+#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
+
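+/*
+ * Polling work handler: every 50th invocation the link status is
+ * re-checked over MDIO, and an ELMER0 GPO LED bit is blinked while the
+ * MSTAT octet counters show traffic.
+ */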
+static int my3126_interrupt_handler(struct cphy *cphy)
+{
+	u32 val;
+	u16 val16;
+	u16 status;
+	u32 act_count;
+	adapter_t *adapter;
+	adapter = cphy->adapter;
+
+	if (cphy->count == 50) {
+		cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+		val16 = (u16) val;
+		status = cphy->bmsr ^ val16;
+
+		if (status & MDIO_STAT1_LSTATUS)
+			t1_link_changed(adapter, 0);
+		cphy->bmsr = val16;
+
+		/*
+		 * We have only enabled link change interrupts, so this
+		 * must be a link change.
+		 */
+		cphy->count = 0;
+	}
+
+	t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
+		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+	t1_tpi_read(adapter,
+		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
+	t1_tpi_read(adapter,
+		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
+	act_count += val;
+
+	/* Populate elmer_gpo with the register value */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	cphy->elmer_gpo = val;
+
+	if ( (val & (1 << 8)) || (val & (1 << 19)) ||
+	     (cphy->act_count == act_count) || cphy->act_on ) {
+		if (is_T2(adapter))
+			val |= (1 << 9);
+		else if (t1_is_T1B(adapter))
+			val |= (1 << 20);
+		cphy->act_on = 0;
+	} else {
+		if (is_T2(adapter))
+			val &= ~(1 << 9);
+		else if (t1_is_T1B(adapter))
+			val &= ~(1 << 20);
+		cphy->act_on = 1;
+	}
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	cphy->elmer_gpo = val;
+	cphy->act_count = act_count;
+	cphy->count++;
+
+	return cphy_cause_link_change;
+}
+
+static void my3126_poll(struct work_struct *work)
+{
+	struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+	my3126_interrupt_handler(cphy);
+}
+
+static int my3126_set_loopback(struct cphy *cphy, int on)
+{
+	return 0;
+}
+
+/* To check the activity LED */
+static int my3126_get_link_status(struct cphy *cphy,
+			int *link_ok, int *speed, int *duplex, int *fc)
+{
+	u32 val;
+	u16 val16;
+	adapter_t *adapter;
+
+	adapter = cphy->adapter;
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+	val16 = (u16) val;
+
+	/* Populate elmer_gpo with the register value */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	cphy->elmer_gpo = val;
+
+	*link_ok = (val16 & MDIO_STAT1_LSTATUS);
+
+	if (*link_ok) {
+		/* Turn on the LED. */
+		if (is_T2(adapter))
+			 val &= ~(1 << 8);
+		else if (t1_is_T1B(adapter))
+			 val &= ~(1 << 19);
+	} else {
+		/* Turn off the LED. */
+		if (is_T2(adapter))
+			 val |= (1 << 8);
+		else if (t1_is_T1B(adapter))
+			 val |= (1 << 19);
+	}
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	cphy->elmer_gpo = val;
+	*speed = SPEED_10000;
+	*duplex = DUPLEX_FULL;
+
+	/* need to add flow control */
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+
+	return 0;
+}
+
+static void my3126_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops my3126_ops = {
+	.destroy		= my3126_destroy,
+	.reset			= my3126_reset,
+	.interrupt_enable	= my3126_interrupt_enable,
+	.interrupt_disable	= my3126_interrupt_disable,
+	.interrupt_clear	= my3126_interrupt_clear,
+	.interrupt_handler	= my3126_interrupt_handler,
+	.get_link_status	= my3126_get_link_status,
+	.set_loopback		= my3126_set_loopback,
+	.mmds			= (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+				   MDIO_DEVS_PHYXS),
+};
+
+static struct cphy *my3126_phy_create(struct net_device *dev,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+
+	cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
+	cphy->bmsr = 0;
+
+	return cphy;
+}
+
+/* Chip Reset */
+static int my3126_phy_reset(adapter_t * adapter)
+{
+	u32 val;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return 0;
+}
+
+const struct gphy t1_my3126_ops = {
+	.create = my3126_phy_create,
+	.reset = my3126_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
new file mode 100644
index 0000000..ec5e050
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -0,0 +1,795 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: pm3393.c                                                            *
+ * $Revision: 1.16 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
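+/* Convert a register number to its TPI byte offset (registers are 32 bits apart). */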
+#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
+
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define MAX_FRAME_SIZE  9600
+
+#define IPG 12
+#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
+	SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
+	SUNI1x10GEXP_BITMSK_TXXG_PADEN)
+#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
+	SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
+
+/* Update statistics every 15 minutes */
+#define STATS_TICK_SECS (15 * 60)
+
+enum {                     /* RMON registers */
+	RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
+	RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
+	RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
+	RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
+	RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
+	RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
+	RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
+	RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
+	RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
+	RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
+	RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
+	RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
+	RxUndersizedFrames =  SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+	RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
+	RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
+
+	TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
+	TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
+	TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
+	TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
+	TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
+	TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
+	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
+	TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
+	TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
+};
+
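+/* Per-port MAC state: enabled directions, flow-control settings and MAC address. */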
+struct _cmac_instance {
+	u8 enabled;
+	u8 fc;
+	u8 mac_addr[6];
+};
+
+static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
+{
+	t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
+{
+	t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+/* Port reset. */
+static int pm3393_reset(struct cmac *cmac)
+{
+	return 0;
+}
+
+/*
+ * Enable interrupts for the PM3393
+ *
+ *	1. Enable PM3393 BLOCK interrupts.
+ *	2. Enable PM3393 Master Interrupt bit(INTE)
+ *	3. Enable ELMER's PM3393 bit.
+ *	4. Enable Terminator external interrupt.
+ */
+static int pm3393_interrupt_enable(struct cmac *cmac)
+{
+	u32 pl_intr;
+
+	/* PM3393 - Enabling all hardware block interrupts.
+	 */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+	/* Don't interrupt on statistics overflow, we are polling */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+	/* PM3393 - Global interrupt enable
+	 */
+	/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+		0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+	/* TERMINATOR - PL_INTERUPTS_EXT */
+	pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+	return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+	u32 elmer;
+
+	/* PM3393 - Disabling HW interrupt blocks. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+	/* PM3393 - Global interrupt enable */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+	/* ELMER - External chip interrupts. */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+	/* TERMINATOR - PL_INTERUPTS_EXT */
+	/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+	 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+	 */
+
+	return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+	u32 elmer;
+	u32 pl_intr;
+	u32 val32;
+
+	/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+	 *          bit WCIMODE=0 for a clear-on-read.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
+	       &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
+
+	/* PM3393 - Global interrupt status
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
+
+	/* ELMER - External chip interrupts.
+	 */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
+	elmer |= ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
+
+	/* TERMINATOR - PL_INTERUPTS_EXT
+	 */
+	pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
+
+	return 0;
+}
+
+/* Interrupt handler */
+static int pm3393_interrupt_handler(struct cmac *cmac)
+{
+	u32 master_intr_status;
+
+	/* Read the master interrupt status register. */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
+	       &master_intr_status);
+	if (netif_msg_intr(cmac->adapter))
+		dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
+			master_intr_status);
+
+	/* TBD XXX Lets just clear everything for now */
+	pm3393_interrupt_clear(cmac);
+
+	return 0;
+}
+
+static int pm3393_enable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
+			(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
+
+	if (which & MAC_DIRECTION_TX) {
+		u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
+
+		if (cmac->instance->fc & PAUSE_RX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
+		if (cmac->instance->fc & PAUSE_TX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
+	}
+
+	cmac->instance->enabled |= which;
+	return 0;
+}
+
+static int pm3393_enable_port(struct cmac *cmac, int which)
+{
+	/* Clear port statistics */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
+	udelay(2);
+	memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
+
+	pm3393_enable(cmac, which);
+
+	/*
+	 * XXX This should be done by the PHY and preferably not at all.
+	 * The PHY doesn't give us link status indication on its own so have
+	 * the link management code query it instead.
+	 */
+	t1_link_changed(cmac->adapter, 0);
+	return 0;
+}
+
+static int pm3393_disable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
+	if (which & MAC_DIRECTION_TX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
+
+	/*
+	 * The disable is graceful. Give the PM3393 time.  Can't wait very
+	 * long here, we may be holding locks.
+	 */
+	udelay(20);
+
+	cmac->instance->enabled &= ~which;
+	return 0;
+}
+
+static int pm3393_loopback_enable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_loopback_disable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_set_mtu(struct cmac *cmac, int mtu)
+{
+	int enabled = cmac->instance->enabled;
+
+	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+	mtu += 14 + 4;
+	if (mtu > MAX_FRAME_SIZE)
+		return -EINVAL;
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
+{
+	int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
+	u32 rx_mode;
+
+	/* Disable MAC RX before reconfiguring it */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX);
+
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
+	rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
+		     SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
+		(u16)rx_mode);
+
+	if (t1_rx_mode_promisc(rm)) {
+		/* Promiscuous mode. */
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
+	}
+	if (t1_rx_mode_allmulti(rm)) {
+		/* Accept all multicast. */
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	} else if (t1_rx_mode_mc_cnt(rm)) {
+		/* Accept one or more multicast(s). */
+		struct netdev_hw_addr *ha;
+		int bit;
+		u16 mc_filter[4] = { 0, };
+
+		netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+			/* bit[23:28] */
+			bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
+			mc_filter[bit >> 4] |= 1 << (bit & 0xf);
+		}
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	}
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
+
+	if (enabled)
+		pm3393_enable(cmac, MAC_DIRECTION_RX);
+
+	return 0;
+}
+
+static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
+				      int *duplex, int *fc)
+{
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = cmac->instance->fc;
+	return 0;
+}
+
+static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+				      int fc)
+{
+	if (speed >= 0 && speed != SPEED_10000)
+		return -1;
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -1;
+	if (fc & ~(PAUSE_TX | PAUSE_RX))
+		return -1;
+
+	if (fc != cmac->instance->fc) {
+		cmac->instance->fc = (u8) fc;
+		if (cmac->instance->enabled & MAC_DIRECTION_TX)
+			pm3393_enable(cmac, MAC_DIRECTION_TX);
+	}
+	return 0;
+}
+
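+/*
+ * Each RMON counter is 40 bits wide, read as three consecutive 16-bit TPI
+ * registers (the third supplies the top 8 bits).  Merge the hardware value
+ * into the 64-bit software counter and add 2^40 if the rollover status bit
+ * for this counter is set.
+ */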
+#define RMON_UPDATE(mac, name, stat_name) \
+{ \
+	t1_tpi_read((mac)->adapter, OFFSET(name), &val0);     \
+	t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
+	t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
+	(mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
+				 ((u64)(val1 & 0xffff) << 16) | \
+				 ((u64)(val2 & 0xff) << 32) | \
+				 ((mac)->stats.stat_name & \
+					0xffffff0000000000ULL); \
+	if (ro & \
+	    (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
+		(mac)->stats.stat_name += 1ULL << 40; \
+}
+
+static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+							      int flag)
+{
+	u64	ro;
+	u32	val0, val1, val2, val3;
+
+	/* Snap the counters */
+	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+	/* Counter rollover, clear on read */
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
+	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+		(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+	/* Rx stats */
+	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
+	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
+	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
+	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
+	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
+	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
+	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
+				RxInternalMACRcvError);
+	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
+	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
+	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
+	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
+	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+	RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
+	RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
+
+	/* Tx stats */
+	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
+	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
+				TxInternalMACXmitError);
+	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
+	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
+	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
+	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
+	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+	RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
+	RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
+
+	return &mac->stats;
+}
+
+static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
+{
+	memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
+	return 0;
+}
+
+static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+{
+	u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
+
+	/*
+	 * MAC addr: 00:07:43:00:13:09
+	 *
+	 * ma[5] = 0x09
+	 * ma[4] = 0x13
+	 * ma[3] = 0x00
+	 * ma[2] = 0x43
+	 * ma[1] = 0x07
+	 * ma[0] = 0x00
+	 *
+	 * The PM3393 requires byte swapping and reverse order entry
+	 * when programming MAC addresses:
+	 *
+	 * low_bits[15:0]    = ma[1]:ma[0]
+	 * mid_bits[31:16]   = ma[3]:ma[2]
+	 * high_bits[47:32]  = ma[5]:ma[4]
+	 */
+
+	/* Store local copy */
+	memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
+
+	lo  = ((u32) ma[1] << 8) | (u32) ma[0];
+	mid = ((u32) ma[3] << 8) | (u32) ma[2];
+	hi  = ((u32) ma[5] << 8) | (u32) ma[4];
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	/* Set RXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
+
+	/* Set TXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
+
+	/* Setup Exact Match Filter 1 with our MAC address
+	 *
+	 * Must disable exact match filter before configuring it.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
+	val &= 0xff0f;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
+
+	val |= 0x0090;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static void pm3393_destroy(struct cmac *cmac)
+{
+	kfree(cmac);
+}
+
+static struct cmac_ops pm3393_ops = {
+	.destroy                 = pm3393_destroy,
+	.reset                   = pm3393_reset,
+	.interrupt_enable        = pm3393_interrupt_enable,
+	.interrupt_disable       = pm3393_interrupt_disable,
+	.interrupt_clear         = pm3393_interrupt_clear,
+	.interrupt_handler       = pm3393_interrupt_handler,
+	.enable                  = pm3393_enable_port,
+	.disable                 = pm3393_disable,
+	.loopback_enable         = pm3393_loopback_enable,
+	.loopback_disable        = pm3393_loopback_disable,
+	.set_mtu                 = pm3393_set_mtu,
+	.set_rx_mode             = pm3393_set_rx_mode,
+	.get_speed_duplex_fc     = pm3393_get_speed_duplex_fc,
+	.set_speed_duplex_fc     = pm3393_set_speed_duplex_fc,
+	.statistics_update       = pm3393_update_statistics,
+	.macaddress_get          = pm3393_macaddress_get,
+	.macaddress_set          = pm3393_macaddress_set
+};
+
+static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *cmac;
+
+	cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!cmac)
+		return NULL;
+
+	cmac->ops = &pm3393_ops;
+	cmac->instance = (cmac_instance *) (cmac + 1);
+	cmac->adapter = adapter;
+	cmac->instance->fc = PAUSE_TX | PAUSE_RX;
+
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
+	t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
+	t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001);   /* PL4IO Enable */
+	t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
+	t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202);	/* PL4IO Calendar Repetitions */
+
+	t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080);	/* EFLX Enable */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000);	/* EFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000);	/* EFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040);	/* EFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc);	/* EFLX Almost Full */
+	t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199);	/* EFLX Almost Empty */
+	t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240);	/* EFLX Cut Through Threshold */
+	t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000);	/* EFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001);	/* EFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff);	/* EFLX enable overflow interrupt. The other bits are undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff);	/* EFLX Undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000);	/* IFLX Configuration - enable */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000);	/* IFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000);	/* IFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100);	/* IFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00);	/* IFLX Almost Full Limit */
+	t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599);	/* IFLX Almost Empty Limit */
+	t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000);	/* IFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001);	/* IFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff);	/* IFLX Enable overflow interrupt.  The other bits are undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008);	/* PL4MOS Starving Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008);	/* PL4MOS Hungry Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008);	/* PL4MOS Transfer Size */
+	t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005);	/* PL4MOS Disable */
+
+	t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103);	/* PL4ODP Training Repeat and SOP rule */
+	t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000);	/* PL4ODP MAX_T setting */
+
+	t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087);	/* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+	t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f);	/* PL4IDU Enable Dip4 check error interrupts */
+
+	t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32);	/* # TXXG Config */
+	/* For T1, use timer-based MAC flow control. */
+	t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+	t1_tpi_write(adapter, OFFSET(0x2040), 0x059c);	/* # RXXG Config */
+	t1_tpi_write(adapter, OFFSET(0x2049), 0x0001);	/* # RXXG Cut Through */
+	t1_tpi_write(adapter, OFFSET(0x2070), 0x0000);	/* # Disable promiscuous mode */
+
+	/* Set up Exact Match Filter 0 to allow broadcast packets:
+	 * program the broadcast address ff:ff:ff:ff:ff:ff into the filter.
+	 */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0000);	/* # Disable Match Enable bit */
+	t1_tpi_write(adapter, OFFSET(0x204a), 0xffff);	/* # low addr */
+	t1_tpi_write(adapter, OFFSET(0x204b), 0xffff);	/* # mid addr */
+	t1_tpi_write(adapter, OFFSET(0x204c), 0xffff);	/* # high addr */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0009);	/* # Enable Match Enable bit */
+
+	t1_tpi_write(adapter, OFFSET(0x0003), 0x0000);	/* # NO SOP/ PAD_EN setup */
+	t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0);	/* # RXEQB disabled */
+	t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f);	/* # No Preemphasis */
+
+	return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t *adapter)
+{
+	u32 val;
+	u32 x;
+	u32 is_pl4_reset_finished;
+	u32 is_pl4_outof_lock;
+	u32 is_xaui_mabc_pll_locked;
+	u32 successful_reset;
+	int i;
+
+	/* The following steps are required to properly reset
+	 * the PM3393. This information is provided in the
+	 * PM3393 datasheet (Issue 2: November 2002)
+	 * section 13.1 -- Device Reset.
+	 *
+	 * The PM3393 has three types of components that are
+	 * individually reset:
+	 *
+	 * DRESETB      - Digital circuitry
+	 * PL4_ARESETB  - PL4 analog circuitry
+	 * XAUI_ARESETB - XAUI bus analog circuitry
+	 *
+	 * Steps to reset PM3393 using RSTB pin:
+	 *
+	 * 1. Assert RSTB pin low (write 0).
+	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after the external clocks and REFSEL are stable).
+	 * 5. De-assert RSTB (write 1).
+	 * 6. Wait for the internal timers to expire after ~14ms.
+	 *    - This allows the analog clock synthesizer (PL4CSU) to stabilize to the
+	 *      selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for the XAUI interface to stabilize.
+	 * 8. Verify the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked; otherwise try
+	 *    again a few more times.
+	 */
+
+	successful_reset = 0;
+	for (i = 0; i < 3 && !successful_reset; i++) {
+		/* 1 */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 2 */
+		msleep(1);
+
+		/* 3 */
+		msleep(1);
+
+		/* 4 */
+		msleep(2 /* 1 extra ms for safety */);
+
+		/* 5 */
+		val |= 1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 6 */
+		msleep(15 /* 1 extra ms for safety */);
+
+		/* 7 */
+		msleep(1);
+
+		/* 8 */
+
+		/* Has PL4 analog block come out of reset correctly? */
+		t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+		is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init
+		 *         sequence; figure out why. */
+
+		/* Have all PL4 block clocks locked? */
+		x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+		     /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */  |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+		is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, we might be able to software-reset the XAUI part
+		 *     and try to recover... thus saving us from doing another HW reset */
+		/* Has the XAUI MABC PLL circuitry stabilized? */
+		is_xaui_mabc_pll_locked =
+		    (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
+
+		successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
+				    && is_xaui_mabc_pll_locked);
+
+		if (netif_msg_hw(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+				"is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+				i, is_pl4_reset_finished, val,
+				is_pl4_outof_lock, is_xaui_mabc_pll_locked);
+	}
+	return successful_reset ? 0 : 1;
+}
+
+const struct gmac t1_pm3393_ops = {
+	.stats_update_period = STATS_TICK_SECS,
+	.create              = pm3393_mac_create,
+	.reset               = pm3393_mac_reset,
+};
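+
+/*
+ * Illustrative sketch (added in this document, not part of the upstream file):
+ * t1_pm3393_ops is the table through which the rest of the driver is expected
+ * to reach this MAC.  Assuming a caller already holds an adapter_t *adapter,
+ * a reset through the ops table would look like the following, with a
+ * non-zero return meaning the PM3393 failed to come out of reset:
+ *
+ *	if (t1_pm3393_ops.reset(adapter))
+ *		dev_err(&adapter->pdev->dev, "PM3393 reset failed\n");
+ */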
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
new file mode 100644
index 0000000..964ce59
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -0,0 +1,2167 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: regs.h                                                              *
+ * $Revision: 1.8 $                                                          *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_REGS_H_
+#define _CXGB_REGS_H_
+
+/* SGE registers */
+#define A_SG_CONTROL 0x0
+
+#define S_CMDQ0_ENABLE    0
+#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
+#define F_CMDQ0_ENABLE    V_CMDQ0_ENABLE(1U)
+
+#define S_CMDQ1_ENABLE    1
+#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
+#define F_CMDQ1_ENABLE    V_CMDQ1_ENABLE(1U)
+
+#define S_FL0_ENABLE    2
+#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
+#define F_FL0_ENABLE    V_FL0_ENABLE(1U)
+
+#define S_FL1_ENABLE    3
+#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
+#define F_FL1_ENABLE    V_FL1_ENABLE(1U)
+
+#define S_CPL_ENABLE    4
+#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
+#define F_CPL_ENABLE    V_CPL_ENABLE(1U)
+
+#define S_RESPONSE_QUEUE_ENABLE    5
+#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
+#define F_RESPONSE_QUEUE_ENABLE    V_RESPONSE_QUEUE_ENABLE(1U)
+
+#define S_CMDQ_PRIORITY    6
+#define M_CMDQ_PRIORITY    0x3
+#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
+#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
+
+#define S_DISABLE_CMDQ0_GTS    8
+#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS)
+#define F_DISABLE_CMDQ0_GTS    V_DISABLE_CMDQ0_GTS(1U)
+
+#define S_DISABLE_CMDQ1_GTS    9
+#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
+#define F_DISABLE_CMDQ1_GTS    V_DISABLE_CMDQ1_GTS(1U)
+
+#define S_DISABLE_FL0_GTS    10
+#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
+#define F_DISABLE_FL0_GTS    V_DISABLE_FL0_GTS(1U)
+
+#define S_DISABLE_FL1_GTS    11
+#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
+#define F_DISABLE_FL1_GTS    V_DISABLE_FL1_GTS(1U)
+
+#define S_ENABLE_BIG_ENDIAN    12
+#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
+#define F_ENABLE_BIG_ENDIAN    V_ENABLE_BIG_ENDIAN(1U)
+
+#define S_FL_SELECTION_CRITERIA    13
+#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA)
+#define F_FL_SELECTION_CRITERIA    V_FL_SELECTION_CRITERIA(1U)
+
+#define S_ISCSI_COALESCE    14
+#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
+#define F_ISCSI_COALESCE    V_ISCSI_COALESCE(1U)
+
+#define S_RX_PKT_OFFSET    15
+#define M_RX_PKT_OFFSET    0x7
+#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET)
+
+#define S_VLAN_XTRACT    18
+#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
+#define F_VLAN_XTRACT    V_VLAN_XTRACT(1U)
+
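+
+/*
+ * Illustrative note (added in this document, not part of the upstream file):
+ * the macros in this file follow a fixed naming convention -- S_FOO is a
+ * field's bit offset, M_FOO its mask, V_FOO(x) shifts a value into position,
+ * G_FOO(reg) extracts it again, and F_FOO is the single-bit form.  A
+ * hypothetical A_SG_CONTROL value could be composed and decoded like this:
+ *
+ *	u32 ctrl = F_CMDQ0_ENABLE | F_FL0_ENABLE | V_CMDQ_PRIORITY(2);
+ *	unsigned int prio = G_CMDQ_PRIORITY(ctrl);
+ *
+ * which leaves prio equal to 2.
+ */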
+#define A_SG_DOORBELL 0x4
+#define A_SG_CMD0BASELWR 0x8
+#define A_SG_CMD0BASEUPR 0xc
+#define A_SG_CMD1BASELWR 0x10
+#define A_SG_CMD1BASEUPR 0x14
+#define A_SG_FL0BASELWR 0x18
+#define A_SG_FL0BASEUPR 0x1c
+#define A_SG_FL1BASELWR 0x20
+#define A_SG_FL1BASEUPR 0x24
+#define A_SG_CMD0SIZE 0x28
+
+#define S_CMDQ0_SIZE    0
+#define M_CMDQ0_SIZE    0x1ffff
+#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE)
+#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE)
+
+#define A_SG_FL0SIZE 0x2c
+
+#define S_FL0_SIZE    0
+#define M_FL0_SIZE    0x1ffff
+#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE)
+#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE)
+
+#define A_SG_RSPSIZE 0x30
+
+#define S_RESPQ_SIZE    0
+#define M_RESPQ_SIZE    0x1ffff
+#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE)
+#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE)
+
+#define A_SG_RSPBASELWR 0x34
+#define A_SG_RSPBASEUPR 0x38
+#define A_SG_FLTHRESHOLD 0x3c
+
+#define S_FL_THRESHOLD    0
+#define M_FL_THRESHOLD    0xffff
+#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD)
+#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD)
+
+#define A_SG_RSPQUEUECREDIT 0x40
+
+#define S_RESPQ_CREDIT    0
+#define M_RESPQ_CREDIT    0x1ffff
+#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT)
+#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT)
+
+#define A_SG_SLEEPING 0x48
+
+#define S_SLEEPING    0
+#define M_SLEEPING    0xffff
+#define V_SLEEPING(x) ((x) << S_SLEEPING)
+#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING)
+
+#define A_SG_INTRTIMER 0x4c
+
+#define S_INTERRUPT_TIMER_COUNT    0
+#define M_INTERRUPT_TIMER_COUNT    0xffffff
+#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT)
+#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT)
+
+#define A_SG_CMD0PTR 0x50
+
+#define S_CMDQ0_POINTER    0
+#define M_CMDQ0_POINTER    0xffff
+#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER)
+#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER)
+
+#define S_CURRENT_GENERATION_BIT    16
+#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT)
+#define F_CURRENT_GENERATION_BIT    V_CURRENT_GENERATION_BIT(1U)
+
+#define A_SG_CMD1PTR 0x54
+
+#define S_CMDQ1_POINTER    0
+#define M_CMDQ1_POINTER    0xffff
+#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER)
+#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER)
+
+#define A_SG_FL0PTR 0x58
+
+#define S_FL0_POINTER    0
+#define M_FL0_POINTER    0xffff
+#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER)
+#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER)
+
+#define A_SG_FL1PTR 0x5c
+
+#define S_FL1_POINTER    0
+#define M_FL1_POINTER    0xffff
+#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER)
+#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER)
+
+#define A_SG_VERSION 0x6c
+
+#define S_DAY    0
+#define M_DAY    0x1f
+#define V_DAY(x) ((x) << S_DAY)
+#define G_DAY(x) (((x) >> S_DAY) & M_DAY)
+
+#define S_MONTH    5
+#define M_MONTH    0xf
+#define V_MONTH(x) ((x) << S_MONTH)
+#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH)
+
+#define A_SG_CMD1SIZE 0xb0
+
+#define S_CMDQ1_SIZE    0
+#define M_CMDQ1_SIZE    0x1ffff
+#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE)
+#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE)
+
+#define A_SG_FL1SIZE 0xb4
+
+#define S_FL1_SIZE    0
+#define M_FL1_SIZE    0x1ffff
+#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE)
+#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE)
+
+#define A_SG_INT_ENABLE 0xb8
+
+#define S_RESPQ_EXHAUSTED    0
+#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
+#define F_RESPQ_EXHAUSTED    V_RESPQ_EXHAUSTED(1U)
+
+#define S_RESPQ_OVERFLOW    1
+#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
+#define F_RESPQ_OVERFLOW    V_RESPQ_OVERFLOW(1U)
+
+#define S_FL_EXHAUSTED    2
+#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
+#define F_FL_EXHAUSTED    V_FL_EXHAUSTED(1U)
+
+#define S_PACKET_TOO_BIG    3
+#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
+#define F_PACKET_TOO_BIG    V_PACKET_TOO_BIG(1U)
+
+#define S_PACKET_MISMATCH    4
+#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
+#define F_PACKET_MISMATCH    V_PACKET_MISMATCH(1U)
+
+#define A_SG_INT_CAUSE 0xbc
+#define A_SG_RESPACCUTIMER 0xc0
+
+/* MC3 registers */
+#define A_MC3_CFG 0x100
+
+#define S_CLK_ENABLE    0
+#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE)
+#define F_CLK_ENABLE    V_CLK_ENABLE(1U)
+
+#define S_READY    1
+#define V_READY(x) ((x) << S_READY)
+#define F_READY    V_READY(1U)
+
+#define S_READ_TO_WRITE_DELAY    2
+#define M_READ_TO_WRITE_DELAY    0x7
+#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY)
+#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY)
+
+#define S_WRITE_TO_READ_DELAY    5
+#define M_WRITE_TO_READ_DELAY    0x7
+#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY)
+#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY)
+
+#define S_MC3_BANK_CYCLE    8
+#define M_MC3_BANK_CYCLE    0xf
+#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE)
+#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE)
+
+#define S_REFRESH_CYCLE    12
+#define M_REFRESH_CYCLE    0xf
+#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE)
+#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE)
+
+#define S_PRECHARGE_CYCLE    16
+#define M_PRECHARGE_CYCLE    0x3
+#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE)
+#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE)
+
+#define S_ACTIVE_TO_READ_WRITE_DELAY    18
+#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY)
+#define F_ACTIVE_TO_READ_WRITE_DELAY    V_ACTIVE_TO_READ_WRITE_DELAY(1U)
+
+#define S_ACTIVE_TO_PRECHARGE_DELAY    19
+#define M_ACTIVE_TO_PRECHARGE_DELAY    0x7
+#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY)
+#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY)
+
+#define S_WRITE_RECOVERY_DELAY    22
+#define M_WRITE_RECOVERY_DELAY    0x3
+#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY)
+#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY)
+
+#define S_DENSITY    24
+#define M_DENSITY    0x3
+#define V_DENSITY(x) ((x) << S_DENSITY)
+#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY)
+
+#define S_ORGANIZATION    26
+#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION)
+#define F_ORGANIZATION    V_ORGANIZATION(1U)
+
+#define S_BANKS    27
+#define V_BANKS(x) ((x) << S_BANKS)
+#define F_BANKS    V_BANKS(1U)
+
+#define S_UNREGISTERED    28
+#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED)
+#define F_UNREGISTERED    V_UNREGISTERED(1U)
+
+#define S_MC3_WIDTH    29
+#define M_MC3_WIDTH    0x3
+#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH)
+#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH)
+
+#define S_MC3_SLOW    31
+#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW)
+#define F_MC3_SLOW    V_MC3_SLOW(1U)
+
+#define A_MC3_MODE 0x104
+
+#define S_MC3_MODE    0
+#define M_MC3_MODE    0x3fff
+#define V_MC3_MODE(x) ((x) << S_MC3_MODE)
+#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE)
+
+#define S_BUSY    31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY    V_BUSY(1U)
+
+#define A_MC3_EXT_MODE 0x108
+
+#define S_MC3_EXTENDED_MODE    0
+#define M_MC3_EXTENDED_MODE    0x3fff
+#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE)
+#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE)
+
+#define A_MC3_PRECHARG 0x10c
+#define A_MC3_REFRESH 0x110
+
+#define S_REFRESH_ENABLE    0
+#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE)
+#define F_REFRESH_ENABLE    V_REFRESH_ENABLE(1U)
+
+#define S_REFRESH_DIVISOR    1
+#define M_REFRESH_DIVISOR    0x3fff
+#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR)
+#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR)
+
+#define A_MC3_STROBE 0x114
+
+#define S_MASTER_DLL_RESET    0
+#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET)
+#define F_MASTER_DLL_RESET    V_MASTER_DLL_RESET(1U)
+
+#define S_MASTER_DLL_TAP_COUNT    1
+#define M_MASTER_DLL_TAP_COUNT    0xff
+#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT)
+#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT)
+
+#define S_MASTER_DLL_LOCKED    9
+#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED)
+#define F_MASTER_DLL_LOCKED    V_MASTER_DLL_LOCKED(1U)
+
+#define S_MASTER_DLL_MAX_TAP_COUNT    10
+#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT)
+#define F_MASTER_DLL_MAX_TAP_COUNT    V_MASTER_DLL_MAX_TAP_COUNT(1U)
+
+#define S_MASTER_DLL_TAP_COUNT_OFFSET    11
+#define M_MASTER_DLL_TAP_COUNT_OFFSET    0x3f
+#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET)
+#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET)
+
+#define S_SLAVE_DLL_RESET    11
+#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET)
+#define F_SLAVE_DLL_RESET    V_SLAVE_DLL_RESET(1U)
+
+#define S_SLAVE_DLL_DELTA    12
+#define M_SLAVE_DLL_DELTA    0xf
+#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA)
+#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT    17
+#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT    0x3f
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE    23
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE)
+#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE    V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U)
+
+#define S_SLAVE_DELAY_LINE_TAP_COUNT    24
+#define M_SLAVE_DELAY_LINE_TAP_COUNT    0x3f
+#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT)
+
+#define A_MC3_ECC_CNTL 0x118
+
+#define S_ECC_GENERATION_ENABLE    0
+#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE)
+#define F_ECC_GENERATION_ENABLE    V_ECC_GENERATION_ENABLE(1U)
+
+#define S_ECC_CHECK_ENABLE    1
+#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE)
+#define F_ECC_CHECK_ENABLE    V_ECC_CHECK_ENABLE(1U)
+
+#define S_CORRECTABLE_ERROR_COUNT    2
+#define M_CORRECTABLE_ERROR_COUNT    0xff
+#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT)
+#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT)
+
+#define S_UNCORRECTABLE_ERROR_COUNT    10
+#define M_UNCORRECTABLE_ERROR_COUNT    0xff
+#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT)
+#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT)
+
+#define A_MC3_CE_ADDR 0x11c
+
+#define S_MC3_CE_ADDR    4
+#define M_MC3_CE_ADDR    0xfffffff
+#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR)
+#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR)
+
+#define A_MC3_CE_DATA0 0x120
+#define A_MC3_CE_DATA1 0x124
+#define A_MC3_CE_DATA2 0x128
+#define A_MC3_CE_DATA3 0x12c
+#define A_MC3_CE_DATA4 0x130
+#define A_MC3_UE_ADDR 0x134
+
+#define S_MC3_UE_ADDR    4
+#define M_MC3_UE_ADDR    0xfffffff
+#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR)
+#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR)
+
+#define A_MC3_UE_DATA0 0x138
+#define A_MC3_UE_DATA1 0x13c
+#define A_MC3_UE_DATA2 0x140
+#define A_MC3_UE_DATA3 0x144
+#define A_MC3_UE_DATA4 0x148
+#define A_MC3_BD_ADDR 0x14c
+#define A_MC3_BD_DATA0 0x150
+#define A_MC3_BD_DATA1 0x154
+#define A_MC3_BD_DATA2 0x158
+#define A_MC3_BD_DATA3 0x15c
+#define A_MC3_BD_DATA4 0x160
+#define A_MC3_BD_OP 0x164
+
+#define S_BACK_DOOR_OPERATION    0
+#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION)
+#define F_BACK_DOOR_OPERATION    V_BACK_DOOR_OPERATION(1U)
+
+#define A_MC3_BIST_ADDR_BEG 0x168
+#define A_MC3_BIST_ADDR_END 0x16c
+#define A_MC3_BIST_DATA 0x170
+#define A_MC3_BIST_OP 0x174
+
+#define S_OP    0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP    V_OP(1U)
+
+#define S_DATA_PATTERN    1
+#define M_DATA_PATTERN    0x3
+#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN)
+#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN)
+
+#define S_CONTINUOUS    3
+#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS)
+#define F_CONTINUOUS    V_CONTINUOUS(1U)
+
+#define A_MC3_INT_ENABLE 0x178
+
+#define S_MC3_CORR_ERR    0
+#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR)
+#define F_MC3_CORR_ERR    V_MC3_CORR_ERR(1U)
+
+#define S_MC3_UNCORR_ERR    1
+#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR)
+#define F_MC3_UNCORR_ERR    V_MC3_UNCORR_ERR(1U)
+
+#define S_MC3_PARITY_ERR    2
+#define M_MC3_PARITY_ERR    0xff
+#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR)
+#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR)
+
+#define S_MC3_ADDR_ERR    10
+#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR)
+#define F_MC3_ADDR_ERR    V_MC3_ADDR_ERR(1U)
+
+#define A_MC3_INT_CAUSE 0x17c
+
+/* MC4 registers */
+#define A_MC4_CFG 0x180
+
+#define S_POWER_UP    0
+#define V_POWER_UP(x) ((x) << S_POWER_UP)
+#define F_POWER_UP    V_POWER_UP(1U)
+
+#define S_MC4_BANK_CYCLE    8
+#define M_MC4_BANK_CYCLE    0x7
+#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE)
+#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE)
+
+#define S_MC4_NARROW    24
+#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW)
+#define F_MC4_NARROW    V_MC4_NARROW(1U)
+
+#define S_MC4_SLOW    25
+#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
+#define F_MC4_SLOW    V_MC4_SLOW(1U)
+
+#define S_MC4A_WIDTH    24
+#define M_MC4A_WIDTH    0x3
+#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH)
+#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH)
+
+#define S_MC4A_SLOW    26
+#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW)
+#define F_MC4A_SLOW    V_MC4A_SLOW(1U)
+
+#define A_MC4_MODE 0x184
+
+#define S_MC4_MODE    0
+#define M_MC4_MODE    0x7fff
+#define V_MC4_MODE(x) ((x) << S_MC4_MODE)
+#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE)
+
+#define A_MC4_EXT_MODE 0x188
+
+#define S_MC4_EXTENDED_MODE    0
+#define M_MC4_EXTENDED_MODE    0x7fff
+#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE)
+#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE)
+
+#define A_MC4_REFRESH 0x190
+#define A_MC4_STROBE 0x194
+#define A_MC4_ECC_CNTL 0x198
+#define A_MC4_CE_ADDR 0x19c
+
+#define S_MC4_CE_ADDR    4
+#define M_MC4_CE_ADDR    0xffffff
+#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR)
+#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR)
+
+#define A_MC4_CE_DATA0 0x1a0
+#define A_MC4_CE_DATA1 0x1a4
+#define A_MC4_CE_DATA2 0x1a8
+#define A_MC4_CE_DATA3 0x1ac
+#define A_MC4_CE_DATA4 0x1b0
+#define A_MC4_UE_ADDR 0x1b4
+
+#define S_MC4_UE_ADDR    4
+#define M_MC4_UE_ADDR    0xffffff
+#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR)
+#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR)
+
+#define A_MC4_UE_DATA0 0x1b8
+#define A_MC4_UE_DATA1 0x1bc
+#define A_MC4_UE_DATA2 0x1c0
+#define A_MC4_UE_DATA3 0x1c4
+#define A_MC4_UE_DATA4 0x1c8
+#define A_MC4_BD_ADDR 0x1cc
+
+#define S_MC4_BACK_DOOR_ADDR    0
+#define M_MC4_BACK_DOOR_ADDR    0xfffffff
+#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR)
+#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR)
+
+#define A_MC4_BD_DATA0 0x1d0
+#define A_MC4_BD_DATA1 0x1d4
+#define A_MC4_BD_DATA2 0x1d8
+#define A_MC4_BD_DATA3 0x1dc
+#define A_MC4_BD_DATA4 0x1e0
+#define A_MC4_BD_OP 0x1e4
+
+#define S_OPERATION    0
+#define V_OPERATION(x) ((x) << S_OPERATION)
+#define F_OPERATION    V_OPERATION(1U)
+
+#define A_MC4_BIST_ADDR_BEG 0x1e8
+#define A_MC4_BIST_ADDR_END 0x1ec
+#define A_MC4_BIST_DATA 0x1f0
+#define A_MC4_BIST_OP 0x1f4
+#define A_MC4_INT_ENABLE 0x1f8
+
+#define S_MC4_CORR_ERR    0
+#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR)
+#define F_MC4_CORR_ERR    V_MC4_CORR_ERR(1U)
+
+#define S_MC4_UNCORR_ERR    1
+#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR)
+#define F_MC4_UNCORR_ERR    V_MC4_UNCORR_ERR(1U)
+
+#define S_MC4_ADDR_ERR    2
+#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR)
+#define F_MC4_ADDR_ERR    V_MC4_ADDR_ERR(1U)
+
+#define A_MC4_INT_CAUSE 0x1fc
+
+/* TPI registers */
+#define A_TPI_ADDR 0x280
+
+#define S_TPI_ADDRESS    0
+#define M_TPI_ADDRESS    0xffffff
+#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS)
+#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS)
+
+#define A_TPI_WR_DATA 0x284
+#define A_TPI_RD_DATA 0x288
+#define A_TPI_CSR 0x28c
+
+#define S_TPIWR    0
+#define V_TPIWR(x) ((x) << S_TPIWR)
+#define F_TPIWR    V_TPIWR(1U)
+
+#define S_TPIRDY    1
+#define V_TPIRDY(x) ((x) << S_TPIRDY)
+#define F_TPIRDY    V_TPIRDY(1U)
+
+#define S_INT_DIR    31
+#define V_INT_DIR(x) ((x) << S_INT_DIR)
+#define F_INT_DIR    V_INT_DIR(1U)
+
+#define A_TPI_PAR 0x29c
+
+#define S_TPIPAR    0
+#define M_TPIPAR    0x7f
+#define V_TPIPAR(x) ((x) << S_TPIPAR)
+#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
+
+
+/* TP registers */
+#define A_TP_IN_CONFIG 0x300
+
+#define S_TP_IN_CSPI_TUNNEL    0
+#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL)
+#define F_TP_IN_CSPI_TUNNEL    V_TP_IN_CSPI_TUNNEL(1U)
+
+#define S_TP_IN_CSPI_ETHERNET    1
+#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET)
+#define F_TP_IN_CSPI_ETHERNET    V_TP_IN_CSPI_ETHERNET(1U)
+
+#define S_TP_IN_CSPI_CPL    3
+#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
+#define F_TP_IN_CSPI_CPL    V_TP_IN_CSPI_CPL(1U)
+
+#define S_TP_IN_CSPI_POS    4
+#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS)
+#define F_TP_IN_CSPI_POS    V_TP_IN_CSPI_POS(1U)
+
+#define S_TP_IN_CSPI_CHECK_IP_CSUM    5
+#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
+#define F_TP_IN_CSPI_CHECK_IP_CSUM    V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_CSPI_CHECK_TCP_CSUM    6
+#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
+#define F_TP_IN_CSPI_CHECK_TCP_CSUM    V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+
+#define S_TP_IN_ESPI_TUNNEL    7
+#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL)
+#define F_TP_IN_ESPI_TUNNEL    V_TP_IN_ESPI_TUNNEL(1U)
+
+#define S_TP_IN_ESPI_ETHERNET    8
+#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
+#define F_TP_IN_ESPI_ETHERNET    V_TP_IN_ESPI_ETHERNET(1U)
+
+#define S_TP_IN_ESPI_CPL    10
+#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL)
+#define F_TP_IN_ESPI_CPL    V_TP_IN_ESPI_CPL(1U)
+
+#define S_TP_IN_ESPI_POS    11
+#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS)
+#define F_TP_IN_ESPI_POS    V_TP_IN_ESPI_POS(1U)
+
+#define S_TP_IN_ESPI_CHECK_IP_CSUM    12
+#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
+#define F_TP_IN_ESPI_CHECK_IP_CSUM    V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_ESPI_CHECK_TCP_CSUM    13
+#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
+#define F_TP_IN_ESPI_CHECK_TCP_CSUM    V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
+
+#define S_OFFLOAD_DISABLE    14
+#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
+#define F_OFFLOAD_DISABLE    V_OFFLOAD_DISABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_TP_OUT_C_ETH    0
+#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH)
+#define F_TP_OUT_C_ETH    V_TP_OUT_C_ETH(1U)
+
+#define S_TP_OUT_CSPI_CPL    2
+#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
+#define F_TP_OUT_CSPI_CPL    V_TP_OUT_CSPI_CPL(1U)
+
+#define S_TP_OUT_CSPI_POS    3
+#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS)
+#define F_TP_OUT_CSPI_POS    V_TP_OUT_CSPI_POS(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_IP_CSUM    4
+#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_IP_CSUM    V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM    5
+#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM    V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_ETHERNET    6
+#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
+#define F_TP_OUT_ESPI_ETHERNET    V_TP_OUT_ESPI_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_TAG_ETHERNET    7
+#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET)
+#define F_TP_OUT_ESPI_TAG_ETHERNET    V_TP_OUT_ESPI_TAG_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_CPL    8
+#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL)
+#define F_TP_OUT_ESPI_CPL    V_TP_OUT_ESPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_POS    9
+#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS)
+#define F_TP_OUT_ESPI_POS    V_TP_OUT_ESPI_POS(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_IP_CSUM    10
+#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_IP_CSUM    V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM    11
+#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM    V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_IP_TTL    0
+#define M_IP_TTL    0xff
+#define V_IP_TTL(x) ((x) << S_IP_TTL)
+#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL)
+
+#define S_TCAM_SERVER_REGION_USAGE    8
+#define M_TCAM_SERVER_REGION_USAGE    0x3
+#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE)
+#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE)
+
+#define S_QOS_MAPPING    10
+#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING)
+#define F_QOS_MAPPING    V_QOS_MAPPING(1U)
+
+#define S_TCP_CSUM    11
+#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
+#define F_TCP_CSUM    V_TCP_CSUM(1U)
+
+#define S_UDP_CSUM    12
+#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
+#define F_UDP_CSUM    V_UDP_CSUM(1U)
+
+#define S_IP_CSUM    13
+#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
+#define F_IP_CSUM    V_IP_CSUM(1U)
+
+#define S_IP_ID_SPLIT    14
+#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT)
+#define F_IP_ID_SPLIT    V_IP_ID_SPLIT(1U)
+
+#define S_PATH_MTU    15
+#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
+#define F_PATH_MTU    V_PATH_MTU(1U)
+
+#define S_5TUPLE_LOOKUP    17
+#define M_5TUPLE_LOOKUP    0x3
+#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+#define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP)
+
+#define S_IP_FRAGMENT_DROP    19
+#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP)
+#define F_IP_FRAGMENT_DROP    V_IP_FRAGMENT_DROP(1U)
+
+#define S_PING_DROP    20
+#define V_PING_DROP(x) ((x) << S_PING_DROP)
+#define F_PING_DROP    V_PING_DROP(1U)
+
+#define S_PROTECT_MODE    21
+#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE)
+#define F_PROTECT_MODE    V_PROTECT_MODE(1U)
+
+#define S_SYN_COOKIE_ALGORITHM    22
+#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM)
+#define F_SYN_COOKIE_ALGORITHM    V_SYN_COOKIE_ALGORITHM(1U)
+
+#define S_ATTACK_FILTER    23
+#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER)
+#define F_ATTACK_FILTER    V_ATTACK_FILTER(1U)
+
+#define S_INTERFACE_TYPE    24
+#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE)
+#define F_INTERFACE_TYPE    V_INTERFACE_TYPE(1U)
+
+#define S_DISABLE_RX_FLOW_CONTROL    25
+#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL)
+#define F_DISABLE_RX_FLOW_CONTROL    V_DISABLE_RX_FLOW_CONTROL(1U)
+
+#define S_SYN_COOKIE_PARAMETER    26
+#define M_SYN_COOKIE_PARAMETER    0x3f
+#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER)
+
+#define A_TP_GLOBAL_RX_CREDITS 0x30c
+#define A_TP_CM_SIZE 0x310
+#define A_TP_CM_MM_BASE 0x314
+
+#define S_CM_MEMMGR_BASE    0
+#define M_CM_MEMMGR_BASE    0xfffffff
+#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE)
+#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE)
+
+#define A_TP_CM_TIMER_BASE 0x318
+
+#define S_CM_TIMER_BASE    0
+#define M_CM_TIMER_BASE    0xfffffff
+#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE)
+#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE)
+
+#define A_TP_PM_SIZE 0x31c
+#define A_TP_PM_TX_BASE 0x320
+#define A_TP_PM_DEFRAG_BASE 0x324
+#define A_TP_PM_RX_BASE 0x328
+#define A_TP_PM_RX_PG_SIZE 0x32c
+#define A_TP_PM_RX_MAX_PGS 0x330
+#define A_TP_PM_TX_PG_SIZE 0x334
+#define A_TP_PM_TX_MAX_PGS 0x338
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_TIMESTAMP    0
+#define M_TIMESTAMP    0x3
+#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP)
+#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP)
+
+#define S_WINDOW_SCALE    2
+#define M_WINDOW_SCALE    0x3
+#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE)
+#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE)
+
+#define S_SACK    4
+#define M_SACK    0x3
+#define V_SACK(x) ((x) << S_SACK)
+#define G_SACK(x) (((x) >> S_SACK) & M_SACK)
+
+#define S_ECN    6
+#define M_ECN    0x3
+#define V_ECN(x) ((x) << S_ECN)
+#define G_ECN(x) (((x) >> S_ECN) & M_ECN)
+
+#define S_SACK_ALGORITHM    8
+#define M_SACK_ALGORITHM    0x3
+#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM)
+#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM)
+
+#define S_MSS    10
+#define V_MSS(x) ((x) << S_MSS)
+#define F_MSS    V_MSS(1U)
+
+#define S_DEFAULT_PEER_MSS    16
+#define M_DEFAULT_PEER_MSS    0xffff
+#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS)
+#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_DACK_MODE    0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE    V_DACK_MODE(1U)
+
+#define S_DACK_AUTO_MGMT    1
+#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT)
+#define F_DACK_AUTO_MGMT    V_DACK_AUTO_MGMT(1U)
+
+#define S_DACK_AUTO_CAREFUL    2
+#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL)
+#define F_DACK_AUTO_CAREFUL    V_DACK_AUTO_CAREFUL(1U)
+
+#define S_DACK_MSS_SELECTOR    3
+#define M_DACK_MSS_SELECTOR    0x3
+#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR)
+#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR)
+
+#define S_DACK_BYTE_THRESHOLD    5
+#define M_DACK_BYTE_THRESHOLD    0xfffff
+#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD)
+#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TP_ACCESS_LATENCY    0
+#define M_TP_ACCESS_LATENCY    0xf
+#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY)
+#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY)
+
+#define S_HELD_FIN_DISABLE    4
+#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE)
+#define F_HELD_FIN_DISABLE    V_HELD_FIN_DISABLE(1U)
+
+#define S_DDP_FC_ENABLE    5
+#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE)
+#define F_DDP_FC_ENABLE    V_DDP_FC_ENABLE(1U)
+
+#define S_RDMA_ERR_ENABLE    6
+#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE)
+#define F_RDMA_ERR_ENABLE    V_RDMA_ERR_ENABLE(1U)
+
+#define S_FAST_PDU_DELIVERY    7
+#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY)
+#define F_FAST_PDU_DELIVERY    V_FAST_PDU_DELIVERY(1U)
+
+#define S_CLEAR_FIN    8
+#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN)
+#define F_CLEAR_FIN    V_CLEAR_FIN(1U)
+
+#define S_DIS_TX_FILL_WIN_PUSH    12
+#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
+#define F_DIS_TX_FILL_WIN_PUSH    V_DIS_TX_FILL_WIN_PUSH(1U)
+
+#define S_TP_PC_REV    30
+#define M_TP_PC_REV    0x3
+#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV)
+#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+
+#define A_TP_BACKOFF0 0x350
+
+#define S_ELEMENT0    0
+#define M_ELEMENT0    0xff
+#define V_ELEMENT0(x) ((x) << S_ELEMENT0)
+#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0)
+
+#define S_ELEMENT1    8
+#define M_ELEMENT1    0xff
+#define V_ELEMENT1(x) ((x) << S_ELEMENT1)
+#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1)
+
+#define S_ELEMENT2    16
+#define M_ELEMENT2    0xff
+#define V_ELEMENT2(x) ((x) << S_ELEMENT2)
+#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2)
+
+#define S_ELEMENT3    24
+#define M_ELEMENT3    0xff
+#define V_ELEMENT3(x) ((x) << S_ELEMENT3)
+#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3)
+
+#define A_TP_BACKOFF1 0x354
+#define A_TP_BACKOFF2 0x358
+#define A_TP_BACKOFF3 0x35c
+#define A_TP_PARA_REG0 0x360
+
+#define S_VAR_MULT    0
+#define M_VAR_MULT    0xf
+#define V_VAR_MULT(x) ((x) << S_VAR_MULT)
+#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT)
+
+#define S_VAR_GAIN    4
+#define M_VAR_GAIN    0xf
+#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN)
+#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN)
+
+#define S_SRTT_GAIN    8
+#define M_SRTT_GAIN    0xf
+#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN)
+#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN)
+
+#define S_RTTVAR_INIT    12
+#define M_RTTVAR_INIT    0xf
+#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT)
+#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT)
+
+#define S_DUP_THRESH    20
+#define M_DUP_THRESH    0xf
+#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH)
+#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH)
+
+#define S_INIT_CONG_WIN    24
+#define M_INIT_CONG_WIN    0x7
+#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN)
+#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITIAL_SLOW_START_THRESHOLD    0
+#define M_INITIAL_SLOW_START_THRESHOLD    0xffff
+#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD)
+#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD)
+
+#define S_RECEIVE_BUFFER_SIZE    16
+#define M_RECEIVE_BUFFER_SIZE    0xffff
+#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE)
+#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE)
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_RX_COALESCE_SIZE    0
+#define M_RX_COALESCE_SIZE    0xffff
+#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE)
+#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE)
+
+#define S_MAX_RX_SIZE    16
+#define M_MAX_RX_SIZE    0xffff
+#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE)
+#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_RX_COALESCING_PSH_DELIVER    0
+#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER)
+#define F_RX_COALESCING_PSH_DELIVER    V_RX_COALESCING_PSH_DELIVER(1U)
+
+#define S_RX_COALESCING_ENABLE    1
+#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE)
+#define F_RX_COALESCING_ENABLE    V_RX_COALESCING_ENABLE(1U)
+
+#define S_TAHOE_ENABLE    2
+#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE)
+#define F_TAHOE_ENABLE    V_TAHOE_ENABLE(1U)
+
+#define S_MAX_REORDER_FRAGMENTS    12
+#define M_MAX_REORDER_FRAGMENTS    0x7
+#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS)
+#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_DELAYED_ACK_TIMER_RESOLUTION    0
+#define M_DELAYED_ACK_TIMER_RESOLUTION    0x3f
+#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION)
+#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION)
+
+#define S_GENERIC_TIMER_RESOLUTION    16
+#define M_GENERIC_TIMER_RESOLUTION    0x3f
+#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION)
+#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION)
+
+#define A_TP_2MSL 0x394
+
+#define S_2MSL    0
+#define M_2MSL    0x3fffffff
+#define V_2MSL(x) ((x) << S_2MSL)
+#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL)
+
+#define A_TP_RXT_MIN 0x398
+
+#define S_RETRANSMIT_TIMER_MIN    0
+#define M_RETRANSMIT_TIMER_MIN    0xffff
+#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN)
+#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN)
+
+#define A_TP_RXT_MAX 0x39c
+
+#define S_RETRANSMIT_TIMER_MAX    0
+#define M_RETRANSMIT_TIMER_MAX    0x3fffffff
+#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX)
+#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX)
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define S_PERSIST_TIMER_MIN    0
+#define M_PERSIST_TIMER_MIN    0xffff
+#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN)
+#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN)
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define S_PERSIST_TIMER_MAX    0
+#define M_PERSIST_TIMER_MAX    0x3fffffff
+#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX)
+#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX)
+
+#define A_TP_KEEP_IDLE 0x3ac
+
+#define S_KEEP_ALIVE_IDLE_TIME    0
+#define M_KEEP_ALIVE_IDLE_TIME    0x3fffffff
+#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME)
+#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME)
+
+#define A_TP_KEEP_INTVL 0x3b0
+
+#define S_KEEP_ALIVE_INTERVAL_TIME    0
+#define M_KEEP_ALIVE_INTERVAL_TIME    0x3fffffff
+#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME)
+#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME)
+
+#define A_TP_INIT_SRTT 0x3b4
+
+#define S_INITIAL_SRTT    0
+#define M_INITIAL_SRTT    0xffff
+#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT)
+#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT)
+
+#define A_TP_DACK_TIME 0x3b8
+
+#define S_DELAYED_ACK_TIME    0
+#define M_DELAYED_ACK_TIME    0x7ff
+#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME)
+#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME)
+
+#define A_TP_FINWAIT2_TIME 0x3bc
+
+#define S_FINWAIT2_TIME    0
+#define M_FINWAIT2_TIME    0x3fffffff
+#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME)
+#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME)
+
+#define A_TP_FAST_FINWAIT2_TIME 0x3c0
+
+#define S_FAST_FINWAIT2_TIME    0
+#define M_FAST_FINWAIT2_TIME    0x3fffffff
+#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME)
+#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME)
+
+#define A_TP_SHIFT_CNT 0x3c4
+
+#define S_KEEPALIVE_MAX    0
+#define M_KEEPALIVE_MAX    0xff
+#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX)
+#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX)
+
+#define S_WINDOWPROBE_MAX    8
+#define M_WINDOWPROBE_MAX    0xff
+#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX)
+#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX)
+
+#define S_RETRANSMISSION_MAX    16
+#define M_RETRANSMISSION_MAX    0xff
+#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX)
+#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX)
+
+#define S_SYN_MAX    24
+#define M_SYN_MAX    0xff
+#define V_SYN_MAX(x) ((x) << S_SYN_MAX)
+#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX)
+
+#define A_TP_QOS_REG0 0x3e0
+
+#define S_L3_VALUE    0
+#define M_L3_VALUE    0x3f
+#define V_L3_VALUE(x) ((x) << S_L3_VALUE)
+#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE)
+
+#define A_TP_QOS_REG1 0x3e4
+#define A_TP_QOS_REG2 0x3e8
+#define A_TP_QOS_REG3 0x3ec
+#define A_TP_QOS_REG4 0x3f0
+#define A_TP_QOS_REG5 0x3f4
+#define A_TP_QOS_REG6 0x3f8
+#define A_TP_QOS_REG7 0x3fc
+#define A_TP_MTU_REG0 0x404
+#define A_TP_MTU_REG1 0x408
+#define A_TP_MTU_REG2 0x40c
+#define A_TP_MTU_REG3 0x410
+#define A_TP_MTU_REG4 0x414
+#define A_TP_MTU_REG5 0x418
+#define A_TP_MTU_REG6 0x41c
+#define A_TP_MTU_REG7 0x420
+#define A_TP_RESET 0x44c
+
+#define S_TP_RESET    0
+#define V_TP_RESET(x) ((x) << S_TP_RESET)
+#define F_TP_RESET    V_TP_RESET(1U)
+
+#define S_CM_MEMMGR_INIT    1
+#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT)
+#define F_CM_MEMMGR_INIT    V_CM_MEMMGR_INIT(1U)
+
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_DATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
+#define A_TP_CM_MM_RX_FLST_BASE 0x460
+
+#define S_CM_MEMMGR_RX_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_RX_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_TX_FLST_BASE 0x464
+
+#define S_CM_MEMMGR_TX_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_TX_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_P_FLST_BASE 0x468
+
+#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_MAX_P 0x46c
+
+#define S_CM_MEMMGR_MAX_PSTRUCT    0
+#define M_CM_MEMMGR_MAX_PSTRUCT    0xfffffff
+#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT)
+#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT)
+
+#define A_TP_INT_ENABLE 0x470
+
+#define S_TX_FREE_LIST_EMPTY    0
+#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY)
+#define F_TX_FREE_LIST_EMPTY    V_TX_FREE_LIST_EMPTY(1U)
+
+#define S_RX_FREE_LIST_EMPTY    1
+#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY)
+#define F_RX_FREE_LIST_EMPTY    V_RX_FREE_LIST_EMPTY(1U)
+
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_TIMER_SEPARATOR 0x4a4
+
+#define S_DISABLE_PAST_TIMER_INSERTION    0
+#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION)
+#define F_DISABLE_PAST_TIMER_INSERTION    V_DISABLE_PAST_TIMER_INSERTION(1U)
+
+#define S_MODULATION_TIMER_SEPARATOR    1
+#define M_MODULATION_TIMER_SEPARATOR    0x7fff
+#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR)
+#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR)
+
+#define S_GLOBAL_TIMER_SEPARATOR    16
+#define M_GLOBAL_TIMER_SEPARATOR    0xffff
+#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR)
+#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR)
+
+#define A_TP_CM_FC_MODE 0x4b0
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
+#define A_TP_TX_DROP_CONFIG 0x4b8
+
+#define S_ENABLE_TX_DROP    31
+#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
+#define F_ENABLE_TX_DROP    V_ENABLE_TX_DROP(1U)
+
+#define S_ENABLE_TX_ERROR    30
+#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
+#define F_ENABLE_TX_ERROR    V_ENABLE_TX_ERROR(1U)
+
+#define S_DROP_TICKS_CNT    4
+#define M_DROP_TICKS_CNT    0x3ffffff
+#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT)
+
+#define S_NUM_PKTS_DROPPED    0
+#define M_NUM_PKTS_DROPPED    0xf
+#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+
+/* RAT registers */
+#define A_RAT_ROUTE_CONTROL 0x580
+
+#define S_USE_ROUTE_TABLE    0
+#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE)
+#define F_USE_ROUTE_TABLE    V_USE_ROUTE_TABLE(1U)
+
+#define S_ENABLE_CSPI    1
+#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI)
+#define F_ENABLE_CSPI    V_ENABLE_CSPI(1U)
+
+#define S_ENABLE_PCIX    2
+#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX)
+#define F_ENABLE_PCIX    V_ENABLE_PCIX(1U)
+
+#define A_RAT_ROUTE_TABLE_INDEX 0x584
+
+#define S_ROUTE_TABLE_INDEX    0
+#define M_ROUTE_TABLE_INDEX    0xf
+#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX)
+#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX)
+
+#define A_RAT_ROUTE_TABLE_DATA 0x588
+#define A_RAT_NO_ROUTE 0x58c
+
+#define S_CPL_OPCODE    0
+#define M_CPL_OPCODE    0xff
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE)
+
+#define A_RAT_INTR_ENABLE 0x590
+
+#define S_ZEROROUTEERROR    0
+#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR)
+#define F_ZEROROUTEERROR    V_ZEROROUTEERROR(1U)
+
+#define S_CSPIFRAMINGERROR    1
+#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR)
+#define F_CSPIFRAMINGERROR    V_CSPIFRAMINGERROR(1U)
+
+#define S_SGEFRAMINGERROR    2
+#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR)
+#define F_SGEFRAMINGERROR    V_SGEFRAMINGERROR(1U)
+
+#define S_TPFRAMINGERROR    3
+#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR)
+#define F_TPFRAMINGERROR    V_TPFRAMINGERROR(1U)
+
+#define A_RAT_INTR_CAUSE 0x594
+
+/* CSPI registers */
+#define A_CSPI_RX_AE_WM 0x810
+#define A_CSPI_RX_AF_WM 0x814
+#define A_CSPI_CALENDAR_LEN 0x818
+
+#define S_CALENDARLENGTH    0
+#define M_CALENDARLENGTH    0xffff
+#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH)
+#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH)
+
+#define A_CSPI_FIFO_STATUS_ENABLE 0x820
+
+#define S_FIFOSTATUSENABLE    0
+#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE)
+#define F_FIFOSTATUSENABLE    V_FIFOSTATUSENABLE(1U)
+
+#define A_CSPI_MAXBURST1_MAXBURST2 0x828
+
+#define S_MAXBURST1    0
+#define M_MAXBURST1    0xffff
+#define V_MAXBURST1(x) ((x) << S_MAXBURST1)
+#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1)
+
+#define S_MAXBURST2    16
+#define M_MAXBURST2    0xffff
+#define V_MAXBURST2(x) ((x) << S_MAXBURST2)
+#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2)
+
+#define A_CSPI_TRAIN 0x82c
+
+#define S_CSPI_TRAIN_ALPHA    0
+#define M_CSPI_TRAIN_ALPHA    0xffff
+#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA)
+#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA)
+
+#define S_CSPI_TRAIN_DATA_MAXT    16
+#define M_CSPI_TRAIN_DATA_MAXT    0xffff
+#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT)
+#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT)
+
+#define A_CSPI_INTR_STATUS 0x848
+
+#define S_DIP4ERR    0
+#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
+#define F_DIP4ERR    V_DIP4ERR(1U)
+
+#define S_RXDROP    1
+#define V_RXDROP(x) ((x) << S_RXDROP)
+#define F_RXDROP    V_RXDROP(1U)
+
+#define S_TXDROP    2
+#define V_TXDROP(x) ((x) << S_TXDROP)
+#define F_TXDROP    V_TXDROP(1U)
+
+#define S_RXOVERFLOW    3
+#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
+#define F_RXOVERFLOW    V_RXOVERFLOW(1U)
+
+#define S_RAMPARITYERR    4
+#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
+#define F_RAMPARITYERR    V_RAMPARITYERR(1U)
+
+#define A_CSPI_INTR_ENABLE 0x84c
+
+/* ESPI registers */
+#define A_ESPI_SCH_TOKEN0 0x880
+
+#define S_SCHTOKEN0    0
+#define M_SCHTOKEN0    0xffff
+#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0)
+#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0)
+
+#define A_ESPI_SCH_TOKEN1 0x884
+
+#define S_SCHTOKEN1    0
+#define M_SCHTOKEN1    0xffff
+#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1)
+#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1)
+
+#define A_ESPI_SCH_TOKEN2 0x888
+
+#define S_SCHTOKEN2    0
+#define M_SCHTOKEN2    0xffff
+#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2)
+#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2)
+
+#define A_ESPI_SCH_TOKEN3 0x88c
+
+#define S_SCHTOKEN3    0
+#define M_SCHTOKEN3    0xffff
+#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3)
+#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3)
+
+#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+
+#define S_ALMOSTEMPTY    0
+#define M_ALMOSTEMPTY    0xffff
+#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY)
+#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY)
+
+#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+
+#define S_ALMOSTFULL    0
+#define M_ALMOSTFULL    0xffff
+#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL)
+#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL)
+
+#define A_ESPI_CALENDAR_LENGTH 0x898
+#define A_PORT_CONFIG 0x89c
+
+#define S_RX_NPORTS    0
+#define M_RX_NPORTS    0xff
+#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS)
+
+#define S_TX_NPORTS    8
+#define M_TX_NPORTS    0xff
+#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS)
+
+#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
+
+#define S_RXSTATUSENABLE    0
+#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
+#define F_RXSTATUSENABLE    V_RXSTATUSENABLE(1U)
+
+#define S_TXDROPENABLE    1
+#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE)
+#define F_TXDROPENABLE    V_TXDROPENABLE(1U)
+
+#define S_RXENDIANMODE    2
+#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE)
+#define F_RXENDIANMODE    V_RXENDIANMODE(1U)
+
+#define S_TXENDIANMODE    3
+#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE)
+#define F_TXENDIANMODE    V_TXENDIANMODE(1U)
+
+#define S_INTEL1010MODE    4
+#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
+#define F_INTEL1010MODE    V_INTEL1010MODE(1U)
+
+#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
+#define A_ESPI_TRAIN 0x8ac
+
+#define S_MAXTRAINALPHA    0
+#define M_MAXTRAINALPHA    0xffff
+#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA)
+#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA)
+
+#define S_MAXTRAINDATA    16
+#define M_MAXTRAINDATA    0xffff
+#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA)
+#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA)
+
+#define A_RAM_STATUS 0x8b0
+
+#define S_RXFIFOPARITYERROR    0
+#define M_RXFIFOPARITYERROR    0x3ff
+#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR)
+#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR)
+
+#define S_TXFIFOPARITYERROR    10
+#define M_TXFIFOPARITYERROR    0x3ff
+#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR)
+#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR)
+
+#define S_RXFIFOOVERFLOW    20
+#define M_RXFIFOOVERFLOW    0x3ff
+#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW)
+#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW)
+
+#define A_TX_DROP_COUNT0 0x8b4
+
+#define S_TXPORT0DROPCNT    0
+#define M_TXPORT0DROPCNT    0xffff
+#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT)
+#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT)
+
+#define S_TXPORT1DROPCNT    16
+#define M_TXPORT1DROPCNT    0xffff
+#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT)
+#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT)
+
+#define A_TX_DROP_COUNT1 0x8b8
+
+#define S_TXPORT2DROPCNT    0
+#define M_TXPORT2DROPCNT    0xffff
+#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT)
+#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT)
+
+#define S_TXPORT3DROPCNT    16
+#define M_TXPORT3DROPCNT    0xffff
+#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT)
+#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT)
+
+#define A_RX_DROP_COUNT0 0x8bc
+
+#define S_RXPORT0DROPCNT    0
+#define M_RXPORT0DROPCNT    0xffff
+#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT)
+#define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT)
+
+#define S_RXPORT1DROPCNT    16
+#define M_RXPORT1DROPCNT    0xffff
+#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT)
+#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT)
+
+#define A_RX_DROP_COUNT1 0x8c0
+
+#define S_RXPORT2DROPCNT    0
+#define M_RXPORT2DROPCNT    0xffff
+#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT)
+#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT)
+
+#define S_RXPORT3DROPCNT    16
+#define M_RXPORT3DROPCNT    0xffff
+#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT)
+#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT)
+
+#define A_DIP4_ERROR_COUNT 0x8c4
+
+#define S_DIP4ERRORCNT    0
+#define M_DIP4ERRORCNT    0xfff
+#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT)
+#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT)
+
+#define S_DIP4ERRORCNTSHADOW    12
+#define M_DIP4ERRORCNTSHADOW    0xfff
+#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW)
+#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW)
+
+#define S_TRICN_RX_TRAIN_ERR    24
+#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR)
+#define F_TRICN_RX_TRAIN_ERR    V_TRICN_RX_TRAIN_ERR(1U)
+
+#define S_TRICN_RX_TRAINING    25
+#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING)
+#define F_TRICN_RX_TRAINING    V_TRICN_RX_TRAINING(1U)
+
+#define S_TRICN_RX_TRAIN_OK    26
+#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK)
+#define F_TRICN_RX_TRAIN_OK    V_TRICN_RX_TRAIN_OK(1U)
+
+#define A_ESPI_INTR_STATUS 0x8c8
+
+#define S_DIP2PARITYERR    5
+#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
+#define F_DIP2PARITYERR    V_DIP2PARITYERR(1U)
+
+#define A_ESPI_INTR_ENABLE 0x8cc
+#define A_RX_DROP_THRESHOLD 0x8d0
+#define A_ESPI_RX_RESET 0x8ec
+
+#define S_ESPI_RX_LNK_RST    0
+#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST)
+#define F_ESPI_RX_LNK_RST    V_ESPI_RX_LNK_RST(1U)
+
+#define S_ESPI_RX_CORE_RST    1
+#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST)
+#define F_ESPI_RX_CORE_RST    V_ESPI_RX_CORE_RST(1U)
+
+#define S_RX_CLK_STATUS    2
+#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS)
+#define F_RX_CLK_STATUS    V_RX_CLK_STATUS(1U)
+
+#define A_ESPI_MISC_CONTROL 0x8f0
+
+#define S_OUT_OF_SYNC_COUNT    0
+#define M_OUT_OF_SYNC_COUNT    0xf
+#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_COUNT_MODE_ENABLE    4
+#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE)
+#define F_DIP2_COUNT_MODE_ENABLE    V_DIP2_COUNT_MODE_ENABLE(1U)
+
+#define S_DIP2_PARITY_ERR_THRES    5
+#define M_DIP2_PARITY_ERR_THRES    0xf
+#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES)
+
+#define S_DIP4_THRES    9
+#define M_DIP4_THRES    0xfff
+#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES)
+
+#define S_DIP4_THRES_ENABLE    21
+#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE)
+#define F_DIP4_THRES_ENABLE    V_DIP4_THRES_ENABLE(1U)
+
+#define S_FORCE_DISABLE_STATUS    22
+#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS)
+#define F_FORCE_DISABLE_STATUS    V_FORCE_DISABLE_STATUS(1U)
+
+#define S_DYNAMIC_DESKEW    23
+#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW)
+#define F_DYNAMIC_DESKEW    V_DYNAMIC_DESKEW(1U)
+
+#define S_MONITORED_PORT_NUM    25
+#define M_MONITORED_PORT_NUM    0x3
+#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM)
+
+#define S_MONITORED_DIRECTION    27
+#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
+#define F_MONITORED_DIRECTION    V_MONITORED_DIRECTION(1U)
+
+#define S_MONITORED_INTERFACE    28
+#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
+#define F_MONITORED_INTERFACE    V_MONITORED_INTERFACE(1U)
+
+#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+
+#define S_DIP2_ERR_CNT    0
+#define M_DIP2_ERR_CNT    0xf
+#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT)
+#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT)
+
+#define A_ESPI_CMD_ADDR 0x8f8
+
+#define S_WRITE_DATA    0
+#define M_WRITE_DATA    0xff
+#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA)
+
+#define S_REGISTER_OFFSET    8
+#define M_REGISTER_OFFSET    0xf
+#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET)
+
+#define S_CHANNEL_ADDR    12
+#define M_CHANNEL_ADDR    0xf
+#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR)
+
+#define S_MODULE_ADDR    16
+#define M_MODULE_ADDR    0x3
+#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR)
+
+#define S_BUNDLE_ADDR    20
+#define M_BUNDLE_ADDR    0x3
+#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR)
+
+#define S_SPI4_COMMAND    24
+#define M_SPI4_COMMAND    0xff
+#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND)
+
+#define A_ESPI_GOSTAT 0x8fc
+
+#define S_READ_DATA    0
+#define M_READ_DATA    0xff
+#define V_READ_DATA(x) ((x) << S_READ_DATA)
+#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA)
+
+#define S_ESPI_CMD_BUSY    8
+#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
+#define F_ESPI_CMD_BUSY    V_ESPI_CMD_BUSY(1U)
+
+#define S_ERROR_ACK    9
+#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK)
+#define F_ERROR_ACK    V_ERROR_ACK(1U)
+
+#define S_UNMAPPED_ERR    10
+#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR)
+#define F_UNMAPPED_ERR    V_UNMAPPED_ERR(1U)
+
+#define S_TRANSACTION_TIMER    16
+#define M_TRANSACTION_TIMER    0xff
+#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER)
+#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER)
+
+
+/* ULP registers */
+#define A_ULP_ULIMIT 0x980
+#define A_ULP_TAGMASK 0x984
+#define A_ULP_HREG_INDEX 0x988
+#define A_ULP_HREG_DATA 0x98c
+#define A_ULP_INT_ENABLE 0x990
+#define A_ULP_INT_CAUSE 0x994
+
+#define S_HREG_PAR_ERR    0
+#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR)
+#define F_HREG_PAR_ERR    V_HREG_PAR_ERR(1U)
+
+#define S_EGRS_DATA_PAR_ERR    1
+#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR)
+#define F_EGRS_DATA_PAR_ERR    V_EGRS_DATA_PAR_ERR(1U)
+
+#define S_INGRS_DATA_PAR_ERR    2
+#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR)
+#define F_INGRS_DATA_PAR_ERR    V_INGRS_DATA_PAR_ERR(1U)
+
+#define S_PM_INTR    3
+#define V_PM_INTR(x) ((x) << S_PM_INTR)
+#define F_PM_INTR    V_PM_INTR(1U)
+
+#define S_PM_E2C_SYNC_ERR    4
+#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR)
+#define F_PM_E2C_SYNC_ERR    V_PM_E2C_SYNC_ERR(1U)
+
+#define S_PM_C2E_SYNC_ERR    5
+#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR)
+#define F_PM_C2E_SYNC_ERR    V_PM_C2E_SYNC_ERR(1U)
+
+#define S_PM_E2C_EMPTY_ERR    6
+#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR)
+#define F_PM_E2C_EMPTY_ERR    V_PM_E2C_EMPTY_ERR(1U)
+
+#define S_PM_C2E_EMPTY_ERR    7
+#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR)
+#define F_PM_C2E_EMPTY_ERR    V_PM_C2E_EMPTY_ERR(1U)
+
+#define S_PM_PAR_ERR    8
+#define M_PM_PAR_ERR    0xffff
+#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR)
+#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR)
+
+#define S_PM_E2C_WRT_FULL    24
+#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL)
+#define F_PM_E2C_WRT_FULL    V_PM_E2C_WRT_FULL(1U)
+
+#define S_PM_C2E_WRT_FULL    25
+#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL)
+#define F_PM_C2E_WRT_FULL    V_PM_C2E_WRT_FULL(1U)
+
+#define A_ULP_PIO_CTRL 0x998
+
+/* PL registers */
+#define A_PL_ENABLE 0xa00
+
+#define S_PL_INTR_SGE_ERR    0
+#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
+#define F_PL_INTR_SGE_ERR    V_PL_INTR_SGE_ERR(1U)
+
+#define S_PL_INTR_SGE_DATA    1
+#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
+#define F_PL_INTR_SGE_DATA    V_PL_INTR_SGE_DATA(1U)
+
+#define S_PL_INTR_MC3    2
+#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3)
+#define F_PL_INTR_MC3    V_PL_INTR_MC3(1U)
+
+#define S_PL_INTR_MC4    3
+#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4)
+#define F_PL_INTR_MC4    V_PL_INTR_MC4(1U)
+
+#define S_PL_INTR_MC5    4
+#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5)
+#define F_PL_INTR_MC5    V_PL_INTR_MC5(1U)
+
+#define S_PL_INTR_RAT    5
+#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT)
+#define F_PL_INTR_RAT    V_PL_INTR_RAT(1U)
+
+#define S_PL_INTR_TP    6
+#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
+#define F_PL_INTR_TP    V_PL_INTR_TP(1U)
+
+#define S_PL_INTR_ULP    7
+#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP)
+#define F_PL_INTR_ULP    V_PL_INTR_ULP(1U)
+
+#define S_PL_INTR_ESPI    8
+#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
+#define F_PL_INTR_ESPI    V_PL_INTR_ESPI(1U)
+
+#define S_PL_INTR_CSPI    9
+#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI)
+#define F_PL_INTR_CSPI    V_PL_INTR_CSPI(1U)
+
+#define S_PL_INTR_PCIX    10
+#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
+#define F_PL_INTR_PCIX    V_PL_INTR_PCIX(1U)
+
+#define S_PL_INTR_EXT    11
+#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
+#define F_PL_INTR_EXT    V_PL_INTR_EXT(1U)
+
+#define A_PL_CAUSE 0xa04
+
+/* MC5 registers */
+#define A_MC5_CONFIG 0xc04
+
+#define S_MODE    0
+#define V_MODE(x) ((x) << S_MODE)
+#define F_MODE    V_MODE(1U)
+
+#define S_TCAM_RESET    1
+#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
+#define F_TCAM_RESET    V_TCAM_RESET(1U)
+
+#define S_TCAM_READY    2
+#define V_TCAM_READY(x) ((x) << S_TCAM_READY)
+#define F_TCAM_READY    V_TCAM_READY(1U)
+
+#define S_DBGI_ENABLE    4
+#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE)
+#define F_DBGI_ENABLE    V_DBGI_ENABLE(1U)
+
+#define S_M_BUS_ENABLE    5
+#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
+#define F_M_BUS_ENABLE    V_M_BUS_ENABLE(1U)
+
+#define S_PARITY_ENABLE    6
+#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE)
+#define F_PARITY_ENABLE    V_PARITY_ENABLE(1U)
+
+#define S_SYN_ISSUE_MODE    7
+#define M_SYN_ISSUE_MODE    0x3
+#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE)
+#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE)
+
+#define S_BUILD    16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD    V_BUILD(1U)
+
+#define S_COMPRESSION_ENABLE    17
+#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE)
+#define F_COMPRESSION_ENABLE    V_COMPRESSION_ENABLE(1U)
+
+#define S_NUM_LIP    18
+#define M_NUM_LIP    0x3f
+#define V_NUM_LIP(x) ((x) << S_NUM_LIP)
+#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP)
+
+#define S_TCAM_PART_CNT    24
+#define M_TCAM_PART_CNT    0x3
+#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT)
+#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT)
+
+#define S_TCAM_PART_TYPE    26
+#define M_TCAM_PART_TYPE    0x3
+#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE)
+#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE)
+
+#define S_TCAM_PART_SIZE    28
+#define M_TCAM_PART_SIZE    0x3
+#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE)
+#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE)
+
+#define S_TCAM_PART_TYPE_HI    30
+#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI)
+#define F_TCAM_PART_TYPE_HI    V_TCAM_PART_TYPE_HI(1U)
+
+#define A_MC5_SIZE 0xc08
+
+#define S_SIZE    0
+#define M_SIZE    0x3fffff
+#define V_SIZE(x) ((x) << S_SIZE)
+#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
+
+#define A_MC5_ROUTING_TABLE_INDEX 0xc0c
+
+#define S_START_OF_ROUTING_TABLE    0
+#define M_START_OF_ROUTING_TABLE    0x3fffff
+#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE)
+#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE)
+
+#define A_MC5_SERVER_INDEX 0xc14
+
+#define S_START_OF_SERVER_INDEX    0
+#define M_START_OF_SERVER_INDEX    0x3fffff
+#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX)
+#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX)
+
+#define A_MC5_LIP_RAM_ADDR 0xc18
+
+#define S_LOCAL_IP_RAM_ADDR    0
+#define M_LOCAL_IP_RAM_ADDR    0x3f
+#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR)
+#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR)
+
+#define S_RAM_WRITE_ENABLE    8
+#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE)
+#define F_RAM_WRITE_ENABLE    V_RAM_WRITE_ENABLE(1U)
+
+#define A_MC5_LIP_RAM_DATA 0xc1c
+#define A_MC5_RSP_LATENCY 0xc20
+
+#define S_SEARCH_RESPONSE_LATENCY    0
+#define M_SEARCH_RESPONSE_LATENCY    0x1f
+#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY)
+#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY)
+
+#define S_LEARN_RESPONSE_LATENCY    8
+#define M_LEARN_RESPONSE_LATENCY    0x1f
+#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY)
+#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY)
+
+#define A_MC5_PARITY_LATENCY 0xc24
+
+#define S_SRCHLAT    0
+#define M_SRCHLAT    0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
+#define S_PARLAT    8
+#define M_PARLAT    0x1f
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_WR_LRN_VERIFY 0xc28
+
+#define S_POVEREN    0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN    V_POVEREN(1U)
+
+#define S_LRNVEREN    1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN    V_LRNVEREN(1U)
+
+#define S_VWVEREN    2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN    V_VWVEREN(1U)
+
+#define A_MC5_PART_ID_INDEX 0xc2c
+
+#define S_IDINDEX    0
+#define M_IDINDEX    0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_RESET_MAX 0xc30
+
+#define S_RSTMAX    0
+#define M_RSTMAX    0x1ff
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_INT_ENABLE 0xc40
+
+#define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR    0
+#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR    V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR    1
+#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR    V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_RT_REGION_ERR    2
+#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR)
+#define F_MC5_INT_HIT_IN_RT_REGION_ERR    V_MC5_INT_HIT_IN_RT_REGION_ERR(1U)
+
+#define S_MC5_INT_MISS_ERR    3
+#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR)
+#define F_MC5_INT_MISS_ERR    V_MC5_INT_MISS_ERR(1U)
+
+#define S_MC5_INT_LIP0_ERR    4
+#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR)
+#define F_MC5_INT_LIP0_ERR    V_MC5_INT_LIP0_ERR(1U)
+
+#define S_MC5_INT_LIP_MISS_ERR    5
+#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR)
+#define F_MC5_INT_LIP_MISS_ERR    V_MC5_INT_LIP_MISS_ERR(1U)
+
+#define S_MC5_INT_PARITY_ERR    6
+#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR)
+#define F_MC5_INT_PARITY_ERR    V_MC5_INT_PARITY_ERR(1U)
+
+#define S_MC5_INT_ACTIVE_REGION_FULL    7
+#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL)
+#define F_MC5_INT_ACTIVE_REGION_FULL    V_MC5_INT_ACTIVE_REGION_FULL(1U)
+
+#define S_MC5_INT_NFA_SRCH_ERR    8
+#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR)
+#define F_MC5_INT_NFA_SRCH_ERR    V_MC5_INT_NFA_SRCH_ERR(1U)
+
+#define S_MC5_INT_SYN_COOKIE    9
+#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE)
+#define F_MC5_INT_SYN_COOKIE    V_MC5_INT_SYN_COOKIE(1U)
+
+#define S_MC5_INT_SYN_COOKIE_BAD    10
+#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD)
+#define F_MC5_INT_SYN_COOKIE_BAD    V_MC5_INT_SYN_COOKIE_BAD(1U)
+
+#define S_MC5_INT_SYN_COOKIE_OFF    11
+#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF)
+#define F_MC5_INT_SYN_COOKIE_OFF    V_MC5_INT_SYN_COOKIE_OFF(1U)
+
+#define S_MC5_INT_UNKNOWN_CMD    15
+#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD)
+#define F_MC5_INT_UNKNOWN_CMD    V_MC5_INT_UNKNOWN_CMD(1U)
+
+#define S_MC5_INT_REQUESTQ_PARITY_ERR    16
+#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR)
+#define F_MC5_INT_REQUESTQ_PARITY_ERR    V_MC5_INT_REQUESTQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DISPATCHQ_PARITY_ERR    17
+#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR)
+#define F_MC5_INT_DISPATCHQ_PARITY_ERR    V_MC5_INT_DISPATCHQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DEL_ACT_EMPTY    18
+#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY)
+#define F_MC5_INT_DEL_ACT_EMPTY    V_MC5_INT_DEL_ACT_EMPTY(1U)
+
+#define A_MC5_INT_CAUSE 0xc44
+#define A_MC5_INT_TID 0xc48
+#define A_MC5_INT_PTID 0xc4c
+#define A_MC5_DBGI_CONFIG 0xc74
+#define A_MC5_DBGI_REQ_CMD 0xc78
+
+#define S_CMDMODE    0
+#define M_CMDMODE    0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
+#define S_SADRSEL    4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL    V_SADRSEL(1U)
+
+#define S_WRITE_BURST_SIZE    22
+#define M_WRITE_BURST_SIZE    0x3ff
+#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE)
+#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE)
+
+#define A_MC5_DBGI_REQ_ADDR0 0xc7c
+#define A_MC5_DBGI_REQ_ADDR1 0xc80
+#define A_MC5_DBGI_REQ_ADDR2 0xc84
+#define A_MC5_DBGI_REQ_DATA0 0xc88
+#define A_MC5_DBGI_REQ_DATA1 0xc8c
+#define A_MC5_DBGI_REQ_DATA2 0xc90
+#define A_MC5_DBGI_REQ_DATA3 0xc94
+#define A_MC5_DBGI_REQ_DATA4 0xc98
+#define A_MC5_DBGI_REQ_MASK0 0xc9c
+#define A_MC5_DBGI_REQ_MASK1 0xca0
+#define A_MC5_DBGI_REQ_MASK2 0xca4
+#define A_MC5_DBGI_REQ_MASK3 0xca8
+#define A_MC5_DBGI_REQ_MASK4 0xcac
+#define A_MC5_DBGI_RSP_STATUS 0xcb0
+
+#define S_DBGI_RSP_VALID    0
+#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID)
+#define F_DBGI_RSP_VALID    V_DBGI_RSP_VALID(1U)
+
+#define S_DBGI_RSP_HIT    1
+#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT)
+#define F_DBGI_RSP_HIT    V_DBGI_RSP_HIT(1U)
+
+#define S_DBGI_RSP_ERR    2
+#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR)
+#define F_DBGI_RSP_ERR    V_DBGI_RSP_ERR(1U)
+
+#define S_DBGI_RSP_ERR_REASON    8
+#define M_DBGI_RSP_ERR_REASON    0x7
+#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON)
+#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON)
+
+#define A_MC5_DBGI_RSP_DATA0 0xcb4
+#define A_MC5_DBGI_RSP_DATA1 0xcb8
+#define A_MC5_DBGI_RSP_DATA2 0xcbc
+#define A_MC5_DBGI_RSP_DATA3 0xcc0
+#define A_MC5_DBGI_RSP_DATA4 0xcc4
+#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8
+#define A_MC5_POPEN_DATA_WR_CMD 0xccc
+#define A_MC5_POPEN_MASK_WR_CMD 0xcd0
+#define A_MC5_AOPEN_SRCH_CMD 0xcd4
+#define A_MC5_AOPEN_LRN_CMD 0xcd8
+#define A_MC5_SYN_SRCH_CMD 0xcdc
+#define A_MC5_SYN_LRN_CMD 0xce0
+#define A_MC5_ACK_SRCH_CMD 0xce4
+#define A_MC5_ACK_LRN_CMD 0xce8
+#define A_MC5_ILOOKUP_CMD 0xcec
+#define A_MC5_ELOOKUP_CMD 0xcf0
+#define A_MC5_DATA_WRITE_CMD 0xcf4
+#define A_MC5_DATA_READ_CMD 0xcf8
+#define A_MC5_MASK_WRITE_CMD 0xcfc
+
+/* PCICFG registers */
+#define A_PCICFG_PM_CSR 0x44
+#define A_PCICFG_VPD_ADDR 0x4a
+
+#define S_VPD_ADDR    0
+#define M_VPD_ADDR    0x7fff
+#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR)
+#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR)
+
+#define S_VPD_OP_FLAG    15
+#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
+#define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)
+
+#define A_PCICFG_VPD_DATA 0x4c
+#define A_PCICFG_PCIX_CMD 0x60
+#define A_PCICFG_INTR_ENABLE 0xf4
+
+#define S_MASTER_PARITY_ERR    0
+#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR)
+#define F_MASTER_PARITY_ERR    V_MASTER_PARITY_ERR(1U)
+
+#define S_SIG_TARGET_ABORT    1
+#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT)
+#define F_SIG_TARGET_ABORT    V_SIG_TARGET_ABORT(1U)
+
+#define S_RCV_TARGET_ABORT    2
+#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT)
+#define F_RCV_TARGET_ABORT    V_RCV_TARGET_ABORT(1U)
+
+#define S_RCV_MASTER_ABORT    3
+#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT)
+#define F_RCV_MASTER_ABORT    V_RCV_MASTER_ABORT(1U)
+
+#define S_SIG_SYS_ERR    4
+#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR)
+#define F_SIG_SYS_ERR    V_SIG_SYS_ERR(1U)
+
+#define S_DET_PARITY_ERR    5
+#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR)
+#define F_DET_PARITY_ERR    V_DET_PARITY_ERR(1U)
+
+#define S_PIO_PARITY_ERR    6
+#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR)
+#define F_PIO_PARITY_ERR    V_PIO_PARITY_ERR(1U)
+
+#define S_WF_PARITY_ERR    7
+#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR)
+#define F_WF_PARITY_ERR    V_WF_PARITY_ERR(1U)
+
+#define S_RF_PARITY_ERR    8
+#define M_RF_PARITY_ERR    0x3
+#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR)
+#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR)
+
+#define S_CF_PARITY_ERR    10
+#define M_CF_PARITY_ERR    0x3
+#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR)
+#define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR)
+
+#define A_PCICFG_INTR_CAUSE 0xf8
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT    0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT    V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_66MHZ    1
+#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ)
+#define F_PCI_MODE_66MHZ    V_PCI_MODE_66MHZ(1U)
+
+#define S_PCI_MODE_PCIX_INITPAT    2
+#define M_PCI_MODE_PCIX_INITPAT    0x7
+#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT)
+#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT)
+
+#define S_PCI_MODE_PCIX    5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX    V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK    6
+#define M_PCI_MODE_CLK    0x3
+#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK)
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
new file mode 100644
index 0000000..526ea74
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -0,0 +1,2124 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.c                                                               *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  DMA engine.                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/ktime.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
+
+#define SGE_CMDQ_N		2
+#define SGE_FREELQ_N		2
+#define SGE_CMDQ0_E_N		1024
+#define SGE_CMDQ1_E_N		128
+#define SGE_FREEL_SIZE		4096
+#define SGE_JUMBO_FREEL_SIZE	512
+#define SGE_FREEL_REFILL_THRESH	16
+#define SGE_RESPQ_E_N		1024
+#define SGE_INTRTIMER_NRES	1000
+#define SGE_RX_SM_BUF_SIZE	1536
+#define SGE_TX_DESC_MAX_PLEN	16384
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
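+/*
+ * Encodings of the command/freelist descriptor fields: the buffer length and
+ * the first generation bit are packed into len_gen; the second generation bit
+ * and, for command descriptors, the DataValid/SOP/EOP flags live in the
+ * separate gen2/flags word.
+ */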
+#define M_CMD_LEN       0x7fffffff
+#define V_CMD_LEN(v)    (v)
+#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v)   ((v) << 31)
+#define V_CMD_GEN2(v)   (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP       (1 << 2)
+#define V_CMD_EOP(v)    ((v) << 3)
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 flags;
+	u32 addr_hi;
+};
+
+struct freelQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 gen2;
+	u32 addr_hi;
+};
+
+struct respQ_e {
+	u32 Qsleeping		: 4;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 FreelistQid		: 2;
+	u32 CreditValid		: 1;
+	u32 DataValid		: 1;
+	u32 Offload		: 1;
+	u32 Eop			: 1;
+	u32 Sop			: 1;
+	u32 GenerationBit	: 1;
+	u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 flags;
+};
+
+struct freelQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 gen2;
+};
+
+struct respQ_e {
+	u32 BufferLength;
+	u32 GenerationBit	: 1;
+	u32 Sop			: 1;
+	u32 Eop			: 1;
+	u32 Offload		: 1;
+	u32 DataValid		: 1;
+	u32 CreditValid		: 1;
+	u32 FreelistQid		: 2;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Qsleeping		: 4;
+};
+#endif
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+	struct sk_buff *skb;
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+	struct sk_buff *skb;
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+	unsigned long   status;         /* HW DMA fetch status */
+	unsigned int    in_use;         /* # of in-use command descriptors */
+	unsigned int	size;	        /* # of descriptors */
+	unsigned int    processed;      /* total # of descs HW has processed */
+	unsigned int    cleaned;        /* total # of descs SW has reclaimed */
+	unsigned int    stop_thres;     /* SW TX queue suspend threshold */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u8		genbit;         /* current generation (=valid) bit */
+	u8              sop;            /* is next entry start of packet? */
+	struct cmdQ_e  *entries;        /* HW command descriptor Q */
+	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
+	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
+};
+
+struct freelQ {
+	unsigned int	credits;        /* # of available RX buffers */
+	unsigned int	size;	        /* free list capacity */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u16		rx_buffer_size; /* Buffer size on this free list */
+	u16             dma_offset;     /* DMA offset to align IP headers */
+	u16             recycleq_idx;   /* skb recycle q to use */
+	u8		genbit;	        /* current generation (=valid) bit */
+	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
+	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+	unsigned int	credits;        /* credits to be returned to SGE */
+	unsigned int	size;	        /* # of response Q descriptors */
+	u16		cidx;	        /* consumer index (SW) */
+	u8		genbit;	        /* current generation(=valid) bit */
+	struct respQ_e *entries;        /* HW response descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
+	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
+};
+
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+	unsigned int	avail;		/* available quota, in bytes */
+	unsigned int	drain_bits_per_1024ns; /* drain rate */
+	unsigned int	speed;		/* drain rate, mbps */
+	unsigned int	mtu;		/* mtu size */
+	struct sk_buff_head skbq;	/* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+	ktime_t         last_updated;   /* last time quotas were computed */
+	unsigned int	max_avail;	/* max bits to be sent to any port */
+	unsigned int	port;		/* port index (round robin ports) */
+	unsigned int	num;		/* num skbs in per port queues */
+	struct sched_port p[MAX_NPORTS];
+	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on an MP
+ * system the application is migrated to another CPU. In that scenario, we
+ * try to separate the RX (in irq context) and TX state in order to decrease
+ * memory contention.
+ * contention.
+ */
+struct sge {
+	struct adapter *adapter;	/* adapter backpointer */
+	struct net_device *netdev;      /* netdevice backpointer */
+	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ	respQ;		/* response Q */
+	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
+	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
+	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
+	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
+	unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
+	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+	struct timer_list espibug_timer;
+	unsigned long	espibug_timeout;
+	struct sk_buff	*espibug_skb[MAX_NPORTS];
+	u32		sge_control;	/* shadow value of sge control reg */
+	struct sge_intr_counts stats;
+	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
+	struct sched	*tx_sched;
+	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+static const u8 ch_mac_addr[ETH_ALEN] = {
+	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
+/*
+ * stop tasklet and free all pending skb's
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	int i;
+
+	tasklet_kill(&s->sched_tsk);
+
+	for (i = 0; i < MAX_NPORTS; i++)
+		__skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+				   unsigned int mtu, unsigned int speed)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	unsigned int max_avail_segs;
+
+	pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
+	if (speed)
+		p->speed = speed;
+	if (mtu)
+		p->mtu = mtu;
+
+	if (speed || mtu) {
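+		/*
+		 * Convert the link speed (Mbit/s, i.e. bits per 1000 ns) into
+		 * payload bits per 1024 ns, derated by the ratio of TCP
+		 * payload (mtu - 40) to the approximate on-wire frame size
+		 * (mtu + 50).
+		 */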
+		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+		do_div(drain, (p->mtu + 50) * 1000);
+		p->drain_bits_per_1024ns = (unsigned int) drain;
+
+		if (p->speed < 1000)
+			p->drain_bits_per_1024ns =
+				90 * p->drain_bits_per_1024ns / 100;
+	}
+
+	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+		p->drain_bits_per_1024ns -= 16;
+		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+	} else {
+		s->max_avail = 16384;
+		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+	}
+
+	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+		 p->speed, s->max_avail, max_avail_segs,
+		 p->drain_bits_per_1024ns);
+
+	return max_avail_segs * (p->mtu - 40);
+}
+
+#if 0
+
+/*
+ * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	unsigned int i;
+
+	s->max_avail = val;
+	for (i = 0; i < MAX_NPORTS; i++)
+		t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+					 unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	p->drain_bits_per_1024ns = val * 1024 / 1000;
+	t1_sched_update_parms(sge, port, 0, 0);
+}
+
+#endif  /*  0  */
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+	struct sched *s;
+	int i;
+
+	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	pr_debug("tx_sched_init\n");
+	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	sge->tx_sched = s;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		skb_queue_head_init(&s->p[i].skbq);
+		t1_sched_update_parms(sge, i, 1500, 1000);
+	}
+
+	return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per port quota (the amount of data that can be sent to
+ * any port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	ktime_t now = ktime_get();
+	unsigned int i;
+	long long delta_time_ns;
+
+	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+	if (delta_time_ns < 15000)
+		return 0;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		struct sched_port *p = &s->p[i];
+		unsigned int delta_avail;
+
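+		/*
+		 * bits-per-1024ns multiplied by elapsed ns: the >> 13 combines
+		 * the /1024 for the time unit with the /8 that converts bits
+		 * to bytes.
+		 */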
+		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+		p->avail = min(p->avail + delta_avail, s->max_avail);
+	}
+
+	s->last_updated = now;
+
+	return 1;
+}
+
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+				unsigned int credits)
+{
+	struct sched *s = sge->tx_sched;
+	struct sk_buff_head *skbq;
+	unsigned int i, len, update = 1;
+
+	pr_debug("sched_skb %p\n", skb);
+	if (!skb) {
+		if (!s->num)
+			return NULL;
+	} else {
+		skbq = &s->p[skb->dev->if_port].skbq;
+		__skb_queue_tail(skbq, skb);
+		s->num++;
+		skb = NULL;
+	}
+
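+	/*
+	 * Not enough free command descriptors to guarantee room for a
+	 * maximally fragmented skb, so leave everything queued for now.
+	 */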
+	if (credits < MAX_SKB_FRAGS + 1)
+		goto out;
+
+again:
+	for (i = 0; i < MAX_NPORTS; i++) {
+		s->port = (s->port + 1) & (MAX_NPORTS - 1);
+		skbq = &s->p[s->port].skbq;
+
+		skb = skb_peek(skbq);
+
+		if (!skb)
+			continue;
+
+		len = skb->len;
+		if (len <= s->p[s->port].avail) {
+			s->p[s->port].avail -= len;
+			s->num--;
+			__skb_unlink(skb, skbq);
+			goto out;
+		}
+		skb = NULL;
+	}
+
+	if (update-- && sched_update_avail(sge))
+		goto again;
+
+out:
+	/* If there are more pending skbs, we use the hardware to schedule us
+	 * again.
+	 */
+	if (s->num && !skb) {
+		struct cmdQ *q = &sge->cmdQ[0];
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	pr_debug("sched_skb ret %p\n", skb);
+
+	return skb;
+}
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
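+	/* Make sure all descriptor updates are visible before the doorbell. */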
+	wmb();
+	writel(val, adapter->regs + A_SG_DOORBELL);
+}
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+	unsigned int cidx = q->cidx;
+
+	while (q->credits--) {
+		struct freelQ_ce *ce = &q->centries[cidx];
+
+		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len),
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(ce->skb);
+		ce->skb = NULL;
+		if (++cidx == q->size)
+			cidx = 0;
+	}
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	if (sge->respQ.entries) {
+		size = sizeof(struct respQ_e) * sge->respQ.size;
+		pci_free_consistent(pdev, size, sge->respQ.entries,
+				    sge->respQ.dma_addr);
+	}
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		if (q->centries) {
+			free_freelQ_buffers(pdev, q);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct freelQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		q->genbit = 1;
+		q->size = p->freelQ_size[i];
+		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+		size = sizeof(struct freelQ_e) * q->size;
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+
+		size = sizeof(struct freelQ_ce) * q->size;
+		q->centries = kzalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+	}
+
+	/*
+	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
+	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+	 * including all the sk_buff overhead.
+	 *
+	 * Note: For T2 FL0 and FL1 are reversed.
+	 */
+	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+		sizeof(struct cpl_rx_data) +
+		sge->freelQ[!sge->jumbo_fl].dma_offset;
+
+	size = (16 * 1024) -
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
+
+	/*
+	 * Setup which skb recycle Q should be used when recycling buffers from
+	 * each free list.
+	 */
+	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+	sge->respQ.genbit = 1;
+	sge->respQ.size = SGE_RESPQ_E_N;
+	sge->respQ.credits = 0;
+	size = sizeof(struct respQ_e) * sge->respQ.size;
+	sge->respQ.entries =
+		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+	if (!sge->respQ.entries)
+		goto err_no_mem;
+	return 0;
+
+err_no_mem:
+	free_rx_resources(sge);
+	return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+	struct cmdQ_ce *ce;
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	q->in_use -= n;
+	ce = &q->centries[cidx];
+	while (n--) {
+		if (likely(dma_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+					 dma_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+			if (q->sop)
+				q->sop = 0;
+		}
+		if (ce->skb) {
+			dev_kfree_skb_any(ce->skb);
+			q->sop = 1;
+		}
+		ce++;
+		if (++cidx == q->size) {
+			cidx = 0;
+			ce = q->centries;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (q->centries) {
+			if (q->in_use)
+				free_cmdQ_buffers(sge, q, q->in_use);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct cmdQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		q->genbit = 1;
+		q->sop = 1;
+		q->size = p->cmdQ_size[i];
+		q->in_use = 0;
+		q->status = 0;
+		q->processed = q->cleaned = 0;
+		q->stop_thres = 0;
+		spin_lock_init(&q->lock);
+		size = sizeof(struct cmdQ_e) * q->size;
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+
+		size = sizeof(struct cmdQ_ce) * q->size;
+		q->centries = kzalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+	}
+
+	/*
+	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+	 * only.  For queue 0 set the stop threshold so we can handle one more
+	 * packet from each port, plus reserve an additional 24 entries for
+	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+	 * space for Ethernet packets.
+	 */
+	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+		(MAX_SKB_FRAGS + 1);
+	return 0;
+
+err_no_mem:
+	free_tx_resources(sge);
+	return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+				     u32 size, int base_reg_lo,
+				     int base_reg_hi, int size_reg)
+{
+	writel((u32)addr, adapter->regs + base_reg_lo);
+	writel(addr >> 32, adapter->regs + base_reg_hi);
+	writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
+{
+	struct sge *sge = adapter->sge;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		sge->sge_control |= F_VLAN_XTRACT;
+	else
+		sge->sge_control &= ~F_VLAN_XTRACT;
+	if (adapter->open_device_map) {
+		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+		readl(adapter->regs + A_SG_CONTROL);   /* flush */
+	}
+}
+
+/*
+ * Programs the various SGE registers. The engine is not enabled yet, but
+ * sge->sge_control is set up and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+	struct adapter *ap = sge->adapter;
+
+	writel(0, ap->regs + A_SG_CONTROL);
+	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+	setup_ring_params(ap, sge->freelQ[0].dma_addr,
+			  sge->freelQ[0].size, A_SG_FL0BASELWR,
+			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+	setup_ring_params(ap, sge->freelQ[1].dma_addr,
+			  sge->freelQ[1].size, A_SG_FL1BASELWR,
+			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+	/* The threshold comparison uses <. */
+	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+	/* Initialize no-resource timer */
+	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+	t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+		sge->freelQ[sge->jumbo_fl].dma_offset -
+		sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+	int i;
+
+	for_each_port(sge->adapter, i)
+		free_percpu(sge->port_stats[i]);
+
+	kfree(sge->tx_sched);
+	free_tx_resources(sge);
+	free_rx_resources(sge);
+	kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify a RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	struct freelQ_ce *ce = &q->centries[q->pidx];
+	struct freelQ_e *e = &q->entries[q->pidx];
+	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+	while (q->credits < q->size) {
+		struct sk_buff *skb;
+		dma_addr_t mapping;
+
+		skb = dev_alloc_skb(q->rx_buffer_size);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, q->dma_offset);
+		mapping = pci_map_single(pdev, skb->data, dma_len,
+					 PCI_DMA_FROMDEVICE);
+		skb_reserve(skb, sge->rx_pkt_pad);
+
+		ce->skb = skb;
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, dma_len);
+		e->addr_lo = (u32)mapping;
+		e->addr_hi = (u64)mapping >> 32;
+		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
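+		/*
+		 * Write gen2 last: the SGE treats the descriptor as valid only
+		 * once both generation bits match the queue's current
+		 * generation.
+		 */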
+		wmb();
+		e->gen2 = V_CMD_GEN2(q->genbit);
+
+		e++;
+		ce++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			q->genbit ^= 1;
+			ce = q->centries;
+			e = q->entries;
+		}
+		q->credits++;
+	}
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+	u32 irqholdoff_reg;
+
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+		irq_reg |= F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->fixed_intrtimer;
+	} else {
+		/* Clear the F_FL_EXHAUSTED interrupts for now */
+		irq_reg &= ~F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->intrtimer_nres;
+	}
+	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+	/* We reenable the Qs to force a freelist GTS interrupt later */
+	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE interrupts.
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+	u32 en = SGE_INT_ENABLE;
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
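+	/*
+	 * TSO frames handed to the SGE can legitimately exceed the normal
+	 * maximum packet size, so don't report them as "packet too big".
+	 */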
+	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
+		en &= ~F_PACKET_TOO_BIG;
+	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
+		cause &= ~F_PACKET_TOO_BIG;
+	if (cause & F_RESPQ_EXHAUSTED)
+		sge->stats.respQ_empty++;
+	if (cause & F_RESPQ_OVERFLOW) {
+		sge->stats.respQ_overflow++;
+		pr_alert("%s: SGE response queue overflow\n",
+			 adapter->name);
+	}
+	if (cause & F_FL_EXHAUSTED) {
+		sge->stats.freelistQ_empty++;
+		freelQs_empty(sge);
+	}
+	if (cause & F_PACKET_TOO_BIG) {
+		sge->stats.pkt_too_big++;
+		pr_alert("%s: SGE max packet size exceeded\n",
+			 adapter->name);
+	}
+	if (cause & F_PACKET_MISMATCH) {
+		sge->stats.pkt_mismatch++;
+		pr_alert("%s: SGE packet mismatch\n", adapter->name);
+	}
+	if (cause & SGE_INT_FATAL)
+		t1_fatal_err(adapter);
+
+	writel(cause, adapter->regs + A_SG_INT_CAUSE);
+	return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
+{
+	return &sge->stats;
+}
+
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+			   struct sge_port_stats *ss)
+{
+	int cpu;
+
+	memset(ss, 0, sizeof(*ss));
+	for_each_possible_cpu(cpu) {
+		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+		ss->rx_cso_good += st->rx_cso_good;
+		ss->tx_cso += st->tx_cso;
+		ss->tx_tso += st->tx_tso;
+		ss->tx_need_hdrroom += st->tx_need_hdrroom;
+		ss->vlan_xtract += st->vlan_xtract;
+		ss->vlan_insert += st->vlan_insert;
+	}
+}
+
+/**
+ *	recycle_fl_buf - recycle a free list buffer
+ *	@fl: the free list
+ *	@idx: index of buffer to recycle
+ *
+ *	Recycles the specified buffer on the given free list by adding it at
+ *	the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+	struct freelQ_e *from = &fl->entries[idx];
+	struct freelQ_e *to = &fl->entries[fl->pidx];
+
+	fl->centries[fl->pidx] = fl->centries[idx];
+	to->addr_lo = from->addr_lo;
+	to->addr_hi = from->addr_hi;
+	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+	wmb();
+	to->gen2 = V_CMD_GEN2(fl->genbit);
+	fl->credits++;
+
+	if (++fl->pidx == fl->size) {
+		fl->pidx = 0;
+		fl->genbit ^= 1;
+	}
+}
+
+static int copybreak __read_mostly = 256;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/**
+ *	get_packet - return the next ingress packet buffer
+ *	@adapter: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the actual packet length, excluding any SGE padding
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If the
+ *	packet is too big to copy (or the copy allocation fails) and the free
+ *	list is nearly exhausted (fewer than two buffers remain), the packet
+ *	is dropped and its buffer is recycled.
+ */
+static inline struct sk_buff *get_packet(struct adapter *adapter,
+					 struct freelQ *fl, unsigned int len)
+{
+	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct pci_dev *pdev = adapter->pdev;
+	struct sk_buff *skb;
+
+	if (len < copybreak) {
+		skb = napi_alloc_skb(&adapter->napi, len);
+		if (!skb)
+			goto use_orig_buf;
+
+		skb_put(skb, len);
+		pci_dma_sync_single_for_cpu(pdev,
+					    dma_unmap_addr(ce, dma_addr),
+					    dma_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		skb_copy_from_linear_data(ce->skb, skb->data, len);
+		pci_dma_sync_single_for_device(pdev,
+					       dma_unmap_addr(ce, dma_addr),
+					       dma_unmap_len(ce, dma_len),
+					       PCI_DMA_FROMDEVICE);
+		recycle_fl_buf(fl, fl->cidx);
+		return skb;
+	}
+
+use_orig_buf:
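+	/* The free list is nearly exhausted: recycle the buffer and drop
+	 * the packet rather than run the list dry.
+	 */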
+	if (fl->credits < 2) {
+		recycle_fl_buf(fl, fl->cidx);
+		return NULL;
+	}
+
+	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	skb = ce->skb;
+	prefetch(skb->data);
+
+	skb_put(skb, len);
+	return skb;
+}
+
+/**
+ *	unexpected_offload - handle an unexpected offload packet
+ *	@adapter: the adapter
+ *	@fl: the free list that received the packet
+ *
+ *	Called when we receive an unexpected offload packet (e.g., the TOE
+ *	function is disabled or the card is a NIC).  Prints a message and
+ *	recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct sk_buff *skb = ce->skb;
+
+	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+			    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pr_err("%s: unexpected offload packet, cmd %u\n",
+	       adapter->name, *skb->data);
+	recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * The T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB).  If PAGE_SIZE is larger than 16KB, the
+ * stack may hand us contiguous buffers larger than SGE_TX_DESC_MAX_PLEN.
+ * Note that the *_large_page_tx_descs code is optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_tx_descs() computes how many additional descriptors
+ * are required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+	unsigned int count = 0;
+
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+		unsigned int i, len = skb_headlen(skb);
+		while (len > SGE_TX_DESC_MAX_PLEN) {
+			count++;
+			len -= SGE_TX_DESC_MAX_PLEN;
+		}
+		for (i = 0; nfrags--; i++) {
+			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			len = skb_frag_size(frag);
+			while (len > SGE_TX_DESC_MAX_PLEN) {
+				count++;
+				len -= SGE_TX_DESC_MAX_PLEN;
+			}
+		}
+	}
+	return count;
+}
+
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+				 unsigned int len, unsigned int gen,
+				 unsigned int eop)
+{
+	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
+	e->addr_lo = (u32)mapping;
+	e->addr_hi = (u64)mapping >> 32;
+	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See the comment for compute_large_page_tx_descs() above.
+ *
+ * write_large_page_tx_descs() writes the additional SGE Tx descriptors
+ * needed when *desc_len exceeds the hardware's per-descriptor limit.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+						     struct cmdQ_e **e,
+						     struct cmdQ_ce **ce,
+						     unsigned int *gen,
+						     dma_addr_t *desc_mapping,
+						     unsigned int *desc_len,
+						     unsigned int nfrags,
+						     struct cmdQ *q)
+{
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+		struct cmdQ_e *e1 = *e;
+		struct cmdQ_ce *ce1 = *ce;
+
+		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+			*desc_len -= SGE_TX_DESC_MAX_PLEN;
+			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+				      *gen, nfrags == 0 && *desc_len == 0);
+			ce1->skb = NULL;
+			dma_unmap_len_set(ce1, dma_len, 0);
+			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
+			if (*desc_len) {
+				ce1++;
+				e1++;
+				if (++pidx == q->size) {
+					pidx = 0;
+					*gen ^= 1;
+					ce1 = q->centries;
+					e1 = q->entries;
+				}
+			}
+		}
+		*e = e1;
+		*ce = ce1;
+	}
+	return pidx;
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+				  unsigned int pidx, unsigned int gen,
+				  struct cmdQ *q)
+{
+	dma_addr_t mapping, desc_mapping;
+	struct cmdQ_e *e, *e1;
+	struct cmdQ_ce *ce;
+	unsigned int i, flags, first_desc_len, desc_len,
+	    nfrags = skb_shinfo(skb)->nr_frags;
+
+	e = e1 = &q->entries[pidx];
+	ce = &q->centries[pidx];
+
+	mapping = pci_map_single(adapter->pdev, skb->data,
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	desc_mapping = mapping;
+	desc_len = skb_headlen(skb);
+
+	flags = F_CMD_DATAVALID | F_CMD_SOP |
+	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+	    V_CMD_GEN2(gen);
+	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+	    desc_len : SGE_TX_DESC_MAX_PLEN;
+	e->addr_lo = (u32)desc_mapping;
+	e->addr_hi = (u64)desc_mapping >> 32;
+	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+	ce->skb = NULL;
+	dma_unmap_len_set(ce, dma_len, 0);
+
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+	    desc_len > SGE_TX_DESC_MAX_PLEN) {
+		desc_mapping += first_desc_len;
+		desc_len -= first_desc_len;
+		e1++;
+		ce++;
+		if (++pidx == q->size) {
+			pidx = 0;
+			gen ^= 1;
+			e1 = q->entries;
+			ce = q->centries;
+		}
+		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+						 &desc_mapping, &desc_len,
+						 nfrags, q);
+
+		if (likely(desc_len))
+			write_tx_desc(e1, desc_mapping, desc_len, gen,
+				      nfrags == 0);
+	}
+
+	ce->skb = NULL;
+	dma_unmap_addr_set(ce, dma_addr, mapping);
+	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
+
+	for (i = 0; nfrags--; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		e1++;
+		ce++;
+		if (++pidx == q->size) {
+			pidx = 0;
+			gen ^= 1;
+			e1 = q->entries;
+			ce = q->centries;
+		}
+
+		mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+		desc_mapping = mapping;
+		desc_len = skb_frag_size(frag);
+
+		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+						 &desc_mapping, &desc_len,
+						 nfrags, q);
+		if (likely(desc_len))
+			write_tx_desc(e1, desc_mapping, desc_len, gen,
+				      nfrags == 0);
+		ce->skb = NULL;
+		dma_unmap_addr_set(ce, dma_addr, mapping);
+		dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
+	}
+	ce->skb = skb;
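+	/* Set the first descriptor's flags last, after a write barrier, so
+	 * the hardware never sees a partially written descriptor chain.
+	 */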
+	wmb();
+	e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	if (reclaim) {
+		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+			 q->processed, q->cleaned);
+		free_cmdQ_buffers(sge, q, reclaim);
+		q->cleaned += reclaim;
+	}
+}
+
+/*
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
+ */
+static void restart_sched(unsigned long arg)
+{
+	struct sge *sge = (struct sge *) arg;
+	struct adapter *adapter = sge->adapter;
+	struct cmdQ *q = &sge->cmdQ[0];
+	struct sk_buff *skb;
+	unsigned int credits, queued_skb = 0;
+
+	spin_lock(&q->lock);
+	reclaim_completed_tx(sge, q);
+
+	credits = q->size - q->in_use;
+	pr_debug("restart_sched credits=%d\n", credits);
+	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+		unsigned int genbit, pidx, count;
+		count = 1 + skb_shinfo(skb)->nr_frags;
+		count += compute_large_page_tx_descs(skb);
+		q->in_use += count;
+		genbit = q->genbit;
+		pidx = q->pidx;
+		q->pidx += count;
+		if (q->pidx >= q->size) {
+			q->pidx -= q->size;
+			q->genbit ^= 1;
+		}
+		write_tx_descs(adapter, skb, pidx, genbit, q);
+		credits = q->size - q->in_use;
+		queued_skb = 1;
+	}
+
+	if (queued_skb) {
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	spin_unlock(&q->lock);
+}
+
+/**
+ *	sge_rx - process an ingress ethernet packet
+ *	@sge: the sge structure
+ *	@fl: the free list that contains the packet buffer
+ *	@len: the packet length
+ *
+ *	Process an ingress Ethernet packet and deliver it to the stack.
+ */
+static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+	struct sk_buff *skb;
+	const struct cpl_rx_pkt *p;
+	struct adapter *adapter = sge->adapter;
+	struct sge_port_stats *st;
+	struct net_device *dev;
+
+	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
+	if (unlikely(!skb)) {
+		sge->stats.rx_drops++;
+		return;
+	}
+
+	p = (const struct cpl_rx_pkt *) skb->data;
+	if (p->iff >= adapter->params.nports) {
+		kfree_skb(skb);
+		return;
+	}
+	__skb_pull(skb, sizeof(*p));
+
+	st = this_cpu_ptr(sge->port_stats[p->iff]);
+	dev = adapter->port[p->iff].dev;
+
+	skb->protocol = eth_type_trans(skb, dev);
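+	/* eth_type_trans() pulled the Ethernet header, so data[9] is the
+	 * IPv4 protocol field.  Trust the hardware checksum only for
+	 * TCP/UDP over IPv4.
+	 */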
+	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
+	    skb->protocol == htons(ETH_P_IP) &&
+	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+		++st->rx_cso_good;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb_checksum_none_assert(skb);
+
+	if (p->vlan_valid) {
+		st->vlan_xtract++;
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
+	}
+	netif_receive_skb(skb);
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+	unsigned int r = q->processed - q->cleaned;
+
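+	/* Resume once fewer than half of the descriptors are in use,
+	 * counting completed but not yet reclaimed entries as free.
+	 */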
+	return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called to restart the Tx path once sufficient space has become available
+ * in the SGE command queues after the Tx queues were stopped.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+	struct adapter *adap = sge->adapter;
+	int i;
+
+	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+		return;
+
+	for_each_port(adap, i) {
+		struct net_device *nd = adap->port[i].dev;
+
+		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+		    netif_running(nd)) {
+			sge->stats.cmdQ_restarted[2]++;
+			netif_wake_queue(nd);
+		}
+	}
+}
+
+/*
+ * update_tx_info() is called from the interrupt handler/NAPI context to
+ * process returned cmdQ0 credits and restart the Tx path when needed.
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+					  unsigned int flags,
+					  unsigned int pr0)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *cmdq = &sge->cmdQ[0];
+
+	cmdq->processed += pr0;
+	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+		freelQs_empty(sge);
+		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+	}
+	if (flags & F_CMDQ0_ENABLE) {
+		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+
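+		/* More descriptors are still pending completion; if nobody
+		 * has rung the doorbell for the last queued packet, ring it
+		 * here to keep cmdQ0 running.
+		 */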
+		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+		if (sge->tx_sched)
+			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+		flags &= ~F_CMDQ0_ENABLE;
+	}
+
+	if (unlikely(sge->stopped_tx_queues != 0))
+		restart_tx_queues(sge);
+
+	return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget.  Returns the number of
+ * responses processed.  A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
+	int done = 0;
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+	while (done < budget && e->GenerationBit == q->genbit) {
+		flags |= e->Qsleeping;
+
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+		/* We batch updates to the TX side to avoid cacheline
+		 * ping-pong of TX state information on MP where the sender
+		 * might run on a different CPU than this function...
+		 */
+		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
+			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+			cmdq_processed[0] = 0;
+		}
+
+		if (unlikely(cmdq_processed[1] > 16)) {
+			sge->cmdQ[1].processed += cmdq_processed[1];
+			cmdq_processed[1] = 0;
+		}
+
+		if (likely(e->DataValid)) {
+			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+			BUG_ON(!e->Sop || !e->Eop);
+			if (unlikely(e->Offload))
+				unexpected_offload(adapter, fl);
+			else
+				sge_rx(sge, fl, e->BufferLength);
+
+			++done;
+
+			/*
+			 * Note: this depends on each packet consuming a
+			 * single free-list buffer; cf. the BUG above.
+			 */
+			if (++fl->cidx == fl->size)
+				fl->cidx = 0;
+			prefetch(fl->centries[fl->cidx].skb);
+
+			if (unlikely(--fl->credits <
+				     fl->size - SGE_FREEL_REFILL_THRESH))
+				refill_free_list(sge, fl);
+		} else
+			sge->stats.pure_rsps++;
+
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+	}
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	return done;
+}
+
+static inline int responses_pending(const struct adapter *adapter)
+{
+	const struct respQ *Q = &adapter->sge->respQ;
+	const struct respQ_e *e = &Q->entries[Q->cidx];
+
+	return e->GenerationBit == Q->genbit;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses.  Such responses are too lightweight to
+ * justify scheduling a softirq under NAPI, so we handle them directly in
+ * hard interrupt context.  The caller must ensure that the response at the
+ * current queue index is valid.  Returns 1 if a valid data-carrying
+ * response remains pending, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
+	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+	prefetch(fl->centries[fl->cidx].skb);
+	if (e->DataValid)
+		return 1;
+
+	do {
+		flags |= e->Qsleeping;
+
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+		sge->stats.pure_rsps++;
+	} while (e->GenerationBit == q->genbit && !e->DataValid);
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI.  This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+int t1_poll(struct napi_struct *napi, int budget)
+{
+	struct adapter *adapter = container_of(napi, struct adapter, napi);
+	int work_done = process_responses(adapter, budget);
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
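+		/* Tell the SGE how far we have processed so it can raise a
+		 * new data interrupt when more responses arrive.
+		 */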
+		writel(adapter->sge->respQ.cidx,
+		       adapter->regs + A_SG_SLEEPING);
+	}
+	return work_done;
+}
+
+irqreturn_t t1_interrupt(int irq, void *data)
+{
+	struct adapter *adapter = data;
+	struct sge *sge = adapter->sge;
+	int handled;
+
+	if (likely(responses_pending(adapter))) {
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+		if (napi_schedule_prep(&adapter->napi)) {
+			if (process_pure_responses(adapter))
+				__napi_schedule(&adapter->napi);
+			else {
+				/* no data, no NAPI needed */
+				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+				/* undo schedule_prep */
+				napi_enable(&adapter->napi);
+			}
+		}
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&adapter->async_lock);
+	handled = t1_slow_intr_handler(adapter);
+	spin_unlock(&adapter->async_lock);
+
+	if (!handled)
+		sge->stats.unhandled_irqs++;
+
+	return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and reserves them by updating the cmdQ state under the queue lock.
+ * From then on it no longer touches the shared structure but works with
+ * the values saved on the stack, which keeps the function safe against
+ * concurrent callers without holding the lock while the descriptors are
+ * actually written (which might be expensive, especially on architectures
+ * with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+		     unsigned int qid, struct net_device *dev)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *q = &sge->cmdQ[qid];
+	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+	if (!spin_trylock(&q->lock))
+		return NETDEV_TX_LOCKED;
+
+	reclaim_completed_tx(sge, q);
+
+	pidx = q->pidx;
+	credits = q->size - q->in_use;
+	count = 1 + skb_shinfo(skb)->nr_frags;
+	count += compute_large_page_tx_descs(skb);
+
+	/* Ethernet packet */
+	if (unlikely(credits < count)) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			set_bit(dev->if_port, &sge->stopped_tx_queues);
+			sge->stats.cmdQ_full[2]++;
+			pr_err("%s: Tx ring full while queue awake!\n",
+			       adapter->name);
+		}
+		spin_unlock(&q->lock);
+		return NETDEV_TX_BUSY;
+	}
+
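+	/* Stop the queue early when the remaining credits fall below the
+	 * stop threshold; restart_tx_queues() wakes it once enough
+	 * descriptors have been reclaimed.
+	 */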
+	if (unlikely(credits - count < q->stop_thres)) {
+		netif_stop_queue(dev);
+		set_bit(dev->if_port, &sge->stopped_tx_queues);
+		sge->stats.cmdQ_full[2]++;
+	}
+
+	/* T204 cmdQ0 skbs that are destined for a certain port have to go
+	 * through the scheduler.
+	 */
+	if (sge->tx_sched && !qid && skb->dev) {
+use_sched:
+		use_sched_skb = 1;
+		/* Note that the scheduler might return a different skb than
+		 * the one passed in.
+		 */
+		skb = sched_skb(sge, skb, credits);
+		if (!skb) {
+			spin_unlock(&q->lock);
+			return NETDEV_TX_OK;
+		}
+		pidx = q->pidx;
+		count = 1 + skb_shinfo(skb)->nr_frags;
+		count += compute_large_page_tx_descs(skb);
+	}
+
+	q->in_use += count;
+	genbit = q->genbit;
+	pidx = q->pidx;
+	q->pidx += count;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->genbit ^= 1;
+	}
+	spin_unlock(&q->lock);
+
+	write_tx_descs(adapter, skb, pidx, genbit, q);
+
+	/*
+	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
+	 * the doorbell if the Q is asleep.  There is a natural race where
+	 * the hardware goes to sleep just after we check; in that case the
+	 * interrupt handler will detect the outstanding TX packet and ring
+	 * the doorbell for us.
+	 */
+	if (qid)
+		doorbell_pio(adapter, F_CMDQ1_ENABLE);
+	else {
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	}
+
+	if (use_sched_skb) {
+		if (spin_trylock(&q->lock)) {
+			credits = q->size - q->in_use;
+			skb = NULL;
+			goto use_sched;
+		}
+	}
+	return NETDEV_TX_OK;
+}
+
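+/*
+ * Pack the Ethernet encapsulation type into the top two bits and the MSS
+ * into the low 14 bits of the 16-bit eth_type_mss field.
+ */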
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
+
+/*
+ *	eth_hdr_len - return the length of an Ethernet header
+ *	@data: pointer to the start of the Ethernet header
+ *
+ *	Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+	const struct ethhdr *e = data;
+
+	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct sge *sge = adapter->sge;
+	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
+	struct cpl_tx_pkt *cpl;
+	struct sk_buff *orig_skb = skb;
+	int ret;
+
+	if (skb->protocol == htons(ETH_P_CPL5))
+		goto send;
+
+	/*
+	 * We are using a non-standard hard_header_len.
+	 * Allocate more header room in the rare cases it is not big enough.
+	 */
+	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+		++st->tx_need_hdrroom;
+		dev_kfree_skb_any(orig_skb);
+		if (!skb)
+			return NETDEV_TX_OK;
+	}
+
+	if (skb_shinfo(skb)->gso_size) {
+		int eth_type;
+		struct cpl_tx_pkt_lso *hdr;
+
+		++st->tx_tso;
+
+		eth_type = skb_network_offset(skb) == ETH_HLEN ?
+			CPL_ETH_II : CPL_ETH_II_VLAN;
+
+		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+		hdr->opcode = CPL_TX_PKT_LSO;
+		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
+		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+							  skb_shinfo(skb)->gso_size));
+		hdr->len = htonl(skb->len - sizeof(*hdr));
+		cpl = (struct cpl_tx_pkt *)hdr;
+	} else {
+		/*
+		 * Packets shorter than ETH_HLEN can break the MAC, so drop
+		 * them early.  We may also get oversized packets because
+		 * some parts of the kernel don't handle our unusual
+		 * hard_header_len correctly, so drop those too.
+		 */
+		if (unlikely(skb->len < ETH_HLEN ||
+			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+			netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
+				   skb->len, eth_hdr_len(skb->data), dev->mtu);
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
+			if (unlikely(skb_checksum_help(skb))) {
+				netdev_dbg(dev, "unable to do udp checksum\n");
+				dev_kfree_skb_any(skb);
+				return NETDEV_TX_OK;
+			}
+		}
+
+		/* Hold on to the first gratuitous ARP we see; we will reuse
+		 * it later to flush out stuck ESPI packets.
+		 */
+		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
+			if (skb->protocol == htons(ETH_P_ARP) &&
+			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
+				adapter->sge->espibug_skb[dev->if_port] = skb;
+				/* We want to re-use this skb later. We
+				 * simply bump the reference count and it
+				 * will not be freed...
+				 */
+				skb = skb_get(skb);
+			}
+		}
+
+		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+		cpl->opcode = CPL_TX_PKT;
+		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
+		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
+		/* the length field isn't used so don't bother setting it */
+
+		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
+	}
+	cpl->iff = dev->if_port;
+
+	if (skb_vlan_tag_present(skb)) {
+		cpl->vlan_valid = 1;
+		cpl->vlan = htons(skb_vlan_tag_get(skb));
+		st->vlan_insert++;
+	} else
+		cpl->vlan_valid = 0;
+
+send:
+	ret = t1_sge_tx(skb, adapter, 0, dev);
+
+	/* If the transmit path was busy and we reallocated the skb because
+	 * of the headroom limit, silently discard it to avoid a leak.
+	 */
+	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+		dev_kfree_skb_any(skb);
+		ret = NETDEV_TX_OK;
+	}
+	return ret;
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+	int i;
+	struct sge *sge = (struct sge *)data;
+
+	for (i = 0; i < SGE_CMDQ_N; ++i) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (!spin_trylock(&q->lock))
+			continue;
+
+		reclaim_completed_tx(sge, q);
+		if (i == 0 && q->in_use) {    /* flush pending credits */
+			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+		}
+		spin_unlock(&q->lock);
+	}
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+	sge->fixed_intrtimer = p->rx_coalesce_usecs *
+		core_ticks_per_usec(sge->adapter);
+	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+	return 0;
+}
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+	if (alloc_rx_resources(sge, p))
+		return -ENOMEM;
+	if (alloc_tx_resources(sge, p)) {
+		free_rx_resources(sge);
+		return -ENOMEM;
+	}
+	configure_sge(sge, p);
+
+	/*
+	 * Now that we have sized the free lists, calculate the payload
+	 * capacity of the large buffers.  Other parts of the driver use
+	 * this to set the max offload coalescing size so that RX packets
+	 * do not overflow our large buffers.
+	 */
+	p->large_buf_capacity = jumbo_payload_capacity(sge);
+	return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+	int i;
+	writel(0, sge->adapter->regs + A_SG_CONTROL);
+	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+	if (is_T2(sge->adapter))
+		del_timer_sync(&sge->espibug_timer);
+
+	del_timer_sync(&sge->tx_reclaim_timer);
+	if (sge->tx_sched)
+		tx_sched_stop(sge);
+
+	for (i = 0; i < MAX_NPORTS; i++)
+		kfree_skb(sge->espibug_skb[i]);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+	if (is_T2(sge->adapter))
+		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet' workaround.
+ */
+static void espibug_workaround_t204(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *sge = adapter->sge;
+	unsigned int nports = adapter->params.nports;
+	u32 seop[MAX_NPORTS];
+
+	if (adapter->open_device_map & PORT_MASK) {
+		int i;
+
+		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
+			return;
+
+		for (i = 0; i < nports; i++) {
+			struct sk_buff *skb = sge->espibug_skb[i];
+
+			if (!netif_running(adapter->port[i].dev) ||
+			    netif_queue_stopped(adapter->port[i].dev) ||
+			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+				continue;
+
+			if (!skb->cb[0]) {
+				skb_copy_to_linear_data_offset(skb,
+						    sizeof(struct cpl_tx_pkt),
+							       ch_mac_addr,
+							       ETH_ALEN);
+				skb_copy_to_linear_data_offset(skb,
+							       skb->len - 10,
+							       ch_mac_addr,
+							       ETH_ALEN);
+				skb->cb[0] = 0xff;
+			}
+
+			/* bump the reference count to avoid freeing of
+			 * the skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
+		}
+	}
+	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+static void espibug_workaround(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *sge = adapter->sge;
+
+	if (netif_running(adapter->port[0].dev)) {
+		struct sk_buff *skb = sge->espibug_skb[0];
+		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+		if ((seop & 0xfff0fff) == 0xfff && skb) {
+			if (!skb->cb[0]) {
+				skb_copy_to_linear_data_offset(skb,
+						sizeof(struct cpl_tx_pkt),
+						ch_mac_addr, ETH_ALEN);
+				skb_copy_to_linear_data_offset(skb,
+						skb->len - 10,
+						ch_mac_addr, ETH_ALEN);
+				skb->cb[0] = 0xff;
+			}
+
+			/* bump the reference count to avoid freeing of the
+			 * skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+		}
+	}
+	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
+{
+	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+	int i;
+
+	if (!sge)
+		return NULL;
+
+	sge->adapter = adapter;
+	sge->netdev = adapter->port[0].dev;
+	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	for_each_port(adapter, i) {
+		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+		if (!sge->port_stats[i])
+			goto nomem_port;
+	}
+
+	init_timer(&sge->tx_reclaim_timer);
+	sge->tx_reclaim_timer.data = (unsigned long)sge;
+	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+	if (is_T2(sge->adapter)) {
+		init_timer(&sge->espibug_timer);
+
+		if (adapter->params.nports > 1) {
+			tx_sched_init(sge);
+			sge->espibug_timer.function = espibug_workaround_t204;
+		} else
+			sge->espibug_timer.function = espibug_workaround;
+		sge->espibug_timer.data = (unsigned long)sge->adapter;
+
+		sge->espibug_timeout = 1;
+		/* for T204, every 10ms */
+		if (adapter->params.nports > 1)
+			sge->espibug_timeout = HZ/100;
+	}
+
+
+	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+	if (sge->tx_sched) {
+		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+			p->rx_coalesce_usecs = 15;
+		else
+			p->rx_coalesce_usecs = 50;
+	} else
+		p->rx_coalesce_usecs = 50;
+
+	p->coalesce_enable = 0;
+	p->sample_interval_usecs = 0;
+
+	return sge;
+nomem_port:
+	while (i >= 0) {
+		free_percpu(sge->port_stats[i]);
+		--i;
+	}
+	kfree(sge);
+	return NULL;
+
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
new file mode 100644
index 0000000..a1ba591
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -0,0 +1,93 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.h                                                               *
+ * $Revision: 1.11 $                                                          *
+ * $Date: 2005/06/21 22:10:55 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+struct sge_intr_counts {
+	unsigned int rx_drops;        /* # of packets dropped due to no mem */
+	unsigned int pure_rsps;        /* # of non-payload responses */
+	unsigned int unhandled_irqs;   /* # of unhandled interrupts */
+	unsigned int respQ_empty;      /* # times respQ empty */
+	unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
+	unsigned int freelistQ_empty;  /* # times freelist empty */
+	unsigned int pkt_too_big;      /* packet too large (fatal) */
+	unsigned int pkt_mismatch;
+	unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
+	unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+};
+
+struct sge_port_stats {
+	u64 rx_cso_good;     /* # of successful RX csum offloads */
+	u64 tx_cso;          /* # of TX checksum offloads */
+	u64 tx_tso;          /* # of TSO requests */
+	u64 vlan_xtract;     /* # of VLAN tag extractions */
+	u64 vlan_insert;     /* # of VLAN tag insertions */
+	u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct napi_struct *, int);
+
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
+void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
+unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
+			   unsigned int);
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
new file mode 100644
index 0000000..ea0f874
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -0,0 +1,1128 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: subr.c                                                              *
+ * $Revision: 1.27 $                                                         *
+ * $Date: 2005/06/22 01:08:36 $                                              *
+ * Description:                                                              *
+ *  Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "elmer0.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "tp.h"
+#include "espi.h"
+
+/**
+ *	t1_wait_op_done - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  Returns %0 if the operation completes and %1
+ *	otherwise.
+ */
+static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
+			   int attempts, int delay)
+{
+	while (1) {
+		u32 val = readl(adapter->regs + reg) & mask;
+
+		if (!!val == polarity)
+			return 0;
+		if (--attempts == 0)
+			return 1;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+#define TPI_ATTEMPTS 50
+
+/*
+ * Write a register over the TPI interface (unlocked and locked versions).
+ */
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(value, adapter->regs + A_TPI_WR_DATA);
+	writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		pr_alert("%s: TPI write to 0x%x failed\n",
+			 adapter->name, addr);
+	return tpi_busy;
+}
+
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int ret;
+
+	spin_lock(&adapter->tpi_lock);
+	ret = __t1_tpi_write(adapter, addr, value);
+	spin_unlock(&adapter->tpi_lock);
+	return ret;
+}
+
+/*
+ * Read a register over the TPI interface (unlocked and locked versions).
+ */
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(0, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		pr_alert("%s: TPI read from 0x%x failed\n",
+			 adapter->name, addr);
+	else
+		*valp = readl(adapter->regs + A_TPI_RD_DATA);
+	return tpi_busy;
+}
+
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int ret;
+
+	spin_lock(&adapter->tpi_lock);
+	ret = __t1_tpi_read(adapter, addr, valp);
+	spin_unlock(&adapter->tpi_lock);
+	return ret;
+}
+
+/*
+ * Set a TPI parameter.
+ */
+static void t1_tpi_par(adapter_t *adapter, u32 value)
+{
+	writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
+}
+
+/*
+ * Called when a port's link settings change to propagate the new values to the
+ * associated PHY and MAC.  After performing the common tasks it invokes an
+ * OS-specific handler.
+ */
+void t1_link_changed(adapter_t *adapter, int port_id)
+{
+	int link_ok, speed, duplex, fc;
+	struct cphy *phy = adapter->port[port_id].phy;
+	struct link_config *lc = &adapter->port[port_id].link_config;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+	if (!(lc->requested_fc & PAUSE_AUTONEG))
+		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+		/* Set MAC speed, duplex, and flow control to match PHY. */
+		struct cmac *mac = adapter->port[port_id].mac;
+
+		mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
+		lc->fc = (unsigned char)fc;
+	}
+	t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+static int t1_pci_intr_handler(adapter_t *adapter)
+{
+	u32 pcix_cause;
+
+	pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+
+	if (pcix_cause) {
+		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
+				       pcix_cause);
+		t1_fatal_err(adapter);    /* PCI errors are fatal */
+	}
+	return 0;
+}
+
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+
+/*
+ * PHY interrupt handler for FPGA boards.
+ */
+static int fpga_phy_intr_handler(adapter_t *adapter)
+{
+	int p;
+	u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+
+	for_each_port(adapter, p)
+		if (cause & (1 << p)) {
+			struct cphy *phy = adapter->port[p].phy;
+			int phy_cause = phy->ops->interrupt_handler(phy);
+
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, p);
+		}
+	writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+	return 0;
+}
+
+/*
+ * Slow path interrupt handler for FPGAs.
+ */
+static int fpga_slow_intr(adapter_t *adapter)
+{
+	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+	cause &= ~F_PL_INTR_SGE_DATA;
+	if (cause & F_PL_INTR_SGE_ERR)
+		t1_sge_intr_error_handler(adapter->sge);
+
+	if (cause & FPGA_PCIX_INTERRUPT_GMAC)
+		fpga_phy_intr_handler(adapter);
+
+	if (cause & FPGA_PCIX_INTERRUPT_TP) {
+		/*
+		 * FPGA doesn't support MC4 interrupts and it requires
+		 * this odd layer of indirection for MC5.
+		 */
+		u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+
+		/* Clear TP interrupt */
+		writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+	}
+	if (cause & FPGA_PCIX_INTERRUPT_PCIX)
+		t1_pci_intr_handler(adapter);
+
+	/* Clear the interrupts just processed. */
+	if (cause)
+		writel(cause, adapter->regs + A_PL_CAUSE);
+
+	return cause != 0;
+}
+#endif
+
+/*
+ * Wait until Elmer's MI1 interface is ready for new operations.
+ */
+static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
+{
+	int attempts = 100, busy;
+
+	do {
+		u32 val;
+
+		__t1_tpi_read(adapter, mi1_reg, &val);
+		busy = val & F_MI1_OP_BUSY;
+		if (busy)
+			udelay(10);
+	} while (busy && --attempts);
+	if (busy)
+		pr_alert("%s: MDIO operation timed out\n", adapter->name);
+	return busy;
+}
+
+/*
+ * MI1 MDIO initialization.
+ */
+static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
+{
+	u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
+	u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
+		V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
+
+	if (!(bi->caps & SUPPORTED_10000baseT_Full))
+		val |= V_MI1_SOF(1);
+	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
+}
+
+#if defined(CONFIG_CHELSIO_T1_1G)
+/*
+ * Elmer MI1 MDIO read/write operations.
+ */
+static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr,
+			 u16 reg_addr)
+{
+	struct adapter *adapter = dev->ml_priv;
+	u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+	unsigned int val;
+
+	spin_lock(&adapter->tpi_lock);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
+	spin_unlock(&adapter->tpi_lock);
+	return val;
+}
+
+static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
+			  u16 reg_addr, u16 val)
+{
+	struct adapter *adapter = dev->ml_priv;
+	u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&adapter->tpi_lock);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	spin_unlock(&adapter->tpi_lock);
+	return 0;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+	.init = mi1_mdio_init,
+	.read = mi1_mdio_read,
+	.write = mi1_mdio_write,
+	.mode_support = MDIO_SUPPORTS_C22
+};
+
+#endif
+
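+/*
+ * Elmer MI1 MDIO operations for clause-45 PHYs: the (MMD, register) address
+ * is loaded with an indirect address cycle before the actual read or write.
+ */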
+static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+			     u16 reg_addr)
+{
+	struct adapter *adapter = dev->ml_priv;
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+	unsigned int val;
+
+	spin_lock(&adapter->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the operation we want. */
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Read the data. */
+	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
+	spin_unlock(&adapter->tpi_lock);
+	return val;
+}
+
+static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr,
+			      int mmd_addr, u16 reg_addr, u16 val)
+{
+	struct adapter *adapter = dev->ml_priv;
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&adapter->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the data. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	spin_unlock(&adapter->tpi_lock);
+	return 0;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+	.init = mi1_mdio_init,
+	.read = mi1_mdio_ext_read,
+	.write = mi1_mdio_ext_write,
+	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
+};
+
+enum {
+	CH_BRD_T110_1CU,
+	CH_BRD_N110_1F,
+	CH_BRD_N210_1F,
+	CH_BRD_T210_1F,
+	CH_BRD_T210_1CU,
+	CH_BRD_N204_4CU,
+};
+
+static const struct board_info t1_board[] = {
+	{
+		.board		= CHBT_BOARD_CHT110,
+		.port_number	= 1,
+		.caps		= SUPPORTED_10000baseT_Full,
+		.chip_term	= CHBT_TERM_T1,
+		.chip_mac	= CHBT_MAC_PM3393,
+		.chip_phy	= CHBT_PHY_MY3126,
+		.clock_core	= 125000000,
+		.clock_mc3	= 150000000,
+		.clock_mc4	= 125000000,
+		.espi_nports	= 1,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 1,
+		.mdio_mdiinv	= 1,
+		.mdio_mdc	= 1,
+		.mdio_phybaseaddr = 1,
+		.gmac		= &t1_pm3393_ops,
+		.gphy		= &t1_my3126_ops,
+		.mdio_ops	= &mi1_mdio_ext_ops,
+		.desc		= "Chelsio T110 1x10GBase-CX4 TOE",
+	},
+
+	{
+		.board		= CHBT_BOARD_N110,
+		.port_number	= 1,
+		.caps		= SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE,
+		.chip_term	= CHBT_TERM_T1,
+		.chip_mac	= CHBT_MAC_PM3393,
+		.chip_phy	= CHBT_PHY_88X2010,
+		.clock_core	= 125000000,
+		.espi_nports	= 1,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 0,
+		.mdio_mdiinv	= 0,
+		.mdio_mdc	= 1,
+		.mdio_phybaseaddr = 0,
+		.gmac		= &t1_pm3393_ops,
+		.gphy		= &t1_mv88x201x_ops,
+		.mdio_ops	= &mi1_mdio_ext_ops,
+		.desc		= "Chelsio N110 1x10GBaseX NIC",
+	},
+
+	{
+		.board		= CHBT_BOARD_N210,
+		.port_number	= 1,
+		.caps		= SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE,
+		.chip_term	= CHBT_TERM_T2,
+		.chip_mac	= CHBT_MAC_PM3393,
+		.chip_phy	= CHBT_PHY_88X2010,
+		.clock_core	= 125000000,
+		.espi_nports	= 1,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 0,
+		.mdio_mdiinv	= 0,
+		.mdio_mdc	= 1,
+		.mdio_phybaseaddr = 0,
+		.gmac		= &t1_pm3393_ops,
+		.gphy		= &t1_mv88x201x_ops,
+		.mdio_ops	= &mi1_mdio_ext_ops,
+		.desc		= "Chelsio N210 1x10GBaseX NIC",
+	},
+
+	{
+		.board		= CHBT_BOARD_CHT210,
+		.port_number	= 1,
+		.caps		= SUPPORTED_10000baseT_Full,
+		.chip_term	= CHBT_TERM_T2,
+		.chip_mac	= CHBT_MAC_PM3393,
+		.chip_phy	= CHBT_PHY_88X2010,
+		.clock_core	= 125000000,
+		.clock_mc3	= 133000000,
+		.clock_mc4	= 125000000,
+		.espi_nports	= 1,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 0,
+		.mdio_mdiinv	= 0,
+		.mdio_mdc	= 1,
+		.mdio_phybaseaddr = 0,
+		.gmac		= &t1_pm3393_ops,
+		.gphy		= &t1_mv88x201x_ops,
+		.mdio_ops	= &mi1_mdio_ext_ops,
+		.desc		= "Chelsio T210 1x10GBaseX TOE",
+	},
+
+	{
+		.board		= CHBT_BOARD_CHT210,
+		.port_number	= 1,
+		.caps		= SUPPORTED_10000baseT_Full,
+		.chip_term	= CHBT_TERM_T2,
+		.chip_mac	= CHBT_MAC_PM3393,
+		.chip_phy	= CHBT_PHY_MY3126,
+		.clock_core	= 125000000,
+		.clock_mc3	= 133000000,
+		.clock_mc4	= 125000000,
+		.espi_nports	= 1,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 1,
+		.mdio_mdiinv	= 1,
+		.mdio_mdc	= 1,
+		.mdio_phybaseaddr = 1,
+		.gmac		= &t1_pm3393_ops,
+		.gphy		= &t1_my3126_ops,
+		.mdio_ops	= &mi1_mdio_ext_ops,
+		.desc		= "Chelsio T210 1x10GBase-CX4 TOE",
+	},
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	{
+		.board		= CHBT_BOARD_CHN204,
+		.port_number	= 4,
+		.caps		= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+				| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
+				| SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+				  SUPPORTED_PAUSE | SUPPORTED_TP,
+		.chip_term	= CHBT_TERM_T2,
+		.chip_mac	= CHBT_MAC_VSC7321,
+		.chip_phy	= CHBT_PHY_88E1111,
+		.clock_core	= 100000000,
+		.espi_nports	= 4,
+		.clock_elmer0	= 44,
+		.mdio_mdien	= 0,
+		.mdio_mdiinv	= 0,
+		.mdio_mdc	= 0,
+		.mdio_phybaseaddr = 4,
+		.gmac		= &t1_vsc7326_ops,
+		.gphy		= &t1_mv88e1xxx_ops,
+		.mdio_ops	= &mi1_mdio_ops,
+		.desc		= "Chelsio N204 4x100/1000BaseT NIC",
+	},
+#endif
+
+};
+
+const struct pci_device_id t1_pci_tbl[] = {
+	CH_DEVICE(8, 0, CH_BRD_T110_1CU),
+	CH_DEVICE(8, 1, CH_BRD_T110_1CU),
+	CH_DEVICE(7, 0, CH_BRD_N110_1F),
+	CH_DEVICE(10, 1, CH_BRD_N210_1F),
+	CH_DEVICE(11, 1, CH_BRD_T210_1F),
+	CH_DEVICE(14, 1, CH_BRD_T210_1CU),
+	CH_DEVICE(16, 1, CH_BRD_N204_4CU),
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
+
+/*
+ * Return the board_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct board_info *t1_get_board_info(unsigned int board_id)
+{
+	return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
+}
+
+struct chelsio_vpd_t {
+	u32 format_version;
+	u8 serial_number[16];
+	u8 mac_base_address[6];
+	u8 pad[2];           /* make multiple-of-4 size requirement explicit */
+};
+
+#define EEPROMSIZE        (8 * 1024)
+#define EEPROM_MAX_POLL   4
+
+/*
+ * Read the SEEPROM.  Writing the address to the control register clears the
+ * flag bit; the hardware sets it again once 4 bytes have been transferred
+ * into the data register.
+ */
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data)
+{
+	int i = EEPROM_MAX_POLL;
+	u16 val;
+	u32 v;
+
+	if (addr >= EEPROMSIZE || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
+	do {
+		udelay(50);
+		pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
+	} while (!(val & F_VPD_OP_FLAG) && --i);
+
+	if (!(val & F_VPD_OP_FLAG)) {
+		pr_err("%s: reading EEPROM address 0x%x failed\n",
+		       adapter->name, addr);
+		return -EIO;
+	}
+	pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v);
+	*data = cpu_to_le32(v);
+	return 0;
+}
+
+static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
+{
+	int addr, ret = 0;
+
+	for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
+		ret = t1_seeprom_read(adapter, addr,
+				      (__le32 *)((u8 *)vpd + addr));
+
+	return ret;
+}
+
+/*
+ * Read a port's MAC address from the VPD ROM.
+ */
+static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
+{
+	struct chelsio_vpd_t vpd;
+
+	if (t1_eeprom_vpd_get(adapter, &vpd))
+		return 1;
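+	/* All ports share the upper five bytes of the VPD base MAC address;
+	 * the low byte is offset by the port index.
+	 */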
+	memcpy(mac_addr, vpd.mac_base_address, 5);
+	mac_addr[5] = vpd.mac_base_address[5] + index;
+	return 0;
+}
+
+/*
+ * Set up the MAC/PHY according to the requested link settings.
+ *
+ * If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired and reset.
+ *
+ * If the PHY does not auto-negotiate we just reset it.
+ *
+ * If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
+		if (fc) {
+			if (fc == ((PAUSE_RX | PAUSE_TX) &
+				   (mac->adapter->params.nports < 2)))
+				lc->advertising |= ADVERTISED_PAUSE;
+			else {
+				lc->advertising |= ADVERTISED_ASYM_PAUSE;
+				if (fc == PAUSE_RX)
+					lc->advertising |= ADVERTISED_PAUSE;
+			}
+		}
+		phy->ops->advertise(phy, lc->advertising);
+
+		if (lc->autoneg == AUTONEG_DISABLE) {
+			lc->speed = lc->requested_speed;
+			lc->duplex = lc->requested_duplex;
+			lc->fc = (unsigned char)fc;
+			mac->ops->set_speed_duplex_fc(mac, lc->speed,
+						      lc->duplex, fc);
+			/* Also disables autoneg */
+			phy->state = PHY_AUTONEG_RDY;
+			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+			phy->ops->reset(phy, 0);
+		} else {
+			phy->state = PHY_AUTONEG_EN;
+			phy->ops->autoneg_enable(phy); /* also resets PHY */
+		}
+	} else {
+		phy->state = PHY_AUTONEG_RDY;
+		mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
+		lc->fc = (unsigned char)fc;
+		phy->ops->reset(phy, 0);
+	}
+	return 0;
+}
+
+/*
+ * External interrupt handler for boards using elmer0.
+ */
+int t1_elmer0_ext_intr_handler(adapter_t *adapter)
+{
+	struct cphy *phy;
+	int phy_cause;
+	u32 cause;
+
+	t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
+
+	switch (board_info(adapter)->board) {
+#ifdef CONFIG_CHELSIO_T1_1G
+	case CHBT_BOARD_CHT204:
+	case CHBT_BOARD_CHT204E:
+	case CHBT_BOARD_CHN204:
+	case CHBT_BOARD_CHT204V: {
+		int i, port_bit;
+		for_each_port(adapter, i) {
+			port_bit = i + 1;
+			if (!(cause & (1 << port_bit)))
+				continue;
+
+			phy = adapter->port[i].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, i);
+		}
+		break;
+	}
+	case CHBT_BOARD_CHT101:
+		if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
+			phy = adapter->port[0].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, 0);
+		}
+		break;
+	case CHBT_BOARD_7500: {
+		int p;
+		/*
+		 * Elmer0's interrupt cause isn't useful here because there is
+		 * only one bit that can be set for all 4 ports.  This means
+		 * we are forced to check every PHY's interrupt status
+		 * register to see who initiated the interrupt.
+		 */
+		for_each_port(adapter, p) {
+			phy = adapter->port[p].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, p);
+		}
+		break;
+	}
+#endif
+	case CHBT_BOARD_CHT210:
+	case CHBT_BOARD_N210:
+	case CHBT_BOARD_N110:
+		if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
+			phy = adapter->port[0].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, 0);
+		}
+		break;
+	case CHBT_BOARD_8000:
+	case CHBT_BOARD_CHT110:
+		if (netif_msg_intr(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"External interrupt cause 0x%x\n", cause);
+		if (cause & ELMER0_GP_BIT1) {        /* PMC3393 INTB */
+			struct cmac *mac = adapter->port[0].mac;
+
+			mac->ops->interrupt_handler(mac);
+		}
+		if (cause & ELMER0_GP_BIT5) {        /* XPAK MOD_DETECT */
+			u32 mod_detect;
+
+			t1_tpi_read(adapter,
+					A_ELMER0_GPI_STAT, &mod_detect);
+			if (netif_msg_link(adapter))
+				dev_info(&adapter->pdev->dev, "XPAK %s\n",
+					 mod_detect ? "removed" : "inserted");
+		}
+		break;
+	}
+	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
+	return 0;
+}
+
+/* Enables all interrupts. */
+void t1_interrupts_enable(adapter_t *adapter)
+{
+	unsigned int i;
+
+	adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
+
+	t1_sge_intr_enable(adapter->sge);
+	t1_tp_intr_enable(adapter->tp);
+	if (adapter->espi) {
+		adapter->slow_intr_mask |= F_PL_INTR_ESPI;
+		t1_espi_intr_enable(adapter->espi);
+	}
+
+	/* Enable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
+	}
+
+	/* Enable PCIX & external chip interrupts on ASIC boards. */
+	if (t1_is_asic(adapter)) {
+		u32 pl_intr = readl(adapter->regs + A_PL_ENABLE);
+
+		/* PCI-X interrupts */
+		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+				       0xffffffff);
+
+		adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+		pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+		writel(pl_intr, adapter->regs + A_PL_ENABLE);
+	}
+}
+
+/* Disables all interrupts. */
+void t1_interrupts_disable(adapter_t* adapter)
+{
+	unsigned int i;
+
+	t1_sge_intr_disable(adapter->sge);
+	t1_tp_intr_disable(adapter->tp);
+	if (adapter->espi)
+		t1_espi_intr_disable(adapter->espi);
+
+	/* Disable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
+	}
+
+	/* Disable PCIX & external chip interrupts. */
+	if (t1_is_asic(adapter))
+		writel(0, adapter->regs + A_PL_ENABLE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
+
+	adapter->slow_intr_mask = 0;
+}
+
+/* Clears all interrupts */
+void t1_interrupts_clear(adapter_t* adapter)
+{
+	unsigned int i;
+
+	t1_sge_intr_clear(adapter->sge);
+	t1_tp_intr_clear(adapter->tp);
+	if (adapter->espi)
+		t1_espi_intr_clear(adapter->espi);
+
+	/* Clear MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
+	}
+
+	/* Enable interrupts for external devices. */
+	if (t1_is_asic(adapter)) {
+		u32 pl_intr = readl(adapter->regs + A_PL_CAUSE);
+
+		writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+		       adapter->regs + A_PL_CAUSE);
+	}
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
+}
+
+/*
+ * Slow path interrupt handler for ASICs.
+ */
+static int asic_slow_intr(adapter_t *adapter)
+{
+	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+	cause &= adapter->slow_intr_mask;
+	if (!cause)
+		return 0;
+	if (cause & F_PL_INTR_SGE_ERR)
+		t1_sge_intr_error_handler(adapter->sge);
+	if (cause & F_PL_INTR_TP)
+		t1_tp_intr_handler(adapter->tp);
+	if (cause & F_PL_INTR_ESPI)
+		t1_espi_intr_handler(adapter->espi);
+	if (cause & F_PL_INTR_PCIX)
+		t1_pci_intr_handler(adapter);
+	if (cause & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr(adapter);
+
+	/* Clear the interrupts just processed. */
+	writel(cause, adapter->regs + A_PL_CAUSE);
+	readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+	return 1;
+}
+
+int t1_slow_intr_handler(adapter_t *adapter)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(adapter))
+		return fpga_slow_intr(adapter);
+#endif
+	return asic_slow_intr(adapter);
+}
+
+/* Power sequencing is a work-around for Intel's XPAKs. */
+static void power_sequence_xpak(adapter_t* adapter)
+{
+	u32 mod_detect;
+	u32 gpo;
+
+	/* Check for XPAK */
+	t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
+	if (!(ELMER0_GP_BIT5 & mod_detect)) {
+		/* XPAK is present */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
+		gpo |= ELMER0_GP_BIT18;
+		t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
+	}
+}
+
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+		     struct adapter_params *p)
+{
+	p->chip_version = bi->chip_term;
+	p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
+	if (p->chip_version == CHBT_TERM_T1 ||
+	    p->chip_version == CHBT_TERM_T2 ||
+	    p->chip_version == CHBT_TERM_FPGA) {
+		u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
+
+		val = G_TP_PC_REV(val);
+		if (val == 2)
+			p->chip_revision = TERM_T1B;
+		else if (val == 3)
+			p->chip_revision = TERM_T2;
+		else
+			return -1;
+	} else
+		return -1;
+	return 0;
+}
+
+/*
+ * Enable board components other than the Chelsio chip, such as external MAC
+ * and PHY.
+ */
+static int board_init(adapter_t *adapter, const struct board_info *bi)
+{
+	switch (bi->board) {
+	case CHBT_BOARD_8000:
+	case CHBT_BOARD_N110:
+	case CHBT_BOARD_N210:
+	case CHBT_BOARD_CHT210:
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+		break;
+	case CHBT_BOARD_CHT110:
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
+
+		/* TBD XXX Might not need.  This fixes a problem
+		 *         described in the Intel SR XPAK errata.
+		 */
+		power_sequence_xpak(adapter);
+		break;
+#ifdef CONFIG_CHELSIO_T1_1G
+	case CHBT_BOARD_CHT204E:
+		/* add config space write here */
+	case CHBT_BOARD_CHT204:
+	case CHBT_BOARD_CHT204V:
+	case CHBT_BOARD_CHN204:
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
+		break;
+	case CHBT_BOARD_CHT101:
+	case CHBT_BOARD_7500:
+		t1_tpi_par(adapter, 0xf);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
+		break;
+#endif
+	}
+	return 0;
+}
+
+/*
+ * Initialize and configure the Terminator HW modules.  Note that external
+ * MAC and PHYs are initialized separately.
+ */
+int t1_init_hw_modules(adapter_t *adapter)
+{
+	int err = -EIO;
+	const struct board_info *bi = board_info(adapter);
+
+	if (!bi->clock_mc4) {
+		u32 val = readl(adapter->regs + A_MC4_CFG);
+
+		writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
+		writel(F_M_BUS_ENABLE | F_TCAM_RESET,
+		       adapter->regs + A_MC5_CONFIG);
+	}
+
+	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
+					  bi->espi_nports))
+		goto out_err;
+
+	if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
+		goto out_err;
+
+	err = t1_sge_configure(adapter->sge, &adapter->params.sge);
+	if (err)
+		goto out_err;
+
+	err = 0;
+out_err:
+	return err;
+}
+
+/*
+ * Determine a card's PCI mode.
+ */
+static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+{
+	static const unsigned short speed_map[] = { 33, 66, 100, 133 };
+	u32 pci_mode;
+
+	pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
+	p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
+	p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
+	p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
+}
+
+/*
+ * Release the structures holding the SW per-Terminator-HW-module state.
+ */
+void t1_free_sw_modules(adapter_t *adapter)
+{
+	unsigned int i;
+
+	for_each_port(adapter, i) {
+		struct cmac *mac = adapter->port[i].mac;
+		struct cphy *phy = adapter->port[i].phy;
+
+		if (mac)
+			mac->ops->destroy(mac);
+		if (phy)
+			phy->ops->destroy(phy);
+	}
+
+	if (adapter->sge)
+		t1_sge_destroy(adapter->sge);
+	if (adapter->tp)
+		t1_tp_destroy(adapter->tp);
+	if (adapter->espi)
+		t1_espi_destroy(adapter->espi);
+}
+
+static void init_link_config(struct link_config *lc,
+			     const struct board_info *bi)
+{
+	lc->supported = bi->caps;
+	lc->requested_speed = lc->speed = SPEED_INVALID;
+	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/*
+ * Allocate and initialize the data structures that hold the SW state of
+ * the Terminator HW modules.
+ */
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
+{
+	unsigned int i;
+
+	adapter->params.brd_info = bi;
+	adapter->params.nports = bi->port_number;
+	adapter->params.stats_update_period = bi->gmac->stats_update_period;
+
+	adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
+	if (!adapter->sge) {
+		pr_err("%s: SGE initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
+		pr_err("%s: ESPI initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
+	if (!adapter->tp) {
+		pr_err("%s: TP initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	board_init(adapter, bi);
+	bi->mdio_ops->init(adapter, bi);
+	if (bi->gphy->reset)
+		bi->gphy->reset(adapter);
+	if (bi->gmac->reset)
+		bi->gmac->reset(adapter);
+
+	for_each_port(adapter, i) {
+		u8 hw_addr[6];
+		struct cmac *mac;
+		int phy_addr = bi->mdio_phybaseaddr + i;
+
+		adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
+							phy_addr, bi->mdio_ops);
+		if (!adapter->port[i].phy) {
+			pr_err("%s: PHY %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
+		if (!mac) {
+			pr_err("%s: MAC %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		/*
+		 * Get the port's MAC address either from the EEPROM, if one
+		 * exists, or from the address hardcoded in the MAC.
+		 */
+		if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
+			mac->ops->macaddress_get(mac, hw_addr);
+		else if (vpd_macaddress_get(adapter, i, hw_addr)) {
+			pr_err("%s: could not read MAC address from VPD ROM\n",
+			       adapter->port[i].dev->name);
+			goto error;
+		}
+		memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+		init_link_config(&adapter->port[i].link_config, bi);
+	}
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	t1_interrupts_clear(adapter);
+	return 0;
+
+error:
+	t1_free_sw_modules(adapter);
+	return -1;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
new file mode 100644
index 0000000..7f79cc7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -0,0 +1,1642 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: suni1x10gexp_regs.h                                                 *
+ * $Revision: 1.9 $                                                          *
+ * $Date: 2005/06/22 00:17:04 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: PMC/SIERRA                                                       *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
+#define _CXGB_SUNI1x10GEXP_REGS_H_
+
+/*
+** Space allocated for each Exact Match Filter
+**     There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003
+
+#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)       ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER )
+
+/*
+** Space allocated for VLAN-Id Filter
+**      There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001
+
+#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)   ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER )
+
+/*
+** Space allocated for each MSTAT Counter
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004
+
+#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)       ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT )
+
+
+/******************************************************************************/
+/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP                                     **/
+/******************************************************************************/
+/* Refer to the Register Bit Masks below for the naming of each register and  */
+/* to the S/UNI-1x10GE-XP Data Sheet for the significance of each bit         */
+/******************************************************************************/
+
+
+#define SUNI1x10GEXP_REG_IDENTIFICATION                                  0x0000
+#define SUNI1x10GEXP_REG_PRODUCT_REVISION                                0x0001
+#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL                        0x0002
+#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL                              0x0003
+#define SUNI1x10GEXP_REG_DEVICE_STATUS                                   0x0004
+#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE               0x0005
+
+#define SUNI1x10GEXP_REG_MDIO_COMMAND                                    0x0006
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE                           0x0007
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS                           0x0008
+#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS                                 0x0009
+#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA                        0x000A
+#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA                           0x000B
+
+#define SUNI1x10GEXP_REG_OAM_INTF_CTRL                                   0x000C
+#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS                         0x000D
+#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE                         0x000E
+#define SUNI1x10GEXP_REG_FREE                                            0x000F
+
+#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL                                  0x0010
+#define SUNI1x10GEXP_REG_XRF_MISC_CTRL                                   0x0011
+
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1                            0x0100
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2                            0x0101
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE                    0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE                   0x0103
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS                    0x0104
+#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG                         0x0107
+
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_1                                   0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_2                                   0x2041
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_3                                   0x2042
+#define SUNI1x10GEXP_REG_RXXG_INTERRUPT                                  0x2043
+#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH                           0x2045
+#define SUNI1x10GEXP_REG_RXXG_SA_15_0                                    0x2046
+#define SUNI1x10GEXP_REG_RXXG_SA_31_16                                   0x2047
+#define SUNI1x10GEXP_REG_RXXG_SA_47_32                                   0x2048
+#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD                     0x2049
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId)      (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId))
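+/*
+ * Illustrative expansion (derived from the SIZEOF/OFFSET definitions earlier
+ * in this file): with SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER == 0x0003,
+ * mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(1) == 0x204A + 1 * 0x0003
+ * == 0x204D, i.e. SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW below.
+ */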
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW                     0x204A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID                     0x204B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH                    0x204C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW                     0x204D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID                     0x204E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH                    0x204F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW                     0x2050
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID                     0x2051
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH                    0x2052
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW                     0x2053
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID                     0x2054
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH                    0x2055
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW                     0x2056
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID                     0x2057
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH                    0x2058
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW                     0x2059
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID                     0x205A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH                    0x205B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW                     0x205C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID                     0x205D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH                    0x205E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW                     0x205F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID                     0x2060
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH                    0x2061
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0                          0x2062
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1                          0x2063
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2                          0x2064
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3                          0x2065
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4                          0x2066
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5                          0x2067
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6                          0x2068
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7                          0x2069
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW                         0x206A
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW                      0x206B
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH                     0x206C
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH                        0x206D
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0                   0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1                   0x206F
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2                   0x2070
+
+#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL                            0x2081
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0                       0x2084
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1                       0x2085
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2                       0x2086
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3                       0x2087
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE                            0x2088
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS                            0x2089
+#define SUNI1x10GEXP_REG_XRF_ERR_STATUS                                  0x208A
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE                       0x208B
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS                       0x208C
+#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES                              0x2092
+
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG                                    0x20C0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG                           0x20C1
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG                           0x20C2
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2                                  0x20C3
+#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG                                0x20C4
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES                             0x20C5
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE                          0x20C7
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS                          0x20C8
+#define SUNI1x10GEXP_REG_RXOAM_STATUS                                    0x20C9
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT                             0x20CA
+#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT                       0x20CB
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB                 0x20CC
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB                 0x20CD
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB               0x20CE
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB               0x20CF
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB               0x20D0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB               0x20D1
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB                     0x20D2
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB                     0x20D3
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB                     0x20D4
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB                     0x20D5
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB                 0x20D6
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB                 0x20D7
+
+#define SUNI1x10GEXP_REG_MSTAT_CONTROL                                   0x2100
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0                        0x2101
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1                        0x2102
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2                        0x2103
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3                        0x2104
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0                          0x2105
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1                          0x2106
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2                          0x2107
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3                          0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS                     0x2109
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW                    0x210A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE                 0x210B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH                   0x210C
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId)   (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId)   (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId)  (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
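+/*
+ * Illustrative expansion (derived from the definitions above): with
+ * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT == 0x0004,
+ * mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(2) == 0x2110 + 2 * 0x0004 == 0x2118,
+ * i.e. SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW below.
+ */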
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW                             0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID                             0x2111
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH                            0x2112
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD                           0x2113
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW                             0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID                             0x2115
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH                            0x2116
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD                           0x2117
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW                             0x2118
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID                             0x2119
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH                            0x211A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD                           0x211B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW                             0x211C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID                             0x211D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH                            0x211E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD                           0x211F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW                             0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID                             0x2121
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH                            0x2122
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD                           0x2123
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW                             0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID                             0x2125
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH                            0x2126
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD                           0x2127
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW                             0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID                             0x2129
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH                            0x212A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD                           0x212B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW                             0x212C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID                             0x212D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH                            0x212E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD                           0x212F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW                             0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID                             0x2131
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH                            0x2132
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD                           0x2133
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW                             0x2134
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID                             0x2135
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH                            0x2136
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD                           0x2137
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW                            0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID                            0x2139
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH                           0x213A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD                          0x213B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW                            0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID                            0x213D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH                           0x213E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD                          0x213F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW                            0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID                            0x2141
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH                           0x2142
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD                          0x2143
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW                            0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID                            0x2145
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH                           0x2146
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD                          0x2147
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW                            0x2148
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID                            0x2149
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH                           0x214A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD                          0x214B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW                            0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID                            0x214D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH                           0x214E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD                          0x214F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW                            0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID                            0x2151
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH                           0x2152
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD                          0x2153
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW                            0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID                            0x2155
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH                           0x2156
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD                          0x2157
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW                            0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID                            0x2159
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH                           0x215A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD                          0x215B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW                            0x215C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID                            0x215D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH                           0x215E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD                          0x215F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW                            0x2160
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID                            0x2161
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH                           0x2162
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD                          0x2163
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW                            0x2164
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID                            0x2165
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH                           0x2166
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD                          0x2167
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW                            0x2168
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID                            0x2169
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH                           0x216A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD                          0x216B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW                            0x216C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID                            0x216D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH                           0x216E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD                          0x216F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW                            0x2170
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID                            0x2171
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH                           0x2172
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD                          0x2173
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW                            0x2174
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID                            0x2175
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH                           0x2176
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD                          0x2177
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW                            0x2178
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID                            0x2179
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH                           0x217a
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD                          0x217b
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW                            0x217c
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID                            0x217d
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH                           0x217e
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD                          0x217f
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW                            0x2180
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID                            0x2181
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH                           0x2182
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD                          0x2183
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW                            0x2184
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID                            0x2185
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH                           0x2186
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD                          0x2187
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW                            0x2188
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID                            0x2189
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH                           0x218A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD                          0x218B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW                            0x218C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID                            0x218D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH                           0x218E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD                          0x218F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW                            0x2190
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID                            0x2191
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH                           0x2192
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD                          0x2193
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW                            0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID                            0x2195
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH                           0x2196
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD                          0x2197
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW                            0x2198
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID                            0x2199
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH                           0x219A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD                          0x219B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW                            0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID                            0x219D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH                           0x219E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD                          0x219F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW                            0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID                            0x21A1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH                           0x21A2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD                          0x21A3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW                            0x21A4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID                            0x21A5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH                           0x21A6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD                          0x21A7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW                            0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID                            0x21A9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH                           0x21AA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD                          0x21AB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW                            0x21AC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID                            0x21AD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH                           0x21AE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD                          0x21AF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW                            0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID                            0x21B1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH                           0x21B2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD                          0x21B3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW                            0x21B4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID                            0x21B5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH                           0x21B6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD                          0x21B7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW                            0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID                            0x21B9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH                           0x21BA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD                          0x21BB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW                            0x21BC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID                            0x21BD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH                           0x21BE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD                          0x21BF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW                            0x21C0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID                            0x21C1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH                           0x21C2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD                          0x21C3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW                            0x21C4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID                            0x21C5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH                           0x21C6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD                          0x21C7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW                            0x21C8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID                            0x21C9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH                           0x21CA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD                          0x21CB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW                            0x21CC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID                            0x21CD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH                           0x21CE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD                          0x21CF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW                            0x21D0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID                            0x21D1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH                           0x21D2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD                          0x21D3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW                            0x21D4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID                            0x21D5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH                           0x21D6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD                          0x21D7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW                            0x21D8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID                            0x21D9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH                           0x21DA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD                          0x21DB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW                            0x21DC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID                            0x21DD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH                           0x21DE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD                          0x21DF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW                            0x21E0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID                            0x21E1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH                           0x21E2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD                          0x21E3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW                            0x21E4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID                            0x21E5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH                           0x21E6
+#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM                               51
+
+#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG                              0x2200
+#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION                          0x2201
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE                       0x2209
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT                    0x220A
+#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS                      0x220D
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION     0x220E
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT              0x220F
+#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT        0x2210
+#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT      0x2211
+
+#define SUNI1x10GEXP_REG_PL4MOS_CONFIG                                   0x2240
+#define SUNI1x10GEXP_REG_PL4MOS_MASK                                     0x2241
+#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING                         0x2242
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1                                0x2243
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2                                0x2244
+#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE                            0x2245
+
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG                                   0x2280
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK                           0x2282
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT                                0x2283
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T                             0x2284
+
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS                        0x2300
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE                        0x2301
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK                          0x2302
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS                        0x2303
+#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS                      0x2304
+#define SUNI1x10GEXP_REG_PL4IO_CONFIG                                    0x2305
+
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_1                                   0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_2                                   0x3041
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_3                                   0x3042
+#define SUNI1x10GEXP_REG_TXXG_INTERRUPT                                  0x3043
+#define SUNI1x10GEXP_REG_TXXG_STATUS                                     0x3044
+#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE                             0x3045
+#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE                             0x3046
+#define SUNI1x10GEXP_REG_TXXG_SA_15_0                                    0x3047
+#define SUNI1x10GEXP_REG_TXXG_SA_31_16                                   0x3048
+#define SUNI1x10GEXP_REG_TXXG_SA_47_32                                   0x3049
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER                                0x304D
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL                       0x304E
+#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER                       0x3051
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG                       0x3052
+
+#define SUNI1x10GEXP_REG_XTEF_CTRL                                       0x3080
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS                           0x3084
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE                           0x3085
+#define SUNI1x10GEXP_REG_XTEF_VISIBILITY                                 0x3086
+
+#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG                                0x30C0
+#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG                          0x30C1
+#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG                      0x30C2
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES                        0x30C3
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES                        0x30C4
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES                        0x30C5
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE                          0x30C6
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS                          0x30C7
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB                          0x30C8
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB                          0x30C9
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB                        0x30CA
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB                        0x30CB
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK                            0x30CC
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK                            0x30CD
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK                            0x30CE
+#define SUNI1x10GEXP_REG_TXOAM_COSET                                     0x30CF
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB                 0x30D0
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB                 0x30D1
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB               0x30D2
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB               0x30D3
+
+
+#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG                              0x3200
+#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS                         0x3201
+#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS                      0x3202
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT                       0x3203
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT                      0x3204
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT    0x3205
+#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT  0x3206
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD           0x3207
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE                 0x320C
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION             0x320D
+#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION                          0x3210
+
+#define SUNI1x10GEXP_REG_PL4IDU_CONFIG                                   0x3280
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK                           0x3282
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT                                0x3283
+
+
+/*----------------------------------------*/
+#define SUNI1x10GEXP_REG_MAX_OFFSET                                      0x3480
+
+/******************************************************************************/
+/*                 -- End register offset definitions --                      */
+/******************************************************************************/
+
+/******************************************************************************/
+/** SUNI-1x10GE-XP REGISTER BIT MASKS                                        **/
+/******************************************************************************/
+
+#define SUNI1x10GEXP_BITMSK_BITS_1   0x00001
+#define SUNI1x10GEXP_BITMSK_BITS_2   0x00003
+#define SUNI1x10GEXP_BITMSK_BITS_3   0x00007
+#define SUNI1x10GEXP_BITMSK_BITS_4   0x0000f
+#define SUNI1x10GEXP_BITMSK_BITS_5   0x0001f
+#define SUNI1x10GEXP_BITMSK_BITS_6   0x0003f
+#define SUNI1x10GEXP_BITMSK_BITS_7   0x0007f
+#define SUNI1x10GEXP_BITMSK_BITS_8   0x000ff
+#define SUNI1x10GEXP_BITMSK_BITS_9   0x001ff
+#define SUNI1x10GEXP_BITMSK_BITS_10  0x003ff
+#define SUNI1x10GEXP_BITMSK_BITS_11  0x007ff
+#define SUNI1x10GEXP_BITMSK_BITS_12  0x00fff
+#define SUNI1x10GEXP_BITMSK_BITS_13  0x01fff
+#define SUNI1x10GEXP_BITMSK_BITS_14  0x03fff
+#define SUNI1x10GEXP_BITMSK_BITS_15  0x07fff
+#define SUNI1x10GEXP_BITMSK_BITS_16  0x0ffff
+
+#define mSUNI1x10GEXP_CLR_MSBITS_1(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_15)
+#define mSUNI1x10GEXP_CLR_MSBITS_2(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_14)
+#define mSUNI1x10GEXP_CLR_MSBITS_3(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_13)
+#define mSUNI1x10GEXP_CLR_MSBITS_4(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_12)
+#define mSUNI1x10GEXP_CLR_MSBITS_5(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_11)
+#define mSUNI1x10GEXP_CLR_MSBITS_6(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_10)
+#define mSUNI1x10GEXP_CLR_MSBITS_7(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_9)
+#define mSUNI1x10GEXP_CLR_MSBITS_8(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_8)
+#define mSUNI1x10GEXP_CLR_MSBITS_9(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_7)
+#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6)
+#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5)
+#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4)
+#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3)
+#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2)
+#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1)
+
+#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0)
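+/*
+ * Usage sketch (illustrative only): given a value read from the Device
+ * Status register (0x0004),
+ * mSUNI1x10GEXP_GET_BIT(val, SUNI1x10GEXP_BITMSK_TOP_PAUSED) yields 1 if
+ * the TOP_PAUSED bit is set and 0 otherwise.
+ */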
+
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x0001: S/UNI-1x10GE-XP Product Revision
+ *    Bit 3-0  REVISION
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_REVISION  0x000F
+
+/*----------------------------------------------------------------------------
+ * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control
+ *    Bit 2  XAUI_ARESETB
+ *    Bit 1  PL4_ARESETB
+ *    Bit 0  DRESETB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XAUI_ARESET  0x0004
+#define SUNI1x10GEXP_BITMSK_PL4_ARESET   0x0002
+#define SUNI1x10GEXP_BITMSK_DRESETB      0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control
+ *    Bit 11  PL4IO_OUTCLKSEL
+ *    Bit 9   SYSPCSLB
+ *    Bit 8   LINEPCSLB
+ *    Bit 7   MSTAT_BYPASS
+ *    Bit 6   RXXG_BYPASS
+ *    Bit 5   TXXG_BYPASS
+ *    Bit 4   SOP_PAD_EN
+ *    Bit 1   LOS_INV
+ *    Bit 0   OVERRIDE_LOS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL  0x0800
+#define SUNI1x10GEXP_BITMSK_SYSPCSLB         0x0200
+#define SUNI1x10GEXP_BITMSK_LINEPCSLB        0x0100
+#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS     0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS      0x0040
+#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS      0x0020
+#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN       0x0010
+#define SUNI1x10GEXP_BITMSK_LOS_INV          0x0002
+#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS     0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0004: S/UNI-1x10GE-XP Device Status
+ *    Bit 9 TOP_SXRA_EXPIRED
+ *    Bit 8 TOP_MDIO_BUSY
+ *    Bit 7 TOP_DTRB
+ *    Bit 6 TOP_EXPIRED
+ *    Bit 5 TOP_PAUSED
+ *    Bit 4 TOP_PL4_ID_DOOL
+ *    Bit 3 TOP_PL4_IS_DOOL
+ *    Bit 2 TOP_PL4_ID_ROOL
+ *    Bit 1 TOP_PL4_IS_ROOL
+ *    Bit 0 TOP_PL4_OUT_ROOL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED  0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY     0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_DTRB          0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED       0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PAUSED        0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL   0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL   0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL   0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL   0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0005: Global Performance Update and Clock Monitors
+ *    Bit 15 TIP
+ *    Bit 8  XAUI_REF_CLKA
+ *    Bit 7  RXLANE3CLKA
+ *    Bit 6  RXLANE2CLKA
+ *    Bit 5  RXLANE1CLKA
+ *    Bit 4  RXLANE0CLKA
+ *    Bit 3  CSUCLKA
+ *    Bit 2  TDCLKA
+ *    Bit 1  RSCLKA
+ *    Bit 0  RDCLKA
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TIP            0x8000
+#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA  0x0100
+#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA    0x0080
+#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA    0x0040
+#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA    0x0020
+#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA    0x0010
+#define SUNI1x10GEXP_BITMSK_CSUCLKA        0x0008
+#define SUNI1x10GEXP_BITMSK_TDCLKA         0x0004
+#define SUNI1x10GEXP_BITMSK_RSCLKA         0x0002
+#define SUNI1x10GEXP_BITMSK_RDCLKA         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0006: MDIO Command
+ *    Bit 4 MDIO_RDINC
+ *    Bit 3 MDIO_RSTAT
+ *    Bit 2 MDIO_LCTLD
+ *    Bit 1 MDIO_LCTLA
+ *    Bit 0 MDIO_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_RDINC  0x0010
+#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT  0x0008
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD  0x0004
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA  0x0002
+#define SUNI1x10GEXP_BITMSK_MDIO_SPRE   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0007: MDIO Interrupt Enable
+ *    Bit 0 MDIO_BUSY_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0008: MDIO Interrupt Status
+ *    Bit 0 MDIO_BUSYI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0009: MMD PHY Address
+ *    Bit 12-8 MDIO_DEVADR
+ *    Bit 4-0 MDIO_PRTADR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR  0x1F00
+#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR  8
+#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR  0x001F
+#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x000C: OAM Interface Control
+ *    Bit 6 MDO_OD_ENB
+ *    Bit 5 MDI_INV
+ *    Bit 4 MDI_SEL
+ *    Bit 3 RXOAMEN
+ *    Bit 2 RXOAMCLKEN
+ *    Bit 1 TXOAMEN
+ *    Bit 0 TXOAMCLKEN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB  0x0040
+#define SUNI1x10GEXP_BITMSK_MDI_INV     0x0020
+#define SUNI1x10GEXP_BITMSK_MDI_SEL     0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAMEN     0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN  0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAMEN     0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status
+ *    Bit 15 TOP_PL4IO_INT
+ *    Bit 14 TOP_IRAM_INT
+ *    Bit 13 TOP_ERAM_INT
+ *    Bit 12 TOP_XAUI_INT
+ *    Bit 11 TOP_MSTAT_INT
+ *    Bit 10 TOP_RXXG_INT
+ *    Bit 9 TOP_TXXG_INT
+ *    Bit 8 TOP_XRF_INT
+ *    Bit 7 TOP_XTEF_INT
+ *    Bit 6 TOP_MDIO_BUSY_INT
+ *    Bit 5 TOP_RXOAM_INT
+ *    Bit 4 TOP_TXOAM_INT
+ *    Bit 3 TOP_IFLX_INT
+ *    Bit 2 TOP_EFLX_INT
+ *    Bit 1 TOP_PL4ODP_INT
+ *    Bit 0 TOP_PL4IDU_INT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT      0x8000
+#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT       0x4000
+#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT       0x2000
+#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT       0x1000
+#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT      0x0800
+#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT       0x0400
+#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT       0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT        0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT       0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT  0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT      0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT      0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT       0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT       0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT     0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT     0x0001
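+
+/*
+ * Register 0x000D summarizes which functional block has an interrupt
+ * pending; an interrupt handler would typically test these masks against
+ * the value read from it, for example (with "status" a hypothetical
+ * variable holding that value):
+ *
+ *	if (status & SUNI1x10GEXP_BITMSK_TOP_RXXG_INT)
+ *		... handle the RXXG block interrupt ...
+ */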
+
+/*----------------------------------------------------------------------------
+ * Register 0x000E: PM3393 Global Interrupt Enable
+ *    Bit 15 TOP_INTE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_INTE  0x8000
+
+/*----------------------------------------------------------------------------
+ * Register 0x0010: XTEF Miscellaneous Control
+ *    Bit 7 RF_VAL
+ *    Bit 6 RF_OVERRIDE
+ *    Bit 5 LF_VAL
+ *    Bit 4 LF_OVERRIDE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RF_VAL             0x0080
+#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE        0x0040
+#define SUNI1x10GEXP_BITMSK_LF_VAL             0x0020
+#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE        0x0010
+#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL  0x00F0
+
+/*----------------------------------------------------------------------------
+ * Register 0x0011: XRF Miscellaneous Control
+ *    Bit 6-4 EN_IDLE_REP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP  0x0070
+
+/*----------------------------------------------------------------------------
+ * Register 0x0100: SERDES 3125 Configuration Register 1
+ *    Bit 10 RXEQB_3
+ *    Bit 8  RXEQB_2
+ *    Bit 6  RXEQB_1
+ *    Bit 4  RXEQB_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXEQB    0x0FF0
+#define SUNI1x10GEXP_BITOFF_RXEQB_3  10
+#define SUNI1x10GEXP_BITOFF_RXEQB_2  8
+#define SUNI1x10GEXP_BITOFF_RXEQB_1  6
+#define SUNI1x10GEXP_BITOFF_RXEQB_0  4
+
+/*----------------------------------------------------------------------------
+ * Register 0x0101: SERDES 3125 Configuration Register 2
+ *    Bit 12 YSEL
+ *    Bit  7 PRE_EMPH_3
+ *    Bit  6 PRE_EMPH_2
+ *    Bit  5 PRE_EMPH_1
+ *    Bit  4 PRE_EMPH_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_YSEL        0x1000
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH    0x00F0
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3  0x0080
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2  0x0040
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1  0x0020
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0  0x0010
+
+/*----------------------------------------------------------------------------
+ * Register 0x0102: SERDES 3125 Interrupt Enable Register
+ *    Bit 3 LASIE
+ *    Bit 2 SPLL_RAE
+ *    Bit 1 MPLL_RAE
+ *    Bit 0 PLL_LOCKE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIE      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAE   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAE   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0103: SERDES 3125 Interrupt Visibility Register
+ *    Bit 3 LASIV
+ *    Bit 2 SPLL_RAV
+ *    Bit 1 MPLL_RAV
+ *    Bit 0 PLL_LOCKV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIV      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAV   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAV   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0104: SERDES 3125 Interrupt Status Register
+ *    Bit 3 LASII
+ *    Bit 2 SPLL_RAI
+ *    Bit 1 MPLL_RAI
+ *    Bit 0 PLL_LOCKI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASII      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAI   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAI   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0107: SERDES 3125 Test Configuration
+ *    Bit 12 DUALTX
+ *    Bit 10 HC_1
+ *    Bit  9 HC_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_DUALTX  0x1000
+#define SUNI1x10GEXP_BITMSK_HC      0x0600
+#define SUNI1x10GEXP_BITOFF_HC_0    9
+
+/*----------------------------------------------------------------------------
+ * Register 0x2040: RXXG Configuration 1
+ *    Bit 15  RXXG_RXEN
+ *    Bit 14  RXXG_ROCF
+ *    Bit 13  RXXG_PAD_STRIP
+ *    Bit 10  RXXG_PUREP
+ *    Bit 9   RXXG_LONGP
+ *    Bit 8   RXXG_PARF
+ *    Bit 7   RXXG_FLCHK
+ *    Bit 5   RXXG_PASS_CTRL
+ *    Bit 3   RXXG_CRC_STRIP
+ *    Bit 2-0 RXXG_MIFG
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_RXEN       0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_ROCF       0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP  0x2000
+#define SUNI1x10GEXP_BITMSK_RXXG_PUREP      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_LONGP      0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_PARF       0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK      0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL  0x0020
+#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP  0x0008
+
+/*----------------------------------------------------------------------------
+ * Register 0x2041: RXXG Configuration 2
+ *    Bit 7-0 RXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x2042: RXXG Configuration 3
+ *    Bit 15 RXXG_MIN_LERRE
+ *    Bit 14 RXXG_MAX_LERRE
+ *    Bit 12 RXXG_LINE_ERRE
+ *    Bit 10 RXXG_RX_OVRE
+ *    Bit 9  RXXG_ADR_FILTERE
+ *    Bit 8  RXXG_ERR_FILTERE
+ *    Bit 5  RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE     0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE     0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE     0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE       0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE   0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE  0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE     0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2043: RXXG Interrupt
+ *    Bit 15 RXXG_MIN_LERRI
+ *    Bit 14 RXXG_MAX_LERRI
+ *    Bit 12 RXXG_LINE_ERRI
+ *    Bit 10 RXXG_RX_OVRI
+ *    Bit 9  RXXG_ADR_FILTERI
+ *    Bit 8  RXXG_ERR_FILTERI
+ *    Bit 5  RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI    0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI    0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI    0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI  0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI  0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE    0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2049: RXXG Receive FIFO Threshold
+ *    Bit 2-0 RXXG_CUT_THRU
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU  0x0007
+#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2062 - 0x2069: RXXG Exact Match VID
+ *    Bit 11-0 RXXG_VID_MATCH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH  0x0FFF
+#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x206E - 0x206F: RXXG Address Filter Control
+ *    Bit 3 RXXG_FORWARD_ENABLE
+ *    Bit 2 RXXG_VLAN_ENABLE
+ *    Bit 1 RXXG_SRC_ADDR
+ *    Bit 0 RXXG_MATCH_ENABLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE  0x0008
+#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE     0x0004
+#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR        0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE    0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2070: RXXG Address Filter Control 2
+ *    Bit 1 RXXG_PMODE
+ *    Bit 0 RXXG_MHASH_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_PMODE     0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2081: XRF Control Register 2
+ *    Bit 6   EN_PKT_GEN
+ *    Bit 4-2 PATT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN  0x0040
+#define SUNI1x10GEXP_BITMSK_PATT        0x001C
+#define SUNI1x10GEXP_BITOFF_PATT        2
+
+/*----------------------------------------------------------------------------
+ * Register 0x2088: XRF Interrupt Enable
+ *    Bit 12-9 LANE_HICERE
+ *    Bit 8-5  HS_SD_LANEE
+ *    Bit 4    ALIGN_STATUS_ERRE
+ *    Bit 3-0  LANE_SYNC_STAT_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERE          0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERE          9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE          0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE          5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE    0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2089: XRF Interrupt Status
+ *    Bit 12-9 LANE_HICERI
+ *    Bit 8-5  HS_SD_LANEI
+ *    Bit 4    ALIGN_STATUS_ERRI
+ *    Bit 3-0  LANE_SYNC_STAT_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERI          0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERI          9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI          0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEI          5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI    0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208A: XRF Error Status
+ *    Bit 8-5  HS_SD_LANE
+ *    Bit 4    ALIGN_STATUS_ERR
+ *    Bit 3-0  LANE_SYNC_STAT_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3          0x0100
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2          0x0080
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1          0x0040
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0          0x0020
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR     0x0010
+#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR  0x0008
+#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR  0x0004
+#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR  0x0002
+#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x208B: XRF Diagnostic Interrupt Enable
+ *    Bit 7-4 LANE_OVERRUNE
+ *    Bit 3-0 LANE_UNDERRUNE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE   0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE   4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208C: XRF Diagnostic Interrupt Status
+ *    Bit 7-4 LANE_OVERRUNI
+ *    Bit 3-0 LANE_UNDERRUNI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI   0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI   4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C0: RXOAM Configuration
+ *    Bit 15    RXOAM_BUSY
+ *    Bit 14-12 RXOAM_F2_SEL
+ *    Bit 10-8  RXOAM_F1_SEL
+ *    Bit 7-6   RXOAM_FILTER_CTRL
+ *    Bit 5-0   RXOAM_PX_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY         0x8000
+#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL       0x7000
+#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL       12
+#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL       0x0700
+#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL       8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL  0x00C0
+#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL  6
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN        0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN        0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C1,0x20C2: RXOAM Filter Configuration
+ *    Bit 15-8 RXOAM_FX_MASK
+ *    Bit 7-0  RXOAM_FX_VAL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK  0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK  8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL   0x00FF
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAL   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C3: RXOAM Configuration Register 2
+ *    Bit 13    RXOAM_REC_BYTE_VAL
+ *    Bit 11-10 RXOAM_BYPASS_MODE
+ *    Bit 5-0   RXOAM_PX_CLEAR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL  0x2000
+#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE   0x0C00
+#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE   10
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR      0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR      0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C4: RXOAM HEC Configuration
+ *    Bit 15-8 RXOAM_COSET
+ *    Bit 2    RXOAM_HEC_ERR_PKT
+ *    Bit 0    RXOAM_HEC_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_COSET        0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_COSET        8
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT  0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN       0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C7: RXOAM Interrupt Enable
+ *    Bit 10 RXOAM_FILTER_THRSHE
+ *    Bit 9  RXOAM_OAM_ERRE
+ *    Bit 8  RXOAM_HECE_THRSHE
+ *    Bit 7  RXOAM_SOPE
+ *    Bit 6  RXOAM_RFE
+ *    Bit 5  RXOAM_LFE
+ *    Bit 4  RXOAM_DV_ERRE
+ *    Bit 3  RXOAM_DATA_INVALIDE
+ *    Bit 2  RXOAM_FILTER_DROPE
+ *    Bit 1  RXOAM_HECE
+ *    Bit 0  RXOAM_OFLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE       0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE           0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFE            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFE            0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE        0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE  0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE   0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE           0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE           0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C8: RXOAM Interrupt Status
+ *    Bit 10 RXOAM_FILTER_THRSHI
+ *    Bit 9  RXOAM_OAM_ERRI
+ *    Bit 8  RXOAM_HECE_THRSHI
+ *    Bit 7  RXOAM_SOPI
+ *    Bit 6  RXOAM_RFI
+ *    Bit 5  RXOAM_LFI
+ *    Bit 4  RXOAM_DV_ERRI
+ *    Bit 3  RXOAM_DATA_INVALIDI
+ *    Bit 2  RXOAM_FILTER_DROPI
+ *    Bit 1  RXOAM_HECI
+ *    Bit 0  RXOAM_OFLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI       0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI           0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFI            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFI            0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI        0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI  0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI   0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECI           0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI           0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C9: RXOAM Status
+ *    Bit 10 RXOAM_FILTER_THRSHV
+ *    Bit 8  RXOAM_HECE_THRSHV
+ *    Bit 6  RXOAM_RFV
+ *    Bit 5  RXOAM_LFV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFV            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFV            0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2100: MSTAT Control
+ *    Bit 2 MSTAT_WRITE
+ *    Bit 1 MSTAT_CLEAR
+ *    Bit 0 MSTAT_SNAP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE  0x0004
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR  0x0002
+#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2109: MSTAT Counter Write Address
+ *    Bit 5-0 MSTAT_WRITE_ADDRESS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F
+#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2200: IFLX Global Configuration Register
+ *    Bit 15   IFLX_IRCU_ENABLE
+ *    Bit 14   IFLX_IDSWT_ENABLE
+ *    Bit 13-0 IFLX_IFD_CNT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT       0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT       0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2209: IFLX FIFO Overflow Enable
+ *    Bit 0 IFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220A: IFLX FIFO Overflow Interrupt
+ *    Bit 0 IFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220D: IFLX Indirect Channel Address
+ *    Bit 15 IFLX_BUSY
+ *    Bit 14 IFLX_RWB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_BUSY  0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_RWB   0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision
+ *    Bit 9-0 IFLX_LOLIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM  0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x220F: IFLX Indirect Logical FIFO High Limit
+ *    Bit 9-0 IFLX_HILIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_HILIM  0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_HILIM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit
+ *    Bit 15   IFLX_FULL
+ *    Bit 14   IFLX_AFULL
+ *    Bit 13-0 IFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_FULL   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFULL  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFTH   0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AFTH   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit
+ *    Bit 15   IFLX_EMPTY
+ *    Bit 14   IFLX_AEMPTY
+ *    Bit 13-0 IFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AETH    0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AETH    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2240: PL4MOS Configuration Register
+ *    Bit 3 PL4MOS_RE_INIT
+ *    Bit 2 PL4MOS_EN
+ *    Bit 1 PL4MOS_NO_STATUS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT          0x0008
+#define SUNI1x10GEXP_BITMSK_PL4MOS_EN               0x0004
+#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS        0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2243: PL4MOS MaxBurst1 Register
+ *    Bit 11-0 PL4MOS_MAX_BURST1
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1  0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2244: PL4MOS MaxBurst2 Register
+ *    Bit 11-0 PL4MOS_MAX_BURST2
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2  0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2245: PL4MOS Transfer Size Register
+ *    Bit 7-0 PL4MOS_MAX_TRANSFER
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2280: PL4ODP Configuration
+ *    Bit 15-12 PL4ODP_REPEAT_T
+ *    Bit 8     PL4ODP_SOP_RULE
+ *    Bit 1     PL4ODP_EN_PORTS
+ *    Bit 0     PL4ODP_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T   0xF000
+#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T   12
+#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS   0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD    0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2282: PL4ODP Interrupt Mask
+ *    Bit 0 PL4ODP_OUT_DISE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE     0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE  0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE  0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE    0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE    0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE      0x0002
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x2283: PL4ODP Interrupt
+ *    Bit 0 PL4ODP_OUT_DISI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI     0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI  0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI  0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI    0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI    0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI      0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2300:  PL4IO Lock Detect Status
+ *    Bit 15 PL4IO_OUT_ROOLV
+ *    Bit 12 PL4IO_IS_ROOLV
+ *    Bit 11 PL4IO_DIP2_ERRV
+ *    Bit 8  PL4IO_ID_ROOLV
+ *    Bit 4  PL4IO_IS_DOOLV
+ *    Bit 0  PL4IO_ID_DOOLV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2301:  PL4IO Lock Detect Change
+ *    Bit 15 PL4IO_OUT_ROOLI
+ *    Bit 12 PL4IO_IS_ROOLI
+ *    Bit 11 PL4IO_DIP2_ERRI
+ *    Bit 8  PL4IO_ID_ROOLI
+ *    Bit 4  PL4IO_IS_DOOLI
+ *    Bit 0  PL4IO_ID_DOOLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2302:  PL4IO Lock Detect Mask
+ *    Bit 15 PL4IO_OUT_ROOLE
+ *    Bit 12 PL4IO_IS_ROOLE
+ *    Bit 11 PL4IO_DIP2_ERRE
+ *    Bit 8  PL4IO_ID_ROOLE
+ *    Bit 4  PL4IO_IS_DOOLE
+ *    Bit 0  PL4IO_ID_DOOLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2303:  PL4IO Lock Detect Limits
+ *    Bit 15-8 PL4IO_REF_LIMIT
+ *    Bit 7-0  PL4IO_TRAN_LIMIT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT   0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT   8
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2304:  PL4IO Calendar Repetitions
+ *    Bit 15-8 PL4IO_IN_MUL
+ *    Bit 7-0  PL4IO_OUT_MUL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL   0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL   8
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2305:  PL4IO Configuration
+ *    Bit 15  PL4IO_DIP2_ERR_CHK
+ *    Bit 11  PL4IO_ODAT_DIS
+ *    Bit 10  PL4IO_TRAIN_DIS
+ *    Bit 9   PL4IO_OSTAT_DIS
+ *    Bit 8   PL4IO_ISTAT_DIS
+ *    Bit 7   PL4IO_NO_ISTAT
+ *    Bit 6   PL4IO_STAT_OUTSEL
+ *    Bit 5   PL4IO_INSEL
+ *    Bit 4   PL4IO_DLSEL
+ *    Bit 1-0 PL4IO_OUTSEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS      0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS     0x0400
+#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS     0x0200
+#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS     0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT      0x0080
+#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL   0x0040
+#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL         0x0020
+#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL         0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL        0x0003
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL        0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3040: TXXG Configuration Register 1
+ *    Bit 15   TXXG_TXEN0
+ *    Bit 13   TXXG_HOSTPAUSE
+ *    Bit 12-7 TXXG_IPGT
+ *    Bit 5    TXXG_32BIT_ALIGN
+ *    Bit 4    TXXG_CRCEN
+ *    Bit 3    TXXG_FCTX
+ *    Bit 2    TXXG_FCRX
+ *    Bit 1    TXXG_PADEN
+ *    Bit 0    TXXG_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0        0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE    0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_IPGT         0x1F80
+#define SUNI1x10GEXP_BITOFF_TXXG_IPGT         7
+#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN  0x0020
+#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN        0x0010
+#define SUNI1x10GEXP_BITMSK_TXXG_FCTX         0x0008
+#define SUNI1x10GEXP_BITMSK_TXXG_FCRX         0x0004
+#define SUNI1x10GEXP_BITMSK_TXXG_PADEN        0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_SPRE         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3041: TXXG Configuration Register 2
+ *    Bit 7-0   TXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3042: TXXG Configuration Register 3
+ *    Bit 15 TXXG_FIFO_ERRE
+ *    Bit 14 TXXG_FIFO_UDRE
+ *    Bit 13 TXXG_MAX_LERRE
+ *    Bit 12 TXXG_MIN_LERRE
+ *    Bit 11 TXXG_XFERE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE  0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE  0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE  0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE  0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERE      0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3043: TXXG Interrupt
+ *    Bit 15 TXXG_FIFO_ERRI
+ *    Bit 14 TXXG_FIFO_UDRI
+ *    Bit 13 TXXG_MAX_LERRI
+ *    Bit 12 TXXG_MIN_LERRI
+ *    Bit 11 TXXG_XFERI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI  0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI  0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI  0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI  0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERI      0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3044: TXXG Status Register
+ *    Bit 1 TXXG_TXACTIVE
+ *    Bit 0 TXXG_PAUSED
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE  0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED    0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3046: TXXG TX_MINFR -  Transmit Min Frame Size Register
+ *    Bit 7-0 TXXG_TX_MINFR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR  0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3052: TXXG Pause Quantum Value Configuration Register
+ *    Bit 7-0 TXXG_FC_PAUSE_QNTM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM  0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3080: XTEF Control
+ *    Bit 3-0 XTEF_FORCE_PARITY_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR  0x000F
+#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3084: XTEF Interrupt Event Register
+ *    Bit 0 XTEF_LOST_SYNCI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3085: XTEF Interrupt Enable Register
+ *    Bit 0 XTEF_LOST_SYNCE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3086: XTEF Visibility Register
+ *    Bit 0 XTEF_LOST_SYNCV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C0: TXOAM OAM Configuration
+ *    Bit 15   TXOAM_HEC_EN
+ *    Bit 14   TXOAM_EMPTYCODE_EN
+ *    Bit 13   TXOAM_FORCE_IDLE
+ *    Bit 12   TXOAM_IGNORE_IDLE
+ *    Bit 11-6 TXOAM_PX_OVERWRITE
+ *    Bit 5-0  TXOAM_PX_SEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN        0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN  0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE    0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE   0x1000
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE  0x0FC0
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE  6
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL        0x003F
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL        0
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C1: TXOAM Mini-Packet Rate Configuration
+ *    Bit 15   TXOAM_MINIDIS
+ *    Bit 14   TXOAM_BUSY
+ *    Bit 13   TXOAM_TRANS_EN
+ *    Bit 10-0 TXOAM_MINIRATE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS   0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY      0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN  0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE  0x07FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration
+ *    Bit 13-10 TXOAM_FTHRESH
+ *    Bit 9-6   TXOAM_MINIPOST
+ *    Bit 5-0   TXOAM_MINIPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH   0x3C00
+#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH   10
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST  0x03C0
+#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST  6
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE   0x003F
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C6: TXOAM Interrupt Enable
+ *    Bit 2 TXOAM_SOP_ERRE
+ *    Bit 1 TXOAM_OFLE
+ *    Bit 0 TXOAM_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE    0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE        0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE        0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C7: TXOAM Interrupt Status
+ *    Bit 2 TXOAM_SOP_ERRI
+ *    Bit 1 TXOAM_OFLI
+ *    Bit 0 TXOAM_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI    0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI        0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI        0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30CF: TXOAM Coset
+ *    Bit 7-0 TXOAM_COSET
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_COSET  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3200: EFLX Global Configuration
+ *    Bit 15 EFLX_ERCU_EN
+ *    Bit 7  EFLX_EN_EDSWT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT  0x0080
+
+/*----------------------------------------------------------------------------
+ * Register 0x3201: EFLX ERCU Global Status
+ *    Bit 13 EFLX_OVF_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR  0x2000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3202: EFLX Indirect Channel Address
+ *    Bit 15 EFLX_BUSY
+ *    Bit 14 EFLX_RDWRB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_BUSY   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB  0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3203: EFLX Indirect Logical FIFO Low Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM                    0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM                    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3204: EFLX Indirect Logical FIFO High Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_HILIM                    0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_HILIM                    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit
+ *    Bit 15   EFLX_FULL
+ *    Bit 14   EFLX_AFULL
+ *    Bit 13-0 EFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_FULL   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFULL  0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFTH   0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AFTH   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit
+ *    Bit 15   EFLX_EMPTY
+ *    Bit 14   EFLX_AEMPTY
+ *    Bit 13-0 EFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY  0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AETH    0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AETH    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU                 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU                 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x320C: EFLX FIFO Overflow Error Enable
+ *    Bit 0 EFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x320D: EFLX FIFO Overflow Error Indication
+ *    Bit 0 EFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3210: EFLX Channel Provision
+ *    Bit 0 EFLX_PROV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_PROV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3280: PL4IDU Configuration
+ *    Bit 2 PL4IDU_SYNCH_ON_TRAIN
+ *    Bit 1 PL4IDU_EN_PORTS
+ *    Bit 0 PL4IDU_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN  0x0004
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS        0x0002
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3282: PL4IDU Interrupt Mask
+ *    Bit 1 PL4IDU_DIP4E
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E       0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x3283: PL4IDU Interrupt
+ *    Bit 1 PL4IDU_DIP4I
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I       0x0002
+
+#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
+
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
new file mode 100644
index 0000000..b146aca
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -0,0 +1,171 @@
+/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
+#include "common.h"
+#include "regs.h"
+#include "tp.h"
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+#endif
+
+struct petp {
+	adapter_t *adapter;
+};
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT  1
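+
+/*
+ * tp_init() below converts DROP_MSEC into core-clock ticks as
+ * DROP_MSEC * (tp_clk / 1000); with a hypothetical tp_clk of
+ * 125000000 (125 MHz) that works out to 16 * 125000 = 2000000 ticks.
+ */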
+
+static void tp_init(adapter_t * ap, const struct tp_params *p,
+		    unsigned int tp_clk)
+{
+	u32 val;
+
+	if (!t1_is_asic(ap))
+		return;
+
+	val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+		F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+	if (!p->pm_size)
+		val |= F_OFFLOAD_DISABLE;
+	else
+		val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
+	writel(val, ap->regs + A_TP_IN_CONFIG);
+	writel(F_TP_OUT_CSPI_CPL |
+	       F_TP_OUT_ESPI_ETHERNET |
+	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
+	writel(V_IP_TTL(64) |
+	       F_PATH_MTU /* IP DF bit */  |
+	       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
+	       V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
+	/*
+	 * Enable pause frame deadlock prevention.
+	 */
+	if (is_T2(ap) && ap->params.nports > 1) {
+		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+		       V_DROP_TICKS_CNT(drop_ticks) |
+		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+		       ap->regs + A_TP_TX_DROP_CONFIG);
+	}
+}
+
+void t1_tp_destroy(struct petp *tp)
+{
+	kfree(tp);
+}
+
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p)
+{
+	struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+
+	if (!tp)
+		return NULL;
+
+	tp->adapter = adapter;
+
+	return tp;
+}
+
+void t1_tp_intr_enable(struct petp *tp)
+{
+	u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		/* FPGA */
+		writel(0xffffffff,
+		       tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+		writel(tp_intr | FPGA_PCIX_INTERRUPT_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	} else
+#endif
+	{
+		/* We don't use any TP interrupts */
+		writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+		writel(tp_intr | F_PL_INTR_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	}
+}
+
+void t1_tp_intr_disable(struct petp *tp)
+{
+	u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		/* FPGA */
+		writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+		writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	} else
+#endif
+	{
+		writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+		writel(tp_intr & ~F_PL_INTR_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	}
+}
+
+void t1_tp_intr_clear(struct petp *tp)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		writel(0xffffffff,
+		       tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+		writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE);
+		return;
+	}
+#endif
+	writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
+	writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
+}
+
+int t1_tp_intr_handler(struct petp *tp)
+{
+	u32 cause;
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	/* FPGA doesn't support TP interrupts. */
+	if (!t1_is_asic(tp->adapter))
+		return 1;
+#endif
+
+	cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
+	writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
+	return 0;
+}
+
+static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
+{
+	u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	if (enable)
+		val |= csum_bit;
+	else
+		val &= ~csum_bit;
+	writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
+{
+	set_csum_offload(tp, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
+{
+	set_csum_offload(tp, F_TCP_CSUM, enable);
+}
+
+/*
+ * Initialize TP state.  tp_params contains initial settings for some TP
+ * parameters, particularly the one-time PM and CM settings.
+ */
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
+{
+	adapter_t *adapter = tp->adapter;
+
+	tp_init(adapter, p, tp_clk);
+	writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
new file mode 100644
index 0000000..dfd8ce2
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -0,0 +1,72 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
+#ifndef CHELSIO_TP_H
+#define CHELSIO_TP_H
+
+#include "common.h"
+
+#define TP_MAX_RX_COALESCING_SIZE 16224U
+
+struct tp_mib_statistics {
+
+	/* IP */
+	u32 ipInReceive_hi;
+	u32 ipInReceive_lo;
+	u32 ipInHdrErrors_hi;
+	u32 ipInHdrErrors_lo;
+	u32 ipInAddrErrors_hi;
+	u32 ipInAddrErrors_lo;
+	u32 ipInUnknownProtos_hi;
+	u32 ipInUnknownProtos_lo;
+	u32 ipInDiscards_hi;
+	u32 ipInDiscards_lo;
+	u32 ipInDelivers_hi;
+	u32 ipInDelivers_lo;
+	u32 ipOutRequests_hi;
+	u32 ipOutRequests_lo;
+	u32 ipOutDiscards_hi;
+	u32 ipOutDiscards_lo;
+	u32 ipOutNoRoutes_hi;
+	u32 ipOutNoRoutes_lo;
+	u32 ipReasmTimeout;
+	u32 ipReasmReqds;
+	u32 ipReasmOKs;
+	u32 ipReasmFails;
+
+	u32 reserved[8];
+
+	/* TCP */
+	u32 tcpActiveOpens;
+	u32 tcpPassiveOpens;
+	u32 tcpAttemptFails;
+	u32 tcpEstabResets;
+	u32 tcpOutRsts;
+	u32 tcpCurrEstab;
+	u32 tcpInSegs_hi;
+	u32 tcpInSegs_lo;
+	u32 tcpOutSegs_hi;
+	u32 tcpOutSegs_lo;
+	u32 tcpRetransSeg_hi;
+	u32 tcpRetransSeg_lo;
+	u32 tcpInErrs_hi;
+	u32 tcpInErrs_lo;
+	u32 tcpRtoMin;
+	u32 tcpRtoMax;
+};
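+
+/*
+ * The _hi/_lo members above appear to be the high and low 32-bit halves of
+ * 64-bit hardware counters; a caller would typically reassemble one as,
+ * for example:
+ *
+ *	u64 in_segs = ((u64)tps->tcpInSegs_hi << 32) | tps->tcpInSegs_lo;
+ */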
+
+struct petp;
+struct tp_params;
+
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
+void t1_tp_destroy(struct petp *tp);
+
+void t1_tp_intr_disable(struct petp *tp);
+void t1_tp_intr_enable(struct petp *tp);
+void t1_tp_intr_clear(struct petp *tp);
+int t1_tp_intr_handler(struct petp *tp);
+
+void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
+int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
new file mode 100644
index 0000000..b0cb388
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -0,0 +1,730 @@
+/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
+
+/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
+
+#include "gmac.h"
+#include "elmer0.h"
+#include "vsc7326_reg.h"
+
+/* Update fast-changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+#define MAX_MTU 9600
+
+/* The egress WM value 0x01a01fff should be used only when the
+ * interface is down (MAC port disabled); it is a workaround to
+ * disable the T2/MAC flow control. When the interface is
+ * enabled, the WM value should be set to 0x014a03F0.
+ */
+#define WM_DISABLE	0x01a01fff
+#define WM_ENABLE	0x014a03F0
+
+struct init_table {
+	u32 addr;
+	u32 data;
+};
+
+struct _cmac_instance {
+	u32 index;
+	u32 ticks;
+};
+
+#define INITBLOCK_SLEEP	0xffffffff
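+
+/*
+ * An init_table entry whose addr field is INITBLOCK_SLEEP is not a register
+ * write: run_table() treats its data field as a delay in microseconds, so a
+ * hypothetical entry
+ *
+ *	{ INITBLOCK_SLEEP, 100 },
+ *
+ * would pause the initialization sequence for 100us.
+ */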
+
+static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
+{
+	u32 status, vlo, vhi;
+	int i;
+
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
+	i = 0;
+	do {
+		t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+		t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+		status = (vhi << 16) | vlo;
+		i++;
+	} while (((status & 1) == 0) && (i < 50));
+	if (i == 50)
+		pr_err("Invalid tpi read from MAC, breaking loop.\n");
+
+	t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
+	t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
+
+	*val = (vhi << 16) | vlo;
+
+	/* pr_err("rd: block: 0x%x  sublock: 0x%x  reg: 0x%x  data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), *val); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
+
+static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
+{
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
+	t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
+	/* pr_err("wr: block: 0x%x  sublock: 0x%x  reg: 0x%x  data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), data); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
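+
+/*
+ * As vsc_read()/vsc_write() above show, each 32-bit MAC register is carried
+ * over TPI as two 16-bit halves: the low half at (addr << 2) + 4 and the
+ * high half at (addr << 2).
+ */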
+
+/* Hard reset the MAC.  This wipes out *all* configuration. */
+static void vsc7326_full_reset(adapter_t* adapter)
+{
+	u32 val;
+	u32 result = 0xffff;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~1;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(2);
+	val |= 0x1;	/* Enable the MAC itself */
+	val |= 0x800;	/* Turn off the red LED */
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	mdelay(1);
+	vsc_write(adapter, REG_SW_RESET, 0x80000001);
+	do {
+		mdelay(1);
+		vsc_read(adapter, REG_SW_RESET, &result);
+	} while (result != 0x0);
+}
+
+static struct init_table vsc7326_reset[] = {
+	{      REG_IFACE_MODE, 0x00000000 },
+	{         REG_CRC_CFG, 0x00000020 },
+	{   REG_PLL_CLK_SPEED, 0x00050c00 },
+	{   REG_PLL_CLK_SPEED, 0x00050c00 },
+	{            REG_MSCH, 0x00002f14 },
+	{       REG_SPI4_MISC, 0x00040409 },
+	{     REG_SPI4_DESKEW, 0x00080000 },
+	{ REG_SPI4_ING_SETUP2, 0x08080004 },
+	{ REG_SPI4_ING_SETUP0, 0x04111004 },
+	{ REG_SPI4_EGR_SETUP0, 0x80001a04 },
+	{ REG_SPI4_ING_SETUP1, 0x02010000 },
+	{      REG_AGE_INC(0), 0x00000000 },
+	{      REG_AGE_INC(1), 0x00000000 },
+	{     REG_ING_CONTROL, 0x0a200011 },
+	{     REG_EGR_CONTROL, 0xa0010091 },
+};
+
+static struct init_table vsc7326_portinit[4][22] = {
+	{	/* Port 0 */
+			/* FIFO setup */
+		{           REG_DBG(0), 0x000004f0 },
+		{           REG_HDX(0), 0x00073101 },
+		{        REG_TEST(0,0), 0x00000022 },
+		{        REG_TEST(1,0), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,0), 0x003f0000 },
+		{  REG_TOP_BOTTOM(1,0), 0x00120000 },
+		{ REG_HIGH_LOW_WM(0,0), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,0), WM_DISABLE },
+		{   REG_CT_THRHLD(0,0), 0x00000000 },
+		{   REG_CT_THRHLD(1,0), 0x00000000 },
+		{         REG_BUCKE(0), 0x0002ffff },
+		{         REG_BUCKI(0), 0x0002ffff },
+		{        REG_TEST(0,0), 0x00000020 },
+		{        REG_TEST(1,0), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(0), 0x00002710 },
+		{     REG_PORT_FAIL(0), 0x00000002 },
+		{    REG_NORMALIZER(0), 0x00000a64 },
+		{        REG_DENORM(0), 0x00000010 },
+		{     REG_STICK_BIT(0), 0x03baa370 },
+		{     REG_DEV_SETUP(0), 0x00000083 },
+		{     REG_DEV_SETUP(0), 0x00000082 },
+		{      REG_MODE_CFG(0), 0x0200259f },
+	},
+	{	/* Port 1 */
+			/* FIFO setup */
+		{           REG_DBG(1), 0x000004f0 },
+		{           REG_HDX(1), 0x00073101 },
+		{        REG_TEST(0,1), 0x00000022 },
+		{        REG_TEST(1,1), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,1), 0x007e003f },
+		{  REG_TOP_BOTTOM(1,1), 0x00240012 },
+		{ REG_HIGH_LOW_WM(0,1), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,1), WM_DISABLE },
+		{   REG_CT_THRHLD(0,1), 0x00000000 },
+		{   REG_CT_THRHLD(1,1), 0x00000000 },
+		{         REG_BUCKE(1), 0x0002ffff },
+		{         REG_BUCKI(1), 0x0002ffff },
+		{        REG_TEST(0,1), 0x00000020 },
+		{        REG_TEST(1,1), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(1), 0x00002710 },
+		{     REG_PORT_FAIL(1), 0x00000002 },
+		{    REG_NORMALIZER(1), 0x00000a64 },
+		{        REG_DENORM(1), 0x00000010 },
+		{     REG_STICK_BIT(1), 0x03baa370 },
+		{     REG_DEV_SETUP(1), 0x00000083 },
+		{     REG_DEV_SETUP(1), 0x00000082 },
+		{      REG_MODE_CFG(1), 0x0200259f },
+	},
+	{	/* Port 2 */
+			/* FIFO setup */
+		{           REG_DBG(2), 0x000004f0 },
+		{           REG_HDX(2), 0x00073101 },
+		{        REG_TEST(0,2), 0x00000022 },
+		{        REG_TEST(1,2), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,2), 0x00bd007e },
+		{  REG_TOP_BOTTOM(1,2), 0x00360024 },
+		{ REG_HIGH_LOW_WM(0,2), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,2), WM_DISABLE },
+		{   REG_CT_THRHLD(0,2), 0x00000000 },
+		{   REG_CT_THRHLD(1,2), 0x00000000 },
+		{         REG_BUCKE(2), 0x0002ffff },
+		{         REG_BUCKI(2), 0x0002ffff },
+		{        REG_TEST(0,2), 0x00000020 },
+		{        REG_TEST(1,2), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(2), 0x00002710 },
+		{     REG_PORT_FAIL(2), 0x00000002 },
+		{    REG_NORMALIZER(2), 0x00000a64 },
+		{        REG_DENORM(2), 0x00000010 },
+		{     REG_STICK_BIT(2), 0x03baa370 },
+		{     REG_DEV_SETUP(2), 0x00000083 },
+		{     REG_DEV_SETUP(2), 0x00000082 },
+		{      REG_MODE_CFG(2), 0x0200259f },
+	},
+	{	/* Port 3 */
+			/* FIFO setup */
+		{           REG_DBG(3), 0x000004f0 },
+		{           REG_HDX(3), 0x00073101 },
+		{        REG_TEST(0,3), 0x00000022 },
+		{        REG_TEST(1,3), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,3), 0x00fc00bd },
+		{  REG_TOP_BOTTOM(1,3), 0x00480036 },
+		{ REG_HIGH_LOW_WM(0,3), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,3), WM_DISABLE },
+		{   REG_CT_THRHLD(0,3), 0x00000000 },
+		{   REG_CT_THRHLD(1,3), 0x00000000 },
+		{         REG_BUCKE(3), 0x0002ffff },
+		{         REG_BUCKI(3), 0x0002ffff },
+		{        REG_TEST(0,3), 0x00000020 },
+		{        REG_TEST(1,3), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(3), 0x00002710 },
+		{     REG_PORT_FAIL(3), 0x00000002 },
+		{    REG_NORMALIZER(3), 0x00000a64 },
+		{        REG_DENORM(3), 0x00000010 },
+		{     REG_STICK_BIT(3), 0x03baa370 },
+		{     REG_DEV_SETUP(3), 0x00000083 },
+		{     REG_DEV_SETUP(3), 0x00000082 },
+		{      REG_MODE_CFG(3), 0x0200259f },
+	},
+};
+
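+/*
+ * Walk a table of (address, data) pairs and write each entry to the chip.
+ * An entry whose address is INITBLOCK_SLEEP is treated as a delay instead,
+ * with the data field giving the number of microseconds to wait.
+ */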
+static void run_table(adapter_t *adapter, struct init_table *ib, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (ib[i].addr == INITBLOCK_SLEEP) {
+			udelay(ib[i].data);
+			pr_err("sleep %d us\n", ib[i].data);
+		} else {
+			vsc_write(adapter, ib[i].addr, ib[i].data);
+		}
+	}
+}
+
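+/*
+ * BIST command word layout used below: bits 31:24 command (0 = read,
+ * 1 = write), bits 23:16 BIST sub-register address, bits 15:8 write data,
+ * bits 7:0 module id.
+ */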
+static int bist_rd(adapter_t *adapter, int moduleid, int address)
+{
+	int data = 0;
+	u32 result = 0;
+
+	if ((address != 0x0) &&
+	    (address != 0x1) &&
+	    (address != 0x2) &&
+	    (address != 0xd) &&
+	    (address != 0xe))
+		pr_err("Invalid BIST address: 0x%x\n", address);
+
+	data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
+		((moduleid & 0xff) << 0));
+	vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+	udelay(10);
+
+	vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
+	if ((result & (1 << 9)) != 0x0)
+		pr_err("Still in bist read: 0x%x\n", result);
+	else if ((result & (1 << 8)) != 0x0)
+		pr_err("bist read error: 0x%x\n", result);
+
+	return result & 0xff;
+}
+
+static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
+{
+	int data = 0;
+	u32 result = 0;
+
+	if ((address != 0x0) &&
+	    (address != 0x1) &&
+	    (address != 0x2) &&
+	    (address != 0xd) &&
+	    (address != 0xe))
+		pr_err("Invalid BIST address: 0x%x\n", address);
+
+	if (value > 255)
+		pr_err("Suspicious out-of-range write value: 0x%x\n", value);
+
+	data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
+		((moduleid & 0xff) << 0));
+	vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+	udelay(5);
+
+	vsc_read(adapter, REG_RAM_BIST_CMD, &result);
+	if ((result & (1 << 27)) != 0x0)
+		pr_err("Still in bist write: 0x%x\n", result);
+	else if ((result & (1 << 26)) != 0x0)
+		pr_err("bist write error: 0x%x\n", result);
+
+	return 0;
+}
+
+static int run_bist(adapter_t *adapter, int moduleid)
+{
+	/*run bist*/
+	(void) bist_wr(adapter,moduleid, 0x00, 0x02);
+	(void) bist_wr(adapter,moduleid, 0x01, 0x01);
+
+	return 0;
+}
+
+static int check_bist(adapter_t *adapter, int moduleid)
+{
+	int result=0;
+	int column=0;
+	/*check bist*/
+	result = bist_rd(adapter,moduleid, 0x02);
+	column = ((bist_rd(adapter,moduleid, 0x0e)<<8) +
+			(bist_rd(adapter,moduleid, 0x0d)));
+	if ((result & 3) != 0x3)
+		pr_err("Result: 0x%x  BIST error in ram %d, column: 0x%04x\n",
+			result, moduleid, column);
+	return 0;
+}
+
+static int enable_mem(adapter_t *adapter, int moduleid)
+{
+	/*enable mem*/
+	(void) bist_wr(adapter,moduleid, 0x00, 0x00);
+	return 0;
+}
+
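+/*
+ * Run the memory BIST on RAM modules 13, 14, 20 and 21, check the results,
+ * then re-enable the memories and restore the SPI-4 and device setup
+ * registers.
+ */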
+static int run_bist_all(adapter_t *adapter)
+{
+	int port = 0;
+	u32 val = 0;
+
+	vsc_write(adapter, REG_MEM_BIST, 0x5);
+	vsc_read(adapter, REG_MEM_BIST, &val);
+
+	for (port = 0; port < 12; port++)
+		vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
+
+	udelay(300);
+	vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
+	udelay(300);
+
+	(void) run_bist(adapter,13);
+	(void) run_bist(adapter,14);
+	(void) run_bist(adapter,20);
+	(void) run_bist(adapter,21);
+	mdelay(200);
+	(void) check_bist(adapter,13);
+	(void) check_bist(adapter,14);
+	(void) check_bist(adapter,20);
+	(void) check_bist(adapter,21);
+	udelay(100);
+	(void) enable_mem(adapter,13);
+	(void) enable_mem(adapter,14);
+	(void) enable_mem(adapter,20);
+	(void) enable_mem(adapter,21);
+	udelay(300);
+	vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
+	udelay(300);
+	for (port = 0; port < 12; port++)
+		vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
+
+	udelay(300);
+	vsc_write(adapter, REG_MEM_BIST, 0x0);
+	mdelay(10);
+	return 0;
+}
+
+static int mac_intr_handler(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_enable(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+	return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac *mac, u8 addr[6])
+{
+	u32 val;
+	int port = mac->instance->index;
+
+	vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port),
+		  (addr[3] << 16) | (addr[4] << 8) | addr[5]);
+	vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port),
+		  (addr[0] << 16) | (addr[1] << 8) | addr[2]);
+
+	vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val);
+	val &= ~0xf0000000;
+	vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28));
+
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK0,
+		  0xffff0000 | (addr[4] << 8) | addr[5]);
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK1,
+		  0xffff0000 | (addr[2] << 8) | addr[3]);
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK2,
+		  0xffff0000 | (addr[0] << 8) | addr[1]);
+	return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+	u32 addr_lo, addr_hi;
+	int port = mac->instance->index;
+
+	vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo);
+	vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi);
+
+	addr[0] = (u8) (addr_hi >> 16);
+	addr[1] = (u8) (addr_hi >> 8);
+	addr[2] = (u8) addr_hi;
+	addr[3] = (u8) (addr_lo >> 16);
+	addr[4] = (u8) (addr_lo >> 8);
+	addr[5] = (u8) addr_lo;
+	return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+	int index = mac->instance->index;
+
+	run_table(mac->adapter, vsc7326_portinit[index],
+		  ARRAY_SIZE(vsc7326_portinit[index]));
+
+	return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+	u32 v;
+	int port = mac->instance->index;
+
+	vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v);
+	v |= 1 << 12;
+
+	if (t1_rx_mode_promisc(rm))
+		v &= ~(1 << (port + 16));
+	else
+		v |= 1 << (port + 16);
+
+	vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v);
+	return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+	int port = mac->instance->index;
+
+	if (mtu > MAX_MTU)
+		return -EINVAL;
+
+	/* max_len includes the 14-byte Ethernet header and the 4-byte FCS */
+	vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
+	return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+				   int fc)
+{
+	u32 v;
+	int enable, port = mac->instance->index;
+
+	if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 &&
+	    speed != SPEED_1000)
+		return -1;
+	if (duplex > 0 && duplex != DUPLEX_FULL)
+		return -1;
+
+	if (speed >= 0) {
+		vsc_read(mac->adapter, REG_MODE_CFG(port), &v);
+		enable = v & 3;             /* save tx/rx enables */
+		v &= ~0xf;
+		v |= 4;                     /* full duplex */
+		if (speed == SPEED_1000)
+			v |= 8;             /* GigE */
+		enable |= v;
+		vsc_write(mac->adapter, REG_MODE_CFG(port), v);
+
+		if (speed == SPEED_1000)
+			v = 0x82;
+		else if (speed == SPEED_100)
+			v = 0x84;
+		else	/* SPEED_10 */
+			v = 0x86;
+		vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */
+		vsc_write(mac->adapter, REG_DEV_SETUP(port), v);
+		vsc_read(mac->adapter, REG_DBG(port), &v);
+		v &= ~0xff00;
+		if (speed == SPEED_1000)
+			v |= 0x400;
+		else if (speed == SPEED_100)
+			v |= 0x2000;
+		else	/* SPEED_10 */
+			v |= 0xff00;
+		vsc_write(mac->adapter, REG_DBG(port), v);
+
+		vsc_write(mac->adapter, REG_TX_IFG(port),
+			  speed == SPEED_1000 ? 5 : 0x11);
+		if (duplex == DUPLEX_HALF)
+			enable = 0x0;	/* 100 or 10 */
+		else if (speed == SPEED_1000)
+			enable = 0xc;
+		else	/* SPEED_100 or 10 */
+			enable = 0x4;
+		enable |= 0x9 << 10;	/* IFG1 */
+		enable |= 0x6 << 6;	/* IFG2 */
+		enable |= 0x1 << 4;	/* VLAN */
+		enable |= 0x3;		/* RX/TX EN */
+		vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
+
+	}
+
+	vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
+	v &= 0xfff0ffff;
+	v |= 0x20000;      /* xon/xoff */
+	if (fc & PAUSE_RX)
+		v |= 0x40000;
+	if (fc & PAUSE_TX)
+		v |= 0x80000;
+	if (fc == (PAUSE_RX | PAUSE_TX))
+		v |= 0x10000;
+	vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
+	return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+	u32 val;
+	int port = mac->instance->index;
+
+	/* Write the correct WM value when the port is enabled. */
+	vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val |= 0x2;
+	if (which & MAC_DIRECTION_TX)
+		val |= 1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+	u32 val;
+	int i, port = mac->instance->index;
+
+	/* Reset the port, this also writes the correct WM value */
+	mac_reset(mac);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val &= ~0x2;
+	if (which & MAC_DIRECTION_TX)
+		val &= ~0x1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+
+	/* Clear stats */
+	for (i = 0; i <= 0x3a; ++i)
+		vsc_write(mac->adapter, CRA(4, port, i), 0);
+
+	/* Clear software counters */
+	memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+
+	return 0;
+}
+
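+/*
+ * Fold the current value of a 32-bit hardware RMON counter into a 64-bit
+ * software counter: the low 32 bits of *stat are replaced by the hardware
+ * reading, and if the reading has wrapped below the previous low word,
+ * 2^32 is added to compensate (a reading of zero is not treated as a wrap).
+ */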
+static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
+{
+	u32 v, lo;
+
+	vsc_read(mac->adapter, addr, &v);
+	lo = *stat;
+	*stat = *stat - lo + v;
+
+	if (v == 0)
+		return;
+
+	if (v < lo)
+		*stat += (1ULL << 32);
+}
+
+static void port_stats_update(struct cmac *mac)
+{
+	struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats[] = {
+
+#define HW_STAT(reg, stat_name) \
+	{ reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
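+/*
+ * HW_STAT()'s second initializer is the index of stat_name within struct
+ * cmac_statistics, measured in u64 words (an offsetof()-style pointer
+ * trick); the update loop below uses it as "stats + p->offset".
+ */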
+
+		/* Rx stats */
+		HW_STAT(RxUnicast, RxUnicastFramesOK),
+		HW_STAT(RxMulticast, RxMulticastFramesOK),
+		HW_STAT(RxBroadcast, RxBroadcastFramesOK),
+		HW_STAT(Crc, RxFCSErrors),
+		HW_STAT(RxAlignment, RxAlignErrors),
+		HW_STAT(RxOversize, RxFrameTooLongErrors),
+		HW_STAT(RxPause, RxPauseFrames),
+		HW_STAT(RxJabbers, RxJabberErrors),
+		HW_STAT(RxFragments, RxRuntErrors),
+		HW_STAT(RxUndersize, RxRuntErrors),
+		HW_STAT(RxSymbolCarrier, RxSymbolErrors),
+		HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
+
+		/* Tx stats (skip collision stats as we are full-duplex only) */
+		HW_STAT(TxUnicast, TxUnicastFramesOK),
+		HW_STAT(TxMulticast, TxMulticastFramesOK),
+		HW_STAT(TxBroadcast, TxBroadcastFramesOK),
+		HW_STAT(TxPause, TxPauseFrames),
+		HW_STAT(TxUnderrun, TxUnderrun),
+		HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
+	}, *p = hw_stats;
+	unsigned int port = mac->instance->index;
+	u64 *stats = (u64 *)&mac->stats;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++, p++)
+		rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
+
+	rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
+	rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
+	rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the counters are only 32 bits
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
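+ * (At 1 Gb/s a 32-bit octet counter can wrap in roughly 2^32 / 125 MB/s ~= 34 s.)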
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+							   int flag)
+{
+	if (flag == MAC_STATS_UPDATE_FULL ||
+	    mac->instance->ticks >= MAJOR_UPDATE_TICKS) {
+		port_stats_update(mac);
+		mac->instance->ticks = 0;
+	} else {
+		int port = mac->instance->index;
+
+		rmon_update(mac, REG_RX_OK_BYTES(port),
+			    &mac->stats.RxOctetsOK);
+		rmon_update(mac, REG_RX_BAD_BYTES(port),
+			    &mac->stats.RxOctetsBad);
+		rmon_update(mac, REG_TX_OK_BYTES(port),
+			    &mac->stats.TxOctetsOK);
+		mac->instance->ticks++;
+	}
+	return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+	kfree(mac);
+}
+
+static struct cmac_ops vsc7326_ops = {
+	.destroy                  = mac_destroy,
+	.reset                    = mac_reset,
+	.interrupt_handler        = mac_intr_handler,
+	.interrupt_enable         = mac_intr_enable,
+	.interrupt_disable        = mac_intr_disable,
+	.interrupt_clear          = mac_intr_clear,
+	.enable                   = mac_enable,
+	.disable                  = mac_disable,
+	.set_mtu                  = mac_set_mtu,
+	.set_rx_mode              = mac_set_rx_mode,
+	.set_speed_duplex_fc      = mac_set_speed_duplex_fc,
+	.statistics_update        = mac_update_statistics,
+	.macaddress_get           = mac_get_address,
+	.macaddress_set           = mac_set_address,
+};
+
+static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *mac;
+	u32 val;
+	int i;
+
+	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->ops = &vsc7326_ops;
+	mac->instance = (cmac_instance *)(mac + 1);
+	mac->adapter  = adapter;
+
+	mac->instance->index = index;
+	mac->instance->ticks = 0;
+
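+	/*
+	 * Poll the local CPU status register over the TPI interface until
+	 * the chip responds; a value of all ones means it is not answering
+	 * yet.  Give up after 10000 attempts.
+	 */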
+	i = 0;
+	do {
+		u32 vhi, vlo;
+
+		vhi = vlo = 0;
+		t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+		udelay(1);
+		t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+		udelay(5);
+		val = (vhi << 16) | vlo;
+	} while ((++i < 10000) && (val == 0xffffffff));
+
+	return mac;
+}
+
+static int vsc7326_mac_reset(adapter_t *adapter)
+{
+	vsc7326_full_reset(adapter);
+	(void) run_bist_all(adapter);
+	run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset));
+	return 0;
+}
+
+const struct gmac t1_vsc7326_ops = {
+	.stats_update_period = STATS_TICK_SECS,
+	.create              = vsc7326_mac_create,
+	.reset               = vsc7326_mac_reset,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
new file mode 100644
index 0000000..479edbc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
@@ -0,0 +1,297 @@
+/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
+#ifndef _VSC7321_REG_H_
+#define _VSC7321_REG_H_
+
+/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
+ *
+ * Straight off the data sheet, VMDS-10038 Rev 2.0 and
+ * PD0011-01-14-Meigs-II 2002-12-12
+ */
+
+/* Just 'cause it's in here doesn't mean it's used. */
+
+#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
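+/*
+ * Worked example: REG_CHIP_ID below is CRA(0x7,0xf,0x00)
+ *   = (0x7 << 13) | (0xf << 9) | (0x00 << 1) = 0xfe00.
+ */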
+
+/* System and CPU comm's registers */
+#define REG_CHIP_ID		CRA(0x7,0xf,0x00)	/* Chip ID */
+#define REG_BLADE_ID		CRA(0x7,0xf,0x01)	/* Blade ID */
+#define REG_SW_RESET		CRA(0x7,0xf,0x02)	/* Global Soft Reset */
+#define REG_MEM_BIST		CRA(0x7,0xf,0x04)	/* mem */
+#define REG_IFACE_MODE		CRA(0x7,0xf,0x07)	/* Interface mode */
+#define REG_MSCH		CRA(0x7,0x2,0x06)	/* CRC error count */
+#define REG_CRC_CNT		CRA(0x7,0x2,0x0a)	/* CRC error count */
+#define REG_CRC_CFG		CRA(0x7,0x2,0x0b)	/* CRC config */
+#define REG_SI_TRANSFER_SEL	CRA(0x7,0xf,0x18)	/* SI Transfer Select */
+#define REG_PLL_CLK_SPEED	CRA(0x7,0xf,0x19)	/* Clock Speed Selection */
+#define REG_SYS_CLK_SELECT	CRA(0x7,0xf,0x1c)	/* System Clock Select */
+#define REG_GPIO_CTRL		CRA(0x7,0xf,0x1d)	/* GPIO Control */
+#define REG_GPIO_OUT		CRA(0x7,0xf,0x1e)	/* GPIO Out */
+#define REG_GPIO_IN		CRA(0x7,0xf,0x1f)	/* GPIO In */
+#define REG_CPU_TRANSFER_SEL	CRA(0x7,0xf,0x20)	/* CPU Transfer Select */
+#define REG_LOCAL_DATA		CRA(0x7,0xf,0xfe)	/* Local CPU Data Register */
+#define REG_LOCAL_STATUS	CRA(0x7,0xf,0xff)	/* Local CPU Status Register */
+
+/* Aggregator registers */
+#define REG_AGGR_SETUP		CRA(0x7,0x1,0x00)	/* Aggregator Setup */
+#define REG_PMAP_TABLE		CRA(0x7,0x1,0x01)	/* Port map table */
+#define REG_MPLS_BIT0		CRA(0x7,0x1,0x08)	/* MPLS bit0 position */
+#define REG_MPLS_BIT1		CRA(0x7,0x1,0x09)	/* MPLS bit1 position */
+#define REG_MPLS_BIT2		CRA(0x7,0x1,0x0a)	/* MPLS bit2 position */
+#define REG_MPLS_BIT3		CRA(0x7,0x1,0x0b)	/* MPLS bit3 position */
+#define REG_MPLS_BITMASK	CRA(0x7,0x1,0x0c)	/* MPLS bit mask */
+#define REG_PRE_BIT0POS		CRA(0x7,0x1,0x10)	/* Preamble bit0 position */
+#define REG_PRE_BIT1POS		CRA(0x7,0x1,0x11)	/* Preamble bit1 position */
+#define REG_PRE_BIT2POS		CRA(0x7,0x1,0x12)	/* Preamble bit2 position */
+#define REG_PRE_BIT3POS		CRA(0x7,0x1,0x13)	/* Preamble bit3 position */
+#define REG_PRE_ERR_CNT		CRA(0x7,0x1,0x14)	/* Preamble parity error count */
+
+/* BIST registers */
+/*#define REG_RAM_BIST_CMD	CRA(0x7,0x2,0x00)*/	/* RAM BIST Command Register */
+/*#define REG_RAM_BIST_RESULT	CRA(0x7,0x2,0x01)*/	/* RAM BIST Read Status/Result */
+#define REG_RAM_BIST_CMD	CRA(0x7,0x1,0x00)	/* RAM BIST Command Register */
+#define REG_RAM_BIST_RESULT	CRA(0x7,0x1,0x01)	/* RAM BIST Read Status/Result */
+#define   BIST_PORT_SELECT	0x00			/* BIST port select */
+#define   BIST_COMMAND		0x01			/* BIST enable/disable */
+#define   BIST_STATUS		0x02			/* BIST operation status */
+#define   BIST_ERR_CNT_LSB	0x03			/* BIST error count lo 8b */
+#define   BIST_ERR_CNT_MSB	0x04			/* BIST error count hi 8b */
+#define   BIST_ERR_SEL_LSB	0x05			/* BIST error select lo 8b */
+#define   BIST_ERR_SEL_MSB	0x06			/* BIST error select hi 8b */
+#define   BIST_ERROR_STATE	0x07			/* BIST engine internal state */
+#define   BIST_ERR_ADR0		0x08			/* BIST error address lo 8b */
+#define   BIST_ERR_ADR1		0x09			/* BIST error address lomid 8b */
+#define   BIST_ERR_ADR2		0x0a			/* BIST error address himid 8b */
+#define   BIST_ERR_ADR3		0x0b			/* BIST error address hi 8b */
+
+/* FIFO registers
+ *   ie = 0 for ingress, 1 for egress
+ *   fn = FIFO number, 0-9
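+ *   e.g. REG_TEST(1,3) expands to CRA(0x2,0x1,0x03), the mode/test register of egress FIFO 3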
+ */
+#define REG_TEST(ie,fn)		CRA(0x2,ie&1,0x00+fn)	/* Mode & Test Register */
+#define REG_TOP_BOTTOM(ie,fn)	CRA(0x2,ie&1,0x10+fn)	/* FIFO Buffer Top & Bottom */
+#define REG_TAIL(ie,fn)		CRA(0x2,ie&1,0x20+fn)	/* FIFO Write Pointer */
+#define REG_HEAD(ie,fn)		CRA(0x2,ie&1,0x30+fn)	/* FIFO Read Pointer */
+#define REG_HIGH_LOW_WM(ie,fn)	CRA(0x2,ie&1,0x40+fn)	/* Flow Control Water Marks */
+#define REG_CT_THRHLD(ie,fn)	CRA(0x2,ie&1,0x50+fn)	/* Cut Through Threshold */
+#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn)	/* Drop & CRC Error Counter */
+#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn)	/* Input Side Debug Counter */
+#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn)	/* Input Side Debug Counter */
+#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn)	/* Input Side Debug Counter */
+
+/* Traffic shaper buckets
+ *   ie = 0 for ingress, 1 for egress
+ *   bn = bucket number 0-10 (yes, 11 buckets)
+ */
+/* OK, this one's kinda ugly.  Some hardware designers are perverse. */
+#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
+#define REG_TRAFFIC_SHAPER_CONTROL(ie)	CRA(0x2,ie&1,0x3b)
+
+#define REG_SRAM_ADR(ie)	CRA(0x2,ie&1,0x0e)	/* FIFO SRAM address */
+#define REG_SRAM_WR_STRB(ie)	CRA(0x2,ie&1,0x1e)	/* FIFO SRAM write strobe */
+#define REG_SRAM_RD_STRB(ie)	CRA(0x2,ie&1,0x2e)	/* FIFO SRAM read strobe */
+#define REG_SRAM_DATA_0(ie)	CRA(0x2,ie&1,0x3e)	/* FIFO SRAM data lo 8b */
+#define REG_SRAM_DATA_1(ie)	CRA(0x2,ie&1,0x4e)	/* FIFO SRAM data lomid 8b */
+#define REG_SRAM_DATA_2(ie)	CRA(0x2,ie&1,0x5e)	/* FIFO SRAM data himid 8b */
+#define REG_SRAM_DATA_3(ie)	CRA(0x2,ie&1,0x6e)	/* FIFO SRAM data hi 8b */
+#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e)	/* FIFO SRAM tag */
+/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
+#define REG_CONTROL(ie)		CRA(0x2,ie&1,0x0f)	/* FIFO control */
+#define REG_ING_CONTROL		CRA(0x2,0x0,0x0f)	/* Ingress control (alias) */
+#define REG_EGR_CONTROL		CRA(0x2,0x1,0x0f)	/* Egress control (alias) */
+#define REG_AGE_TIMER(ie)	CRA(0x2,ie&1,0x1f)	/* Aging timer */
+#define REG_AGE_INC(ie)		CRA(0x2,ie&1,0x2f)	/* Aging increment */
+#define DEBUG_OUT(ie)		CRA(0x2,ie&1,0x3f)	/* Output debug counter control */
+#define DEBUG_CNT(ie)		CRA(0x2,ie&1,0x4f)	/* Output debug counter */
+
+/* SPI4 interface */
+#define REG_SPI4_MISC		CRA(0x5,0x0,0x00)	/* Misc Register */
+#define REG_SPI4_STATUS		CRA(0x5,0x0,0x01)	/* CML Status */
+#define REG_SPI4_ING_SETUP0	CRA(0x5,0x0,0x02)	/* Ingress Status Channel Setup */
+#define REG_SPI4_ING_SETUP1	CRA(0x5,0x0,0x03)	/* Ingress Data Training Setup */
+#define REG_SPI4_ING_SETUP2	CRA(0x5,0x0,0x04)	/* Ingress Data Burst Size Setup */
+#define REG_SPI4_EGR_SETUP0	CRA(0x5,0x0,0x05)	/* Egress Status Channel Setup */
+#define REG_SPI4_DBG_CNT(n)	CRA(0x5,0x0,0x10+n)	/* Debug counters 0-9 */
+#define REG_SPI4_DBG_SETUP	CRA(0x5,0x0,0x1A)	/* Debug counters setup */
+#define REG_SPI4_TEST		CRA(0x5,0x0,0x20)	/* Test Setup Register */
+#define REG_TPGEN_UP0		CRA(0x5,0x0,0x21)	/* Test Pattern generator user pattern 0 */
+#define REG_TPGEN_UP1		CRA(0x5,0x0,0x22)	/* Test Pattern generator user pattern 1 */
+#define REG_TPCHK_UP0		CRA(0x5,0x0,0x23)	/* Test Pattern checker user pattern 0 */
+#define REG_TPCHK_UP1		CRA(0x5,0x0,0x24)	/* Test Pattern checker user pattern 1 */
+#define REG_TPSAM_P0		CRA(0x5,0x0,0x25)	/* Sampled pattern 0 */
+#define REG_TPSAM_P1		CRA(0x5,0x0,0x26)	/* Sampled pattern 1 */
+#define REG_TPERR_CNT		CRA(0x5,0x0,0x27)	/* Pattern checker error counter */
+#define REG_SPI4_STICKY		CRA(0x5,0x0,0x30)	/* Sticky bits register */
+#define REG_SPI4_DBG_INH	CRA(0x5,0x0,0x31)	/* Core egress & ingress inhibit */
+#define REG_SPI4_DBG_STATUS	CRA(0x5,0x0,0x32)	/* Sampled ingress status */
+#define REG_SPI4_DBG_GRANT	CRA(0x5,0x0,0x33)	/* Ingress granted credit value */
+
+#define REG_SPI4_DESKEW		CRA(0x5,0x0,0x43)	/* SPI4 deskew */
+
+/* 10GbE MAC Block Registers */
+/* Note that those registers that are exactly the same for 10GbE as for
+ * tri-speed are only defined with the version that needs a port number.
+ * Pass 0xa in those cases.
+ *
+ * Also note that despite the presence of a MAC address register, this part
+ * does no ingress MAC address filtering.  That register is used only for
+ * pause frame detection and generation.
+ */
+/* 10GbE specific, and different from tri-speed */
+#define REG_MISC_10G		CRA(0x1,0xa,0x00)	/* Misc 10GbE setup */
+#define REG_PAUSE_10G		CRA(0x1,0xa,0x01)	/* Pause register */
+#define REG_NORMALIZER_10G	CRA(0x1,0xa,0x05)	/* 10G normalizer */
+#define REG_STICKY_RX		CRA(0x1,0xa,0x06)	/* RX debug register */
+#define REG_DENORM_10G		CRA(0x1,0xa,0x07)	/* Denormalizer  */
+#define REG_STICKY_TX		CRA(0x1,0xa,0x08)	/* TX sticky bits */
+#define REG_MAX_RXHIGH		CRA(0x1,0xa,0x0a)	/* XGMII lane 0-3 debug */
+#define REG_MAX_RXLOW		CRA(0x1,0xa,0x0b)	/* XGMII lane 4-7 debug */
+#define REG_MAC_TX_STICKY	CRA(0x1,0xa,0x0c)	/* MAC Tx state sticky debug */
+#define REG_MAC_TX_RUNNING	CRA(0x1,0xa,0x0d)	/* MAC Tx state running debug */
+#define REG_TX_ABORT_AGE	CRA(0x1,0xa,0x14)	/* Aged Tx frames discarded */
+#define REG_TX_ABORT_SHORT	CRA(0x1,0xa,0x15)	/* Short Tx frames discarded */
+#define REG_TX_ABORT_TAXI	CRA(0x1,0xa,0x16)	/* Taxi error frames discarded */
+#define REG_TX_ABORT_UNDERRUN	CRA(0x1,0xa,0x17)	/* Tx Underrun abort counter */
+#define REG_TX_DENORM_DISCARD	CRA(0x1,0xa,0x18)	/* Tx denormalizer discards */
+#define REG_XAUI_STAT_A		CRA(0x1,0xa,0x20)	/* XAUI status A */
+#define REG_XAUI_STAT_B		CRA(0x1,0xa,0x21)	/* XAUI status B */
+#define REG_XAUI_STAT_C		CRA(0x1,0xa,0x22)	/* XAUI status C */
+#define REG_XAUI_CONF_A		CRA(0x1,0xa,0x23)	/* XAUI configuration A */
+#define REG_XAUI_CONF_B		CRA(0x1,0xa,0x24)	/* XAUI configuration B */
+#define REG_XAUI_CODE_GRP_CNT	CRA(0x1,0xa,0x25)	/* XAUI code group error count */
+#define REG_XAUI_CONF_TEST_A	CRA(0x1,0xa,0x26)	/* XAUI test register A */
+#define REG_PDERRCNT		CRA(0x1,0xa,0x27)	/* XAUI test register B */
+
+/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
+/* Both tri-speed and 10GbE */
+#define REG_MAX_LEN(pn)		CRA(0x1,pn,0x02)	/* Max length */
+#define REG_MAC_HIGH_ADDR(pn)	CRA(0x1,pn,0x03)	/* Upper 24 bits of MAC addr */
+#define REG_MAC_LOW_ADDR(pn)	CRA(0x1,pn,0x04)	/* Lower 24 bits of MAC addr */
+
+/* tri-speed only
+ * pn = port number, 0-9
+ */
+#define REG_MODE_CFG(pn)	CRA(0x1,pn,0x00)	/* Mode configuration */
+#define REG_PAUSE_CFG(pn)	CRA(0x1,pn,0x01)	/* Pause configuration */
+#define REG_NORMALIZER(pn)	CRA(0x1,pn,0x05)	/* Normalizer */
+#define REG_TBI_STATUS(pn)	CRA(0x1,pn,0x06)	/* TBI status */
+#define REG_PCS_STATUS_DBG(pn)	CRA(0x1,pn,0x07)	/* PCS status debug */
+#define REG_PCS_CTRL(pn)	CRA(0x1,pn,0x08)	/* PCS control */
+#define REG_TBI_CONFIG(pn)	CRA(0x1,pn,0x09)	/* TBI configuration */
+#define REG_STICK_BIT(pn)	CRA(0x1,pn,0x0a)	/* Sticky bits */
+#define REG_DEV_SETUP(pn)	CRA(0x1,pn,0x0b)	/* MAC clock/reset setup */
+#define REG_DROP_CNT(pn)	CRA(0x1,pn,0x0c)	/* Drop counter */
+#define REG_PORT_POS(pn)	CRA(0x1,pn,0x0d)	/* Preamble port position */
+#define REG_PORT_FAIL(pn)	CRA(0x1,pn,0x0e)	/* Port fail */
+#define REG_SERDES_CONF(pn)	CRA(0x1,pn,0x0f)	/* SerDes configuration */
+#define REG_SERDES_TEST(pn)	CRA(0x1,pn,0x10)	/* SerDes test */
+#define REG_SERDES_STAT(pn)	CRA(0x1,pn,0x11)	/* SerDes status */
+#define REG_SERDES_COM_CNT(pn)	CRA(0x1,pn,0x12)	/* SerDes comma counter */
+#define REG_DENORM(pn)		CRA(0x1,pn,0x15)	/* Frame denormalization */
+#define REG_DBG(pn)		CRA(0x1,pn,0x16)	/* Device 1G debug */
+#define REG_TX_IFG(pn)		CRA(0x1,pn,0x18)	/* Tx IFG config */
+#define REG_HDX(pn)		CRA(0x1,pn,0x19)	/* Half-duplex config */
+
+/* Statistics */
+/* CRA(0x4,pn,reg) */
+/* reg below */
+/* pn = port number, 0-a, a = 10GbE */
+
+enum {
+	RxInBytes		= 0x00,	// # Rx in octets
+	RxSymbolCarrier		= 0x01,	// Frames w/ symbol errors
+	RxPause			= 0x02,	// # pause frames received
+	RxUnsupOpcode		= 0x03,	// # control frames with unsupported opcode
+	RxOkBytes		= 0x04,	// # octets in good frames
+	RxBadBytes		= 0x05,	// # octets in bad frames
+	RxUnicast		= 0x06,	// # good unicast frames
+	RxMulticast		= 0x07,	// # good multicast frames
+	RxBroadcast		= 0x08,	// # good broadcast frames
+	Crc			= 0x09,	// # frames w/ bad CRC only
+	RxAlignment		= 0x0a,	// # frames w/ alignment err
+	RxUndersize		= 0x0b,	// # frames undersize
+	RxFragments		= 0x0c,	// # frames undersize w/ crc err
+	RxInRangeLengthError	= 0x0d,	// # frames with length error
+	RxOutOfRangeError	= 0x0e,	// # frames with illegal length field
+	RxOversize		= 0x0f,	// # frames oversize
+	RxJabbers		= 0x10,	// # frames oversize w/ crc err
+	RxSize64		= 0x11,	// # frames 64 octets long
+	RxSize65To127		= 0x12,	// # frames 65-127 octets
+	RxSize128To255		= 0x13,	// # frames 128-255
+	RxSize256To511		= 0x14,	// # frames 256-511
+	RxSize512To1023		= 0x15,	// # frames 512-1023
+	RxSize1024To1518	= 0x16,	// # frames 1024-1518
+	RxSize1519ToMax		= 0x17,	// # frames 1519-max
+
+	TxOutBytes		= 0x18,	// # octets tx
+	TxPause			= 0x19,	// # pause frames sent
+	TxOkBytes		= 0x1a, // # octets tx OK
+	TxUnicast		= 0x1b,	// # frames unicast
+	TxMulticast		= 0x1c,	// # frames multicast
+	TxBroadcast		= 0x1d,	// # frames broadcast
+	TxMultipleColl		= 0x1e,	// # frames tx after multiple collisions
+	TxLateColl		= 0x1f,	// # late collisions detected
+	TxXcoll			= 0x20,	// # frames lost, excessive collisions
+	TxDefer			= 0x21,	// # frames deferred on first tx attempt
+	TxXdefer		= 0x22,	// # frames excessively deferred
+	TxCsense		= 0x23,	// carrier sense errors at frame end
+	TxSize64		= 0x24,	// # frames 64 octets long
+	TxSize65To127		= 0x25,	// # frames 65-127 octets
+	TxSize128To255		= 0x26,	// # frames 128-255
+	TxSize256To511		= 0x27,	// # frames 256-511
+	TxSize512To1023		= 0x28,	// # frames 512-1023
+	TxSize1024To1518	= 0x29,	// # frames 1024-1518
+	TxSize1519ToMax		= 0x2a,	// # frames 1519-max
+	TxSingleColl		= 0x2b,	// # frames tx after single collision
+	TxBackoff2		= 0x2c,	// # frames tx ok after 2 backoffs/collisions
+	TxBackoff3		= 0x2d,	//   after 3 backoffs/collisions
+	TxBackoff4		= 0x2e,	//   after 4
+	TxBackoff5		= 0x2f,	//   after 5
+	TxBackoff6		= 0x30,	//   after 6
+	TxBackoff7		= 0x31,	//   after 7
+	TxBackoff8		= 0x32,	//   after 8
+	TxBackoff9		= 0x33,	//   after 9
+	TxBackoff10		= 0x34,	//   after 10
+	TxBackoff11		= 0x35,	//   after 11
+	TxBackoff12		= 0x36,	//   after 12
+	TxBackoff13		= 0x37,	//   after 13
+	TxBackoff14		= 0x38,	//   after 14
+	TxBackoff15		= 0x39,	//   after 15
+	TxUnderrun		= 0x3a,	// # frames dropped from underrun
+	// Hole. See REG_RX_XGMII_PROT_ERR below.
+	RxIpgShrink		= 0x3c,	// # of IPG shrinks detected
+	// Duplicate. See REG_STAT_STICKY10G below.
+	StatSticky1G		= 0x3e,	// tri-speed sticky bits
+	StatInit		= 0x3f	// Clear all statistics
+};
+
+#define REG_RX_XGMII_PROT_ERR	CRA(0x4,0xa,0x3b)		/* # protocol errors detected on XGMII interface */
+#define REG_STAT_STICKY10G	CRA(0x4,0xa,StatSticky1G)	/* 10GbE sticky bits */
+
+#define REG_RX_OK_BYTES(pn)	CRA(0x4,pn,RxOkBytes)
+#define REG_RX_BAD_BYTES(pn)	CRA(0x4,pn,RxBadBytes)
+#define REG_TX_OK_BYTES(pn)	CRA(0x4,pn,TxOkBytes)
+
+/* MII-Management Block registers */
+/* These are for MII-M interface 0, which is the bidirectional LVTTL one.  If
+ * we hooked up to the one with separate directions, the middle 0x0 needs to
+ * change to 0x1.  And the current errata states that MII-M 1 doesn't work.
+ */
+
+#define REG_MIIM_STATUS		CRA(0x3,0x0,0x00)	/* MII-M Status */
+#define REG_MIIM_CMD		CRA(0x3,0x0,0x01)	/* MII-M Command */
+#define REG_MIIM_DATA		CRA(0x3,0x0,0x02)	/* MII-M Data */
+#define REG_MIIM_PRESCALE	CRA(0x3,0x0,0x03)	/* MII-M MDC Prescale */
+
+#define REG_ING_FFILT_UM_EN	CRA(0x2, 0, 0xd)
+#define REG_ING_FFILT_BE_EN	CRA(0x2, 0, 0x1d)
+#define REG_ING_FFILT_VAL0	CRA(0x2, 0, 0x2d)
+#define REG_ING_FFILT_VAL1	CRA(0x2, 0, 0x3d)
+#define REG_ING_FFILT_MASK0	CRA(0x2, 0, 0x4d)
+#define REG_ING_FFILT_MASK1	CRA(0x2, 0, 0x5d)
+#define REG_ING_FFILT_MASK2	CRA(0x2, 0, 0x6d)
+#define REG_ING_FFILT_ETYPE	CRA(0x2, 0, 0x7d)
+
+
+/* Whew. */
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile
new file mode 100644
index 0000000..29aff78
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile
@@ -0,0 +1,8 @@
+#
+# Chelsio T3 driver
+#
+
+obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
+
+cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
+	      xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
new file mode 100644
index 0000000..8b395b5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file should not be included directly.  Include common.h instead. */
+
+#ifndef __T3_ADAPTER_H__
+#define __T3_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include "t3cdev.h"
+#include <asm/io.h>
+
+struct adapter;
+struct sge_qset;
+struct port_info;
+
+enum mac_idx_types {
+	LAN_MAC_IDX	= 0,
+	SAN_MAC_IDX,
+
+	MAX_MAC_IDX
+};
+
+struct iscsi_config {
+	__u8	mac_addr[ETH_ALEN];
+	__u32	flags;
+	int (*send)(struct port_info *pi, struct sk_buff **skb);
+	int (*recv)(struct port_info *pi, struct sk_buff *skb);
+};
+
+struct port_info {
+	struct adapter *adapter;
+	struct sge_qset *qs;
+	u8 port_id;
+	u8 nqsets;
+	u8 first_qset;
+	struct cphy phy;
+	struct cmac mac;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+	int activity;
+	__be32 iscsi_ipv4addr;
+	struct iscsi_config iscsic;
+
+	int link_fault; /* link fault was detected */
+};
+
+enum {				/* adapter flags */
+	FULL_INIT_DONE = (1 << 0),
+	USING_MSI = (1 << 1),
+	USING_MSIX = (1 << 2),
+	QUEUES_BOUND = (1 << 3),
+	TP_PARITY_INIT = (1 << 4),
+	NAPI_INIT = (1 << 5),
+};
+
+struct fl_pg_chunk {
+	struct page *page;
+	void *va;
+	unsigned int offset;
+	unsigned long *p_cnt;
+	dma_addr_t mapping;
+};
+
+struct rx_desc;
+struct rx_sw_desc;
+
+struct sge_fl {                     /* SGE per free-buffer list state */
+	unsigned int buf_size;      /* size of each Rx buffer */
+	unsigned int credits;       /* # of available Rx buffers */
+	unsigned int pend_cred;     /* new buffers since last FL DB ring */
+	unsigned int size;          /* capacity of free list */
+	unsigned int cidx;          /* consumer index */
+	unsigned int pidx;          /* producer index */
+	unsigned int gen;           /* free list generation */
+	struct fl_pg_chunk pg_chunk;/* page chunk cache */
+	unsigned int use_pages;     /* whether FL uses pages or sk_buffs */
+	unsigned int order;	    /* order of page allocations */
+	unsigned int alloc_size;    /* size of allocated buffer */
+	struct rx_desc *desc;       /* address of HW Rx descriptor ring */
+	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
+	dma_addr_t   phys_addr;     /* physical address of HW ring start */
+	unsigned int cntxt_id;      /* SGE context id for the free list */
+	unsigned long empty;        /* # of times queue ran out of buffers */
+	unsigned long alloc_failed; /* # of times buffer allocation failed */
+};
+
+/*
+ * Bundle size for grouping offload RX packets for delivery to the stack.
+ * Don't make this too big as we do prefetch on each packet in a bundle.
+ */
+# define RX_BUNDLE_SIZE 8
+
+struct rsp_desc;
+
+struct sge_rspq {		/* state for an SGE response queue */
+	unsigned int credits;	/* # of pending response credits */
+	unsigned int size;	/* capacity of response queue */
+	unsigned int cidx;	/* consumer index */
+	unsigned int gen;	/* current generation bit */
+	unsigned int polling;	/* is the queue serviced through NAPI? */
+	unsigned int holdoff_tmr;	/* interrupt holdoff timer in 100ns */
+	unsigned int next_holdoff;	/* holdoff time for next interrupt */
+	unsigned int rx_recycle_buf; /* whether recycling occurred
+					within current sop-eop */
+	struct rsp_desc *desc;	/* address of HW response ring */
+	dma_addr_t phys_addr;	/* physical address of the ring */
+	unsigned int cntxt_id;	/* SGE context id for the response q */
+	spinlock_t lock;	/* guards response processing */
+	struct sk_buff_head rx_queue; /* offload packet receive queue */
+	struct sk_buff *pg_skb; /* used to build frag list in napi handler */
+
+	unsigned long offload_pkts;
+	unsigned long offload_bundles;
+	unsigned long eth_pkts;	/* # of ethernet packets */
+	unsigned long pure_rsps;	/* # of pure (non-data) responses */
+	unsigned long imm_data;	/* responses with immediate data */
+	unsigned long rx_drops;	/* # of packets dropped due to no mem */
+	unsigned long async_notif; /* # of asynchronous notification events */
+	unsigned long empty;	/* # of times queue ran out of credits */
+	unsigned long nomem;	/* # of responses deferred due to no mem */
+	unsigned long unhandled_irqs;	/* # of spurious intrs */
+	unsigned long starved;
+	unsigned long restarted;
+};
+
+struct tx_desc;
+struct tx_sw_desc;
+
+struct sge_txq {		/* state for an SGE Tx queue */
+	unsigned long flags;	/* HW DMA fetch status */
+	unsigned int in_use;	/* # of in-use Tx descriptors */
+	unsigned int size;	/* # of descriptors */
+	unsigned int processed;	/* total # of descs HW has processed */
+	unsigned int cleaned;	/* total # of descs SW has reclaimed */
+	unsigned int stop_thres;	/* SW TX queue suspend threshold */
+	unsigned int cidx;	/* consumer index */
+	unsigned int pidx;	/* producer index */
+	unsigned int gen;	/* current value of generation bit */
+	unsigned int unacked;	/* Tx descriptors used since last COMPL */
+	struct tx_desc *desc;	/* address of HW Tx descriptor ring */
+	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
+	spinlock_t lock;	/* guards enqueueing of new packets */
+	unsigned int token;	/* WR token */
+	dma_addr_t phys_addr;	/* physical address of the ring */
+	struct sk_buff_head sendq;	/* List of backpressured offload packets */
+	struct tasklet_struct qresume_tsk;	/* restarts the queue */
+	unsigned int cntxt_id;	/* SGE context id for the Tx q */
+	unsigned long stops;	/* # of times q has been stopped */
+	unsigned long restarts;	/* # of queue restarts */
+};
+
+enum {				/* per port SGE statistics */
+	SGE_PSTAT_TSO,		/* # of TSO requests */
+	SGE_PSTAT_RX_CSUM_GOOD,	/* # of successful RX csum offloads */
+	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
+	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
+	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
+
+	SGE_PSTAT_MAX		/* must be last */
+};
+
+struct napi_gro_fraginfo;
+
+struct sge_qset {		/* an SGE queue set */
+	struct adapter *adap;
+	struct napi_struct napi;
+	struct sge_rspq rspq;
+	struct sge_fl fl[SGE_RXQ_PER_SET];
+	struct sge_txq txq[SGE_TXQ_PER_SET];
+	int nomem;
+	void *lro_va;
+	struct net_device *netdev;
+	struct netdev_queue *tx_q;	/* associated netdev TX queue */
+	unsigned long txq_stopped;	/* which Tx queues are stopped */
+	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+	struct timer_list rx_reclaim_timer;	/* reclaims RX buffers */
+	unsigned long port_stats[SGE_PSTAT_MAX];
+} ____cacheline_aligned;
+
+struct sge {
+	struct sge_qset qs[SGE_QSETS];
+	spinlock_t reg_lock;	/* guards non-atomic SGE registers (eg context) */
+};
+
+struct adapter {
+	struct t3cdev tdev;
+	struct list_head adapter_list;
+	void __iomem *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	unsigned int mmio_len;
+
+	struct adapter_params params;
+	unsigned int slow_intr_mask;
+	unsigned long irq_stats[IRQ_NUM_STATS];
+
+	int msix_nvectors;
+	struct {
+		unsigned short vec;
+		char desc[22];
+	} msix_info[SGE_QSETS + 1];
+
+	/* T3 modules */
+	struct sge sge;
+	struct mc7 pmrx;
+	struct mc7 pmtx;
+	struct mc7 cm;
+	struct mc5 mc5;
+
+	struct net_device *port[MAX_NPORTS];
+	unsigned int check_task_cnt;
+	struct delayed_work adap_check_task;
+	struct work_struct ext_intr_handler_task;
+	struct work_struct fatal_error_handler_task;
+	struct work_struct link_fault_handler_task;
+
+	struct work_struct db_full_task;
+	struct work_struct db_empty_task;
+	struct work_struct db_drop_task;
+
+	struct dentry *debugfs_root;
+
+	struct mutex mdio_lock;
+	spinlock_t stats_lock;
+	spinlock_t work_lock;
+
+	struct sk_buff *nofail_skb;
+};
+
+static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+	u32 val = readl(adapter->regs + reg_addr);
+
+	CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
+	return val;
+}
+
+static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+	CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
+	writel(val, adapter->regs + reg_addr);
+}
+
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+	return netdev_priv(adap->port[idx]);
+}
+
+static inline int phy2portid(struct cphy *phy)
+{
+	struct adapter *adap = phy->adapter;
+	struct port_info *port0 = adap2pinfo(adap, 0);
+
+	return &port0->phy == phy ? 0 : 1;
+}
+
+#define OFFLOAD_DEVMAP_BIT 15
+
+#define tdev2adap(d) container_of(d, struct adapter, tdev)
+
+static inline int offload_running(struct adapter *adapter)
+{
+	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
+
+void t3_os_ext_intr_handler(struct adapter *adapter);
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+			int speed, int duplex, int fc);
+void t3_os_phymod_changed(struct adapter *adap, int port_id);
+void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
+
+void t3_sge_start(struct adapter *adap);
+void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
+void t3_stop_sge_timers(struct adapter *adap);
+void t3_free_sge_resources(struct adapter *adap);
+void t3_sge_err_intr_handler(struct adapter *adapter);
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+		      int irq_vec_idx, const struct qset_params *p,
+		      int ntxq, struct net_device *dev,
+		      struct netdev_queue *netdevq);
+extern struct workqueue_struct *cxgb3_wq;
+
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
+
+#endif				/* __T3_ADAPTER_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
new file mode 100644
index 0000000..2028da9
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+	AEL100X_TX_CONFIG1 = 0xc002,
+	AEL1002_PWR_DOWN_HI = 0xc011,
+	AEL1002_PWR_DOWN_LO = 0xc012,
+	AEL1002_XFI_EQL = 0xc015,
+	AEL1002_LB_EN = 0xc017,
+	AEL_OPT_SETTINGS = 0xc017,
+	AEL_I2C_CTRL = 0xc30a,
+	AEL_I2C_DATA = 0xc30b,
+	AEL_I2C_STAT = 0xc30c,
+	AEL2005_GPIO_CTRL = 0xc214,
+	AEL2005_GPIO_STAT = 0xc215,
+
+	AEL2020_GPIO_INTR   = 0xc103,	/* Latch High (LH) */
+	AEL2020_GPIO_CTRL   = 0xc108,	/* Store Clear (SC) */
+	AEL2020_GPIO_STAT   = 0xc10c,	/* Read Only (RO) */
+	AEL2020_GPIO_CFG    = 0xc110,	/* Read Write (RW) */
+
+	AEL2020_GPIO_SDA    = 0,	/* IN: i2c serial data */
+	AEL2020_GPIO_MODDET = 1,	/* IN: Module Detect */
+	AEL2020_GPIO_0      = 3,	/* IN: unassigned */
+	AEL2020_GPIO_1      = 2,	/* OUT: unassigned */
+	AEL2020_GPIO_LSTAT  = AEL2020_GPIO_1, /* wired to link status LED */
+};
+
+enum { edc_none, edc_sr, edc_twinax };
+
+/* PHY module I2C device address */
+enum {
+	MODULE_DEV_ADDR	= 0xa0,
+	SFF_DEV_ADDR	= 0xa2,
+};
+
+/* PHY transceiver type */
+enum {
+	phy_transtype_unknown = 0,
+	phy_transtype_sfp     = 3,
+	phy_transtype_xfp     = 6,
+};
+
+#define AEL2005_MODDET_IRQ 4
+
+struct reg_val {
+	unsigned short mmd_addr;
+	unsigned short reg_addr;
+	unsigned short clear_bits;
+	unsigned short set_bits;
+};
+
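+/*
+ * Apply a table of register updates, terminated by an entry with
+ * mmd_addr == 0.  A clear_bits mask of 0xffff means the register is
+ * written with set_bits directly; otherwise a read-modify-write is done
+ * through t3_mdio_change_bits().
+ */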
+static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
+{
+	int err;
+
+	for (err = 0; rv->mmd_addr && !err; rv++) {
+		if (rv->clear_bits == 0xffff)
+			err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
+					    rv->set_bits);
+		else
+			err = t3_mdio_change_bits(phy, rv->mmd_addr,
+						  rv->reg_addr, rv->clear_bits,
+						  rv->set_bits);
+	}
+	return err;
+}
+
+static void ael100x_txon(struct cphy *phy)
+{
+	int tx_on_gpio =
+		phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+
+	msleep(100);
+	t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
+	msleep(30);
+}
+
+/*
+ * Read an 8-bit word from a device attached to the PHY's i2c bus.
+ */
+static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
+{
+	int i, err;
+	unsigned int stat, data;
+
+	err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
+			    (dev_addr << 8) | (1 << 8) | word_addr);
+	if (err)
+		return err;
+
+	for (i = 0; i < 200; i++) {
+		msleep(1);
+		err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
+		if (err)
+			return err;
+		if ((stat & 3) == 1) {
+			err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
+					   &data);
+			if (err)
+				return err;
+			return data >> 8;
+		}
+	}
+	CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
+		phy->mdio.prtad, dev_addr, word_addr);
+	return -ETIMEDOUT;
+}
+
+static int ael1002_power_down(struct cphy *phy, int enable)
+{
+	int err;
+
+	err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
+	if (!err)
+		err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+				    MDIO_MMD_PMAPMD, MDIO_CTRL1,
+				    MDIO_CTRL1_LPOWER, enable);
+	return err;
+}
+
+static int ael1002_reset(struct cphy *phy, int wait)
+{
+	int err;
+
+	if ((err = ael1002_power_down(phy, 0)) ||
+	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
+	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
+	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
+	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
+	    (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
+				       0, 1 << 5)))
+		return err;
+	return 0;
+}
+
+static int ael1002_intr_noop(struct cphy *phy)
+{
+	return 0;
+}
+
+/*
+ * Get link status for a 10GBASE-R device.
+ */
+static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
+			     int *duplex, int *fc)
+{
+	if (link_ok) {
+		unsigned int stat0, stat1, stat2;
+		int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+				       MDIO_PMA_RXDET, &stat0);
+
+		if (!err)
+			err = t3_mdio_read(phy, MDIO_MMD_PCS,
+					   MDIO_PCS_10GBRT_STAT1, &stat1);
+		if (!err)
+			err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+					   MDIO_PHYXS_LNSTAT, &stat2);
+		if (err)
+			return err;
+		*link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static struct cphy_ops ael1002_ops = {
+	.reset = ael1002_reset,
+	.intr_enable = ael1002_intr_noop,
+	.intr_disable = ael1002_intr_noop,
+	.intr_clear = ael1002_intr_noop,
+	.intr_handler = ael1002_intr_noop,
+	.get_link_status = get_link_status_r,
+	.power_down = ael1002_power_down,
+	.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+		   "10GBASE-R");
+	ael100x_txon(phy);
+	return 0;
+}
+
+static int ael1006_reset(struct cphy *phy, int wait)
+{
+	return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
+}
+
+static struct cphy_ops ael1006_ops = {
+	.reset = ael1006_reset,
+	.intr_enable = t3_phy_lasi_intr_enable,
+	.intr_disable = t3_phy_lasi_intr_disable,
+	.intr_clear = t3_phy_lasi_intr_clear,
+	.intr_handler = t3_phy_lasi_intr_handler,
+	.get_link_status = get_link_status_r,
+	.power_down = ael1002_power_down,
+	.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+			     int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+		   "10GBASE-SR");
+	ael100x_txon(phy);
+	return 0;
+}
+
+/*
+ * Decode our module type.
+ */
+static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
+{
+	int v;
+
+	if (delay_ms)
+		msleep(delay_ms);
+
+	/* see SFF-8472 for below */
+	v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
+	if (v < 0)
+		return v;
+
+	if (v == 0x10)
+		return phy_modtype_sr;
+	if (v == 0x20)
+		return phy_modtype_lr;
+	if (v == 0x40)
+		return phy_modtype_lrm;
+
+	v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
+	if (v < 0)
+		return v;
+	if (v != 4)
+		goto unknown;
+
+	v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
+	if (v < 0)
+		return v;
+
+	if (v & 0x80) {
+		v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
+		if (v < 0)
+			return v;
+		return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
+	}
+unknown:
+	return phy_modtype_unknown;
+}
+
+/*
+ * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
+ */
+static int ael2005_setup_sr_edc(struct cphy *phy)
+{
+	static const struct reg_val regs[] = {
+		{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
+		{ MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
+		{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
+		{ 0, 0, 0, 0 }
+	};
+
+	int i, err;
+
+	err = set_phy_regs(phy, regs);
+	if (err)
+		return err;
+
+	msleep(50);
+
+	if (phy->priv != edc_sr)
+		err = t3_get_edc_fw(phy, EDC_OPT_AEL2005,
+				    EDC_OPT_AEL2005_SIZE);
+	if (err)
+		return err;
+
+	for (i = 0; i <  EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+		err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+				    phy->phy_cache[i],
+				    phy->phy_cache[i + 1]);
+	if (!err)
+		phy->priv = edc_sr;
+	return err;
+}
+
+static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+	static const struct reg_val regs[] = {
+		{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
+		{ 0, 0, 0, 0 }
+	};
+	static const struct reg_val preemphasis[] = {
+		{ MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
+		{ MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
+		{ 0, 0, 0, 0 }
+	};
+	int i, err;
+
+	err = set_phy_regs(phy, regs);
+	if (!err && modtype == phy_modtype_twinax_long)
+		err = set_phy_regs(phy, preemphasis);
+	if (err)
+		return err;
+
+	msleep(50);
+
+	if (phy->priv != edc_twinax)
+		err = t3_get_edc_fw(phy, EDC_TWX_AEL2005,
+				    EDC_TWX_AEL2005_SIZE);
+	if (err)
+		return err;
+
+	for (i = 0; i <  EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+		err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+				    phy->phy_cache[i],
+				    phy->phy_cache[i + 1]);
+	if (!err)
+		phy->priv = edc_twinax;
+	return err;
+}
+
+static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
+{
+	int v;
+	unsigned int stat;
+
+	v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
+	if (v)
+		return v;
+
+	if (stat & (1 << 8))			/* module absent */
+		return phy_modtype_none;
+
+	return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+static int ael2005_intr_enable(struct cphy *phy)
+{
+	int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
+	return err ? err : t3_phy_lasi_intr_enable(phy);
+}
+
+static int ael2005_intr_disable(struct cphy *phy)
+{
+	int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
+	return err ? err : t3_phy_lasi_intr_disable(phy);
+}
+
+static int ael2005_intr_clear(struct cphy *phy)
+{
+	int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
+	return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static int ael2005_reset(struct cphy *phy, int wait)
+{
+	static const struct reg_val regs0[] = {
+		{ MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
+		{ MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
+		{ MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
+		{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+		{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
+		{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+		{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
+		{ 0, 0, 0, 0 }
+	};
+	static const struct reg_val regs1[] = {
+		{ MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
+		{ MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
+		{ 0, 0, 0, 0 }
+	};
+
+	int err;
+	unsigned int lasi_ctrl;
+
+	err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+			   &lasi_ctrl);
+	if (err)
+		return err;
+
+	err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
+	if (err)
+		return err;
+
+	msleep(125);
+	phy->priv = edc_none;
+	err = set_phy_regs(phy, regs0);
+	if (err)
+		return err;
+
+	msleep(50);
+
+	err = ael2005_get_module_type(phy, 0);
+	if (err < 0)
+		return err;
+	phy->modtype = err;
+
+	if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+		err = ael2005_setup_twinax_edc(phy, err);
+	else
+		err = ael2005_setup_sr_edc(phy);
+	if (err)
+		return err;
+
+	err = set_phy_regs(phy, regs1);
+	if (err)
+		return err;
+
+	/* reset wipes out interrupts, reenable them if they were on */
+	if (lasi_ctrl & 1)
+		err = ael2005_intr_enable(phy);
+	return err;
+}
+
+static int ael2005_intr_handler(struct cphy *phy)
+{
+	unsigned int stat;
+	int ret, edc_needed, cause = 0;
+
+	ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
+	if (ret)
+		return ret;
+
+	if (stat & AEL2005_MODDET_IRQ) {
+		ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
+				    0xd00);
+		if (ret)
+			return ret;
+
+		/* modules have max 300 ms init time after hot plug */
+		ret = ael2005_get_module_type(phy, 300);
+		if (ret < 0)
+			return ret;
+
+		phy->modtype = ret;
+		if (ret == phy_modtype_none)
+			edc_needed = phy->priv;       /* on unplug retain EDC */
+		else if (ret == phy_modtype_twinax ||
+			 ret == phy_modtype_twinax_long)
+			edc_needed = edc_twinax;
+		else
+			edc_needed = edc_sr;
+
+		if (edc_needed != phy->priv) {
+			ret = ael2005_reset(phy, 0);
+			return ret ? ret : cphy_cause_module_change;
+		}
+		cause = cphy_cause_module_change;
+	}
+
+	ret = t3_phy_lasi_intr_handler(phy);
+	if (ret < 0)
+		return ret;
+
+	ret |= cause;
+	return ret ? ret : cphy_cause_link_change;
+}
+
+static struct cphy_ops ael2005_ops = {
+	.reset           = ael2005_reset,
+	.intr_enable     = ael2005_intr_enable,
+	.intr_disable    = ael2005_intr_disable,
+	.intr_clear      = ael2005_intr_clear,
+	.intr_handler    = ael2005_intr_handler,
+	.get_link_status = get_link_status_r,
+	.power_down      = ael1002_power_down,
+	.mmds            = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+		  SUPPORTED_IRQ, "10GBASE-R");
+	msleep(125);
+	return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
+				   1 << 5);
+}
+
+/*
+ * Setup EDC and other parameters for operation with an optical module.
+ */
+static int ael2020_setup_sr_edc(struct cphy *phy)
+{
+	static const struct reg_val regs[] = {
+		/* set CDR offset to 10 */
+		{ MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
+
+		/* adjust 10G RX bias current */
+		{ MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
+		{ MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
+		{ MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
+
+		/* end */
+		{ 0, 0, 0, 0 }
+	};
+	int err;
+
+	err = set_phy_regs(phy, regs);
+	msleep(50);
+	if (err)
+		return err;
+
+	phy->priv = edc_sr;
+	return 0;
+}
+
+/*
+ * Setup EDC and other parameters for operation with a TWINAX module.
+ */
+static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+	/* set uC to 40MHz */
+	static const struct reg_val uCclock40MHz[] = {
+		{ MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
+		{ MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
+		{ 0, 0, 0, 0 }
+	};
+
+	/* activate uC clock */
+	static const struct reg_val uCclockActivate[] = {
+		{ MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
+		{ 0, 0, 0, 0 }
+	};
+
+	/* set PC to start of SRAM and activate uC */
+	static const struct reg_val uCactivate[] = {
+		{ MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
+		{ MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
+		{ 0, 0, 0, 0 }
+	};
+	int i, err;
+
+	/* set uC clock and activate it */
+	err = set_phy_regs(phy, uCclock40MHz);
+	msleep(500);
+	if (err)
+		return err;
+	err = set_phy_regs(phy, uCclockActivate);
+	msleep(500);
+	if (err)
+		return err;
+
+	if (phy->priv != edc_twinax)
+		err = t3_get_edc_fw(phy, EDC_TWX_AEL2020,
+				    EDC_TWX_AEL2020_SIZE);
+	if (err)
+		return err;
+
+	for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2)
+		err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+				    phy->phy_cache[i],
+				    phy->phy_cache[i + 1]);
+	/* activate uC */
+	err = set_phy_regs(phy, uCactivate);
+	if (!err)
+		phy->priv = edc_twinax;
+	return err;
+}
+
+/*
+ * Return Module Type.
+ */
+static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
+{
+	int v;
+	unsigned int stat;
+
+	v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
+	if (v)
+		return v;
+
+	if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
+		/* module absent */
+		return phy_modtype_none;
+	}
+
+	return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+/*
+ * Enable PHY interrupts.  We enable "Module Detection" interrupts (on any
+ * state transition) and then the generic Link Alarm Status Interrupt (LASI).
+ */
+static int ael2020_intr_enable(struct cphy *phy)
+{
+	static const struct reg_val regs[] = {
+		/* output Module's Loss Of Signal (LOS) to LED */
+		{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
+			0xffff, 0x4 },
+		{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+			0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
+
+		 /* enable module detect status change interrupts */
+		{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+			0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) },
+
+		/* end */
+		{ 0, 0, 0, 0 }
+	};
+	int err, link_ok = 0;
+
+	/* set up "link status" LED and enable module change interrupts */
+	err = set_phy_regs(phy, regs);
+	if (err)
+		return err;
+
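+	/* if the link is already up, notify the driver of the initial state */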
+	err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL);
+	if (err)
+		return err;
+	if (link_ok)
+		t3_link_changed(phy->adapter,
+				phy2portid(phy));
+
+	err = t3_phy_lasi_intr_enable(phy);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Disable PHY interrupts.  The mirror of the above ...
+ */
+static int ael2020_intr_disable(struct cphy *phy)
+{
+	static const struct reg_val regs[] = {
+		/* reset "link status" LED to "off" */
+		{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+			0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
+
+		/* disable module detect status change interrupts */
+		{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+			0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) },
+
+		/* end */
+		{ 0, 0, 0, 0 }
+	};
+	int err;
+
+	/* turn off "link status" LED and disable module change interrupts */
+	err = set_phy_regs(phy, regs);
+	if (err)
+		return err;
+
+	return t3_phy_lasi_intr_disable(phy);
+}
+
+/*
+ * Clear PHY interrupt state.
+ */
+static int ael2020_intr_clear(struct cphy *phy)
+{
+	/*
+	 * The GPIO Interrupt register on the AEL2020 is a "Latching High"
+	 * (LH) register which is cleared to the current state when it's read.
+	 * Thus, we simply read the register and discard the result.
+	 */
+	unsigned int stat;
+	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+	return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static const struct reg_val ael2020_reset_regs[] = {
+	/* Erratum #2: CDRLOL asserted, causing PMA link down status */
+	{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
+
+	/* force XAUI to send LF when RX_LOS is asserted */
+	{ MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
+
+	/* allow writes to transceiver module EEPROM on i2c bus */
+	{ MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 },
+	{ MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 },
+	{ MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 },
+
+	/* end */
+	{ 0, 0, 0, 0 }
+};
+/*
+ * Reset the PHY and put it into a canonical operating state.
+ */
+static int ael2020_reset(struct cphy *phy, int wait)
+{
+	int err;
+	unsigned int lasi_ctrl;
+
+	/* grab current interrupt state */
+	err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+			   &lasi_ctrl);
+	if (err)
+		return err;
+
+	err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
+	if (err)
+		return err;
+	msleep(100);
+
+	/* basic initialization for all module types */
+	phy->priv = edc_none;
+	err = set_phy_regs(phy, ael2020_reset_regs);
+	if (err)
+		return err;
+
+	/* determine module type and perform appropriate initialization */
+	err = ael2020_get_module_type(phy, 0);
+	if (err < 0)
+		return err;
+	phy->modtype = (u8)err;
+	if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+		err = ael2020_setup_twinax_edc(phy, err);
+	else
+		err = ael2020_setup_sr_edc(phy);
+	if (err)
+		return err;
+
+	/* reset wipes out interrupts, reenable them if they were on */
+	if (lasi_ctrl & 1)
+		err = ael2005_intr_enable(phy);
+	return err;
+}
+
+/*
+ * Handle a PHY interrupt.
+ */
+static int ael2020_intr_handler(struct cphy *phy)
+{
+	unsigned int stat;
+	int ret, edc_needed, cause = 0;
+
+	ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+	if (ret)
+		return ret;
+
+	if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
+		/* modules have max 300 ms init time after hot plug */
+		ret = ael2020_get_module_type(phy, 300);
+		if (ret < 0)
+			return ret;
+
+		phy->modtype = (u8)ret;
+		if (ret == phy_modtype_none)
+			edc_needed = phy->priv;       /* on unplug retain EDC */
+		else if (ret == phy_modtype_twinax ||
+			 ret == phy_modtype_twinax_long)
+			edc_needed = edc_twinax;
+		else
+			edc_needed = edc_sr;
+
+		if (edc_needed != phy->priv) {
+			ret = ael2020_reset(phy, 0);
+			return ret ? ret : cphy_cause_module_change;
+		}
+		cause = cphy_cause_module_change;
+	}
+
+	ret = t3_phy_lasi_intr_handler(phy);
+	if (ret < 0)
+		return ret;
+
+	ret |= cause;
+	return ret ? ret : cphy_cause_link_change;
+}
+
+static struct cphy_ops ael2020_ops = {
+	.reset           = ael2020_reset,
+	.intr_enable     = ael2020_intr_enable,
+	.intr_disable    = ael2020_intr_disable,
+	.intr_clear      = ael2020_intr_clear,
+	.intr_handler    = ael2020_intr_handler,
+	.get_link_status = get_link_status_r,
+	.power_down      = ael1002_power_down,
+	.mmds		 = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+			const struct mdio_ops *mdio_ops)
+{
+	int err;
+
+	cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+		  SUPPORTED_IRQ, "10GBASE-R");
+	msleep(125);
+
+	err = set_phy_regs(phy, ael2020_reset_regs);
+	if (err)
+		return err;
+	return 0;
+}
+
+/*
+ * Get link status for a 10GBASE-X device.
+ */
+static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
+			     int *duplex, int *fc)
+{
+	if (link_ok) {
+		unsigned int stat0, stat1, stat2;
+		int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+				       MDIO_PMA_RXDET, &stat0);
+
+		if (!err)
+			err = t3_mdio_read(phy, MDIO_MMD_PCS,
+					   MDIO_PCS_10GBX_STAT1, &stat1);
+		if (!err)
+			err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+					   MDIO_PHYXS_LNSTAT, &stat2);
+		if (err)
+			return err;
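+		/* link is up only when PMA receive signal detect (bit 0) and
+		 * the PCS and PHY XS lane-alignment status bits (bit 12) are
+		 * all set
+		 */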
+		*link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static struct cphy_ops qt2045_ops = {
+	.reset = ael1006_reset,
+	.intr_enable = t3_phy_lasi_intr_enable,
+	.intr_disable = t3_phy_lasi_intr_disable,
+	.intr_clear = t3_phy_lasi_intr_clear,
+	.intr_handler = t3_phy_lasi_intr_handler,
+	.get_link_status = get_link_status_x,
+	.power_down = ael1002_power_down,
+	.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+		       int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	unsigned int stat;
+
+	cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+		  "10GBASE-CX4");
+
+	/*
+	 * Some cards where the PHY is supposed to be at address 0 actually
+	 * have it at 1.
+	 */
+	if (!phy_addr &&
+	    !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
+	    stat == 0xffff)
+		phy->mdio.prtad = 1;
+	return 0;
+}
+
+static int xaui_direct_reset(struct cphy *phy, int wait)
+{
+	return 0;
+}
+
+static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
+				       int *speed, int *duplex, int *fc)
+{
+	if (link_ok) {
+		unsigned int status;
+		int prtad = phy->mdio.prtad;
+
+		status = t3_read_reg(phy->adapter,
+				     XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
+		    t3_read_reg(phy->adapter,
+				    XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
+		    t3_read_reg(phy->adapter,
+				XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
+		    t3_read_reg(phy->adapter,
+				XGM_REG(A_XGM_SERDES_STAT3, prtad));
+		*link_ok = !(status & F_LOWSIG0);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static int xaui_direct_power_down(struct cphy *phy, int enable)
+{
+	return 0;
+}
+
+static struct cphy_ops xaui_direct_ops = {
+	.reset = xaui_direct_reset,
+	.intr_enable = ael1002_intr_noop,
+	.intr_disable = ael1002_intr_noop,
+	.intr_clear = ael1002_intr_noop,
+	.intr_handler = ael1002_intr_noop,
+	.get_link_status = xaui_direct_get_link_status,
+	.power_down = xaui_direct_power_down,
+};
+
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+			    int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+		  "10GBASE-CX4");
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
new file mode 100644
index 0000000..341b7ef
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "common.h"
+#include "regs.h"
+
+enum {
+	/* MDIO_DEV_PMA_PMD registers */
+	AQ_LINK_STAT	= 0xe800,
+	AQ_IMASK_PMA	= 0xf000,
+
+	/* MDIO_DEV_XGXS registers */
+	AQ_XAUI_RX_CFG	= 0xc400,
+	AQ_XAUI_TX_CFG	= 0xe400,
+
+	/* MDIO_DEV_ANEG registers */
+	AQ_1G_CTRL	= 0xc400,
+	AQ_ANEG_STAT	= 0xc800,
+
+	/* MDIO_DEV_VEND1 registers */
+	AQ_FW_VERSION	= 0x0020,
+	AQ_IFLAG_GLOBAL	= 0xfc00,
+	AQ_IMASK_GLOBAL	= 0xff00,
+};
+
+enum {
+	IMASK_PMA	= 1 << 2,
+	IMASK_GLOBAL	= 1 << 15,
+	ADV_1G_FULL	= 1 << 15,
+	ADV_1G_HALF	= 1 << 14,
+	ADV_10G_FULL	= 1 << 12,
+	AQ_RESET	= (1 << 14) | (1 << 15),
+	AQ_LOWPOWER	= 1 << 12,
+};
+
+static int aq100x_reset(struct cphy *phy, int wait)
+{
+	/*
+	 * Ignore the caller-specified wait time; always wait for the reset to
+	 * complete. Can take up to 3s.
+	 */
+	int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
+
+	if (err)
+		CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
+			phy->mdio.prtad, err);
+
+	return err;
+}
+
+static int aq100x_intr_enable(struct cphy *phy)
+{
+	int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
+	return err;
+}
+
+static int aq100x_intr_disable(struct cphy *phy)
+{
+	return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
+}
+
+static int aq100x_intr_clear(struct cphy *phy)
+{
+	unsigned int v;
+
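+	/* reading these registers clears the latched interrupt status */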
+	t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
+	t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+	return 0;
+}
+
+static int aq100x_intr_handler(struct cphy *phy)
+{
+	int err;
+	unsigned int cause, v;
+
+	err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
+	if (err)
+		return err;
+
+	/* Read (and reset) the latching version of the status */
+	t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+	return cphy_cause_link_change;
+}
+
+static int aq100x_power_down(struct cphy *phy, int off)
+{
+	return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+			     MDIO_MMD_PMAPMD, MDIO_CTRL1,
+			     MDIO_CTRL1_LPOWER, off);
+}
+
+static int aq100x_autoneg_enable(struct cphy *phy)
+{
+	int err;
+
+	err = aq100x_power_down(phy, 0);
+	if (!err)
+		err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+				    MDIO_MMD_AN, MDIO_CTRL1,
+				    BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+	return err;
+}
+
+static int aq100x_autoneg_restart(struct cphy *phy)
+{
+	int err;
+
+	err = aq100x_power_down(phy, 0);
+	if (!err)
+		err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+				    MDIO_MMD_AN, MDIO_CTRL1,
+				    BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+	return err;
+}
+
+static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+	unsigned int adv;
+	int err;
+
+	/* 10G advertisement */
+	adv = 0;
+	if (advertise_map & ADVERTISED_10000baseT_Full)
+		adv |= ADV_10G_FULL;
+	err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+				  ADV_10G_FULL, adv);
+	if (err)
+		return err;
+
+	/* 1G advertisement */
+	adv = 0;
+	if (advertise_map & ADVERTISED_1000baseT_Full)
+		adv |= ADV_1G_FULL;
+	if (advertise_map & ADVERTISED_1000baseT_Half)
+		adv |= ADV_1G_HALF;
+	err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
+				  ADV_1G_FULL | ADV_1G_HALF, adv);
+	if (err)
+		return err;
+
+	/* 100M, pause advertisement */
+	adv = 0;
+	if (advertise_map & ADVERTISED_100baseT_Half)
+		adv |= ADVERTISE_100HALF;
+	if (advertise_map & ADVERTISED_100baseT_Full)
+		adv |= ADVERTISE_100FULL;
+	if (advertise_map & ADVERTISED_Pause)
+		adv |= ADVERTISE_PAUSE_CAP;
+	if (advertise_map & ADVERTISED_Asym_Pause)
+		adv |= ADVERTISE_PAUSE_ASYM;
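+	/* 0xfe0 covers the 10/100 ability and pause bits of the AN
+	 * advertisement register
+	 */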
+	err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+				  0xfe0, adv);
+
+	return err;
+}
+
+static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
+{
+	return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+			     MDIO_MMD_PMAPMD, MDIO_CTRL1,
+			     BMCR_LOOPBACK, enable);
+}
+
+static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	/* manual speed/duplex selection is not supported on this PHY */
+	return -1;
+}
+
+static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
+				  int *speed, int *duplex, int *fc)
+{
+	int err;
+	unsigned int v;
+
+	if (link_ok) {
+		err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
+		if (err)
+			return err;
+
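+		/* bit 0 of the vendor link status register reports link up */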
+		*link_ok = v & 1;
+		if (!*link_ok)
+			return 0;
+	}
+
+	err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
+	if (err)
+		return err;
+
+	if (speed) {
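+		/* bits 2:1 of the autoneg status register encode the
+		 * negotiated speed
+		 */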
+		switch (v & 0x6) {
+		case 0x6:
+			*speed = SPEED_10000;
+			break;
+		case 0x4:
+			*speed = SPEED_1000;
+			break;
+		case 0x2:
+			*speed = SPEED_100;
+			break;
+		case 0x0:
+			*speed = SPEED_10;
+			break;
+		}
+	}
+
+	if (duplex)
+		*duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
+
+	return 0;
+}
+
+static struct cphy_ops aq100x_ops = {
+	.reset             = aq100x_reset,
+	.intr_enable       = aq100x_intr_enable,
+	.intr_disable      = aq100x_intr_disable,
+	.intr_clear        = aq100x_intr_clear,
+	.intr_handler      = aq100x_intr_handler,
+	.autoneg_enable    = aq100x_autoneg_enable,
+	.autoneg_restart   = aq100x_autoneg_restart,
+	.advertise         = aq100x_advertise,
+	.set_loopback      = aq100x_set_loopback,
+	.set_speed_duplex  = aq100x_set_speed_duplex,
+	.get_link_status   = aq100x_get_link_status,
+	.power_down        = aq100x_power_down,
+	.mmds 		   = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+		       const struct mdio_ops *mdio_ops)
+{
+	unsigned int v, v2, gpio, wait;
+	int err;
+
+	cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
+		  SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+		  SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI,
+		  "1000/10GBASE-T");
+
+	/*
+	 * The PHY has been out of reset ever since the system powered up, so
+	 * we perform a hard reset here.
+	 */
+	gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
+	msleep(1);
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
+
+	/*
+	 * Give it enough time to load the firmware and get ready for mdio.
+	 */
+	msleep(1000);
+	wait = 500; /* in 10ms increments */
+	do {
+		err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+		if (err || v == 0xffff) {
+
+			/* Allow prep_adapter to succeed when 0xffff is read */
+
+			CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
+				phy_addr, err, v);
+			goto done;
+		}
+
+		v &= AQ_RESET;
+		if (v)
+			msleep(10);
+	} while (v && --wait);
+	if (v) {
+		CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
+			phy_addr, v);
+
+		goto done; /* let prep_adapter succeed */
+	}
+
+	/* Datasheet says 3s max but longer resets have been observed */
+	wait = (500 - wait) * 10 + 1000;
+	if (wait > 3000)
+		CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
+
+	/* Firmware version check. */
+	t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
+	if (v != 101)
+		CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
+			phy_addr, v);
+
+	/*
+	 * The PHY should start in low-power mode.  Prepare it for normal
+	 * operations.
+	 */
+	err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+	if (err)
+		return err;
+	if (v & AQ_LOWPOWER) {
+		err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
+					  AQ_LOWPOWER, 0);
+		if (err)
+			return err;
+		msleep(10);
+	} else
+		CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
+			phy_addr);
+
+	/*
+	 * Verify XAUI settings, but let prep succeed no matter what.
+	 */
+	v = v2 = 0;
+	t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
+	t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
+	if (v != 0x1b || v2 != 0x1b)
+		CH_WARN(adapter,
+			"PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
+			phy_addr, v, v2);
+
+done:
+	return err;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
new file mode 100644
index 0000000..4424809
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include "version.h"
+
+#define CH_ERR(adap, fmt, ...)   dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...)  dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+	if ((adapter)->msg_enable & NETIF_MSG_##category) \
+		dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
+			   ## __VA_ARGS__); \
+} while (0)
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(adapter, category, fmt, ...)
+#endif
+
+/* Additional NETIF_MSG_* categories */
+#define NETIF_MSG_MMIO 0x8000000
+
+enum {
+	MAX_NPORTS = 2,		/* max # of ports */
+	MAX_FRAME_SIZE = 10240,	/* max MAC frame size, including header + FCS */
+	EEPROMSIZE = 8192,	/* Serial EEPROM size */
+	SERNUM_LEN     = 16,    /* Serial # length */
+	RSS_TABLE_SIZE = 64,	/* size of RSS lookup and mapping tables */
+	TCB_SIZE = 128,		/* TCB size */
+	NMTUS = 16,		/* size of MTU table */
+	NCCTRL_WIN = 32,	/* # of congestion control windows */
+	PROTO_SRAM_LINES = 128, /* size of TP sram */
+};
+
+#define MAX_RX_COALESCING_LEN 12288U
+
+enum {
+	PAUSE_RX = 1 << 0,
+	PAUSE_TX = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+enum {
+	SUPPORTED_IRQ      = 1 << 24
+};
+
+enum {				/* adapter interrupt-maintained statistics */
+	STAT_ULP_CH0_PBL_OOB,
+	STAT_ULP_CH1_PBL_OOB,
+	STAT_PCI_CORR_ECC,
+
+	IRQ_NUM_STATS		/* keep last */
+};
+
+#define TP_VERSION_MAJOR	1
+#define TP_VERSION_MINOR	1
+#define TP_VERSION_MICRO	0
+
+#define S_TP_VERSION_MAJOR		16
+#define M_TP_VERSION_MAJOR		0xFF
+#define V_TP_VERSION_MAJOR(x)		((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x)		\
+	    (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR		8
+#define M_TP_VERSION_MINOR		0xFF
+#define V_TP_VERSION_MINOR(x)		((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x)		\
+	    (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO		0
+#define M_TP_VERSION_MICRO		0xFF
+#define V_TP_VERSION_MICRO(x)		((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x)		\
+	    (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
+	SGE_QSETS = 8,		/* # of SGE Tx/Rx/RspQ sets */
+	SGE_RXQ_PER_SET = 2,	/* # of Rx queues per set */
+	SGE_TXQ_PER_SET = 3	/* # of Tx queues per set */
+};
+
+enum sge_context_type {		/* SGE egress context types */
+	SGE_CNTXT_RDMA = 0,
+	SGE_CNTXT_ETH = 2,
+	SGE_CNTXT_OFLD = 4,
+	SGE_CNTXT_CTRL = 5
+};
+
+enum {
+	AN_PKT_SIZE = 32,	/* async notification packet size */
+	IMMED_PKT_SIZE = 48	/* packet size for immediate data */
+};
+
+struct sg_ent {			/* SGE scatter/gather entry */
+	__be32 len[2];
+	__be64 addr[2];
+};
+
+#ifndef SGE_NUM_GENBITS
+/* Must be 1 or 2 */
+# define SGE_NUM_GENBITS 2
+#endif
+
+#define TX_DESC_FLITS 16U
+#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
+
+struct cphy;
+struct adapter;
+
+struct mdio_ops {
+	int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+		    u16 reg_addr);
+	int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+		     u16 reg_addr, u16 val);
+	unsigned mode_support;
+};
+
+struct adapter_info {
+	unsigned char nports0;        /* # of ports on channel 0 */
+	unsigned char nports1;        /* # of ports on channel 1 */
+	unsigned char phy_base_addr;	/* MDIO PHY base address */
+	unsigned int gpio_out;	/* GPIO output settings */
+	unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
+	unsigned long caps;	/* adapter capabilities */
+	const struct mdio_ops *mdio_ops;	/* MDIO operations */
+	const char *desc;	/* product description */
+};
+
+struct mc5_stats {
+	unsigned long parity_err;
+	unsigned long active_rgn_full;
+	unsigned long nfa_srch_err;
+	unsigned long unknown_cmd;
+	unsigned long reqq_parity_err;
+	unsigned long dispq_parity_err;
+	unsigned long del_act_empty;
+};
+
+struct mc7_stats {
+	unsigned long corr_err;
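+	/* if no specific cause was found, report a link change by default */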
+	unsigned long uncorr_err;
+	unsigned long parity_err;
+	unsigned long addr_err;
+};
+
+struct mac_stats {
+	u64 tx_octets;		/* total # of octets in good frames */
+	u64 tx_octets_bad;	/* total # of octets in error frames */
+	u64 tx_frames;		/* all good frames */
+	u64 tx_mcast_frames;	/* good multicast frames */
+	u64 tx_bcast_frames;	/* good broadcast frames */
+	u64 tx_pause;		/* # of transmitted pause frames */
+	u64 tx_deferred;	/* frames with deferred transmissions */
+	u64 tx_late_collisions;	/* # of late collisions */
+	u64 tx_total_collisions;	/* # of total collisions */
+	u64 tx_excess_collisions;	/* frame errors from excessive collisions */
+	u64 tx_underrun;	/* # of Tx FIFO underruns */
+	u64 tx_len_errs;	/* # of Tx length errors */
+	u64 tx_mac_internal_errs;	/* # of internal MAC errors on Tx */
+	u64 tx_excess_deferral;	/* # of frames with excessive deferral */
+	u64 tx_fcs_errs;	/* # of frames with bad FCS */
+
+	u64 tx_frames_64;	/* # of Tx frames in a particular range */
+	u64 tx_frames_65_127;
+	u64 tx_frames_128_255;
+	u64 tx_frames_256_511;
+	u64 tx_frames_512_1023;
+	u64 tx_frames_1024_1518;
+	u64 tx_frames_1519_max;
+
+	u64 rx_octets;		/* total # of octets in good frames */
+	u64 rx_octets_bad;	/* total # of octets in error frames */
+	u64 rx_frames;		/* all good frames */
+	u64 rx_mcast_frames;	/* good multicast frames */
+	u64 rx_bcast_frames;	/* good broadcast frames */
+	u64 rx_pause;		/* # of received pause frames */
+	u64 rx_fcs_errs;	/* # of received frames with bad FCS */
+	u64 rx_align_errs;	/* alignment errors */
+	u64 rx_symbol_errs;	/* symbol errors */
+	u64 rx_data_errs;	/* data errors */
+	u64 rx_sequence_errs;	/* sequence errors */
+	u64 rx_runt;		/* # of runt frames */
+	u64 rx_jabber;		/* # of jabber frames */
+	u64 rx_short;		/* # of short frames */
+	u64 rx_too_long;	/* # of oversized frames */
+	u64 rx_mac_internal_errs;	/* # of internal MAC errors on Rx */
+
+	u64 rx_frames_64;	/* # of Rx frames in a particular range */
+	u64 rx_frames_65_127;
+	u64 rx_frames_128_255;
+	u64 rx_frames_256_511;
+	u64 rx_frames_512_1023;
+	u64 rx_frames_1024_1518;
+	u64 rx_frames_1519_max;
+
+	u64 rx_cong_drops;	/* # of Rx drops due to SGE congestion */
+
+	unsigned long tx_fifo_parity_err;
+	unsigned long rx_fifo_parity_err;
+	unsigned long tx_fifo_urun;
+	unsigned long rx_fifo_ovfl;
+	unsigned long serdes_signal_loss;
+	unsigned long xaui_pcs_ctc_err;
+	unsigned long xaui_pcs_align_change;
+
+	unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
+	unsigned long num_resets;  /* # times reset due to stuck TX */
+
+	unsigned long link_faults;  /* # detected link faults */
+};
+
+struct tp_mib_stats {
+	u32 ipInReceive_hi;
+	u32 ipInReceive_lo;
+	u32 ipInHdrErrors_hi;
+	u32 ipInHdrErrors_lo;
+	u32 ipInAddrErrors_hi;
+	u32 ipInAddrErrors_lo;
+	u32 ipInUnknownProtos_hi;
+	u32 ipInUnknownProtos_lo;
+	u32 ipInDiscards_hi;
+	u32 ipInDiscards_lo;
+	u32 ipInDelivers_hi;
+	u32 ipInDelivers_lo;
+	u32 ipOutRequests_hi;
+	u32 ipOutRequests_lo;
+	u32 ipOutDiscards_hi;
+	u32 ipOutDiscards_lo;
+	u32 ipOutNoRoutes_hi;
+	u32 ipOutNoRoutes_lo;
+	u32 ipReasmTimeout;
+	u32 ipReasmReqds;
+	u32 ipReasmOKs;
+	u32 ipReasmFails;
+
+	u32 reserved[8];
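+	/* the EDC microcode is cached as (register, value) pairs */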
+
+	u32 tcpActiveOpens;
+	u32 tcpPassiveOpens;
+	u32 tcpAttemptFails;
+	u32 tcpEstabResets;
+	u32 tcpOutRsts;
+	u32 tcpCurrEstab;
+	u32 tcpInSegs_hi;
+	u32 tcpInSegs_lo;
+	u32 tcpOutSegs_hi;
+	u32 tcpOutSegs_lo;
+	u32 tcpRetransSeg_hi;
+	u32 tcpRetransSeg_lo;
+	u32 tcpInErrs_hi;
+	u32 tcpInErrs_lo;
+	u32 tcpRtoMin;
+	u32 tcpRtoMax;
+};
+
+struct tp_params {
+	unsigned int nchan;	/* # of channels */
+	unsigned int pmrx_size;	/* total PMRX capacity */
+	unsigned int pmtx_size;	/* total PMTX capacity */
+	unsigned int cm_size;	/* total CM capacity */
+	unsigned int chan_rx_size;	/* per channel Rx size */
+	unsigned int chan_tx_size;	/* per channel Tx size */
+	unsigned int rx_pg_size;	/* Rx page size */
+	unsigned int tx_pg_size;	/* Tx page size */
+	unsigned int rx_num_pgs;	/* # of Rx pages */
+	unsigned int tx_num_pgs;	/* # of Tx pages */
+	unsigned int ntimer_qs;	/* # of timer queues */
+};
+
+struct qset_params {		/* SGE queue set parameters */
+	unsigned int polling;	/* polling/interrupt service for rspq */
+	unsigned int coalesce_usecs;	/* irq coalescing timer */
+	unsigned int rspq_size;	/* # of entries in response queue */
+	unsigned int fl_size;	/* # of entries in regular free list */
+	unsigned int jumbo_size;	/* # of entries in jumbo free list */
+	unsigned int txq_size[SGE_TXQ_PER_SET];	/* Tx queue sizes */
+	unsigned int cong_thres;	/* FL congestion threshold */
+	unsigned int vector;		/* Interrupt (line or vector) number */
+};
+
+struct sge_params {
+	unsigned int max_pkt_size;	/* max offload pkt size */
+	struct qset_params qset[SGE_QSETS];
+};
+
+struct mc5_params {
+	unsigned int mode;	/* selects MC5 width */
+	unsigned int nservers;	/* size of server region */
+	unsigned int nfilters;	/* size of filter region */
+	unsigned int nroutes;	/* size of routing region */
+};
+
+/* Default MC5 region sizes */
+enum {
+	DEFAULT_NSERVERS = 512,
+	DEFAULT_NFILTERS = 128
+};
+
+/* MC5 modes, these must be non-0 */
+enum {
+	MC5_MODE_144_BIT = 1,
+	MC5_MODE_72_BIT = 2
+};
+
+/* MC5 min active region size */
+enum { MC5_MIN_TIDS = 16 };
+
+struct vpd_params {
+	unsigned int cclk;
+	unsigned int mclk;
+	unsigned int uclk;
+	unsigned int mdc;
+	unsigned int mem_timing;
+	u8 sn[SERNUM_LEN + 1];
+	u8 eth_base[6];
+	u8 port_type[MAX_NPORTS];
+	unsigned short xauicfg[2];
+};
+
+struct pci_params {
+	unsigned int vpd_cap_addr;
+	unsigned short speed;
+	unsigned char width;
+	unsigned char variant;
+};
+
+enum {
+	PCI_VARIANT_PCI,
+	PCI_VARIANT_PCIX_MODE1_PARITY,
+	PCI_VARIANT_PCIX_MODE1_ECC,
+	PCI_VARIANT_PCIX_266_MODE2,
+	PCI_VARIANT_PCIE
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct mc5_params mc5;
+	struct tp_params tp;
+	struct vpd_params vpd;
+	struct pci_params pci;
+
+	const struct adapter_info *info;
+
+	unsigned short mtus[NMTUS];
+	unsigned short a_wnd[NCCTRL_WIN];
+	unsigned short b_wnd[NCCTRL_WIN];
+
+	unsigned int nports;	/* # of ethernet ports */
+	unsigned int chan_map;  /* bitmap of in-use Tx channels */
+	unsigned int stats_update_period;	/* MAC stats accumulation period */
+	unsigned int linkpoll_period;	/* link poll period in 0.1s */
+	unsigned int rev;	/* chip revision */
+	unsigned int offload;
+};
+
+enum {					    /* chip revisions */
+	T3_REV_A  = 0,
+	T3_REV_B  = 2,
+	T3_REV_B2 = 3,
+	T3_REV_C  = 4,
+};
+
+struct trace_params {
+	u32 sip;
+	u32 sip_mask;
+	u32 dip;
+	u32 dip_mask;
+	u16 sport;
+	u16 sport_mask;
+	u16 dport;
+	u16 dport_mask;
+	u32 vlan:12;
+	u32 vlan_mask:12;
+	u32 intf:4;
+	u32 intf_mask:4;
+	u8 proto;
+	u8 proto_mask;
+};
+
+struct link_config {
+	unsigned int supported;	/* link capabilities */
+	unsigned int advertising;	/* advertised capabilities */
+	unsigned short requested_speed;	/* speed user has requested */
+	unsigned short speed;	/* actual link speed */
+	unsigned char requested_duplex;	/* duplex user has requested */
+	unsigned char duplex;	/* actual link duplex */
+	unsigned char requested_fc;	/* flow control user has requested */
+	unsigned char fc;	/* actual link flow control */
+	unsigned char autoneg;	/* autonegotiating? */
+	unsigned int link_ok;	/* link up? */
+};
+
+#define SPEED_INVALID   0xffff
+#define DUPLEX_INVALID  0xff
+
+struct mc5 {
+	struct adapter *adapter;
+	unsigned int tcam_size;
+	unsigned char part_type;
+	unsigned char parity_enabled;
+	unsigned char mode;
+	struct mc5_stats stats;
+};
+
+static inline unsigned int t3_mc5_size(const struct mc5 *p)
+{
+	return p->tcam_size;
+}
+
+struct mc7 {
+	struct adapter *adapter;	/* backpointer to adapter */
+	unsigned int size;	/* memory size in bytes */
+	unsigned int width;	/* MC7 interface width */
+	unsigned int offset;	/* register address offset for MC7 instance */
+	const char *name;	/* name of MC7 instance */
+	struct mc7_stats stats;	/* MC7 statistics */
+};
+
+static inline unsigned int t3_mc7_size(const struct mc7 *p)
+{
+	return p->size;
+}
+
+struct cmac {
+	struct adapter *adapter;
+	unsigned int offset;
+	unsigned int nucast;	/* # of address filters for unicast MACs */
+	unsigned int tx_tcnt;
+	unsigned int tx_xcnt;
+	u64 tx_mcnt;
+	unsigned int rx_xcnt;
+	unsigned int rx_ocnt;
+	u64 rx_mcnt;
+	unsigned int toggle_cnt;
+	unsigned int txen;
+	u64 rx_pause;
+	struct mac_stats stats;
+};
+
+enum {
+	MAC_DIRECTION_RX = 1,
+	MAC_DIRECTION_TX = 2,
+	MAC_RXFIFO_SIZE = 32768
+};
+
+/* PHY loopback direction */
+enum {
+	PHY_LOOPBACK_TX = 1,
+	PHY_LOOPBACK_RX = 2
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 1,
+	cphy_cause_fifo_error = 2,
+	cphy_cause_module_change = 4,
+};
+
+/* PHY module types */
+enum {
+	phy_modtype_none,
+	phy_modtype_sr,
+	phy_modtype_lr,
+	phy_modtype_lrm,
+	phy_modtype_twinax,
+	phy_modtype_twinax_long,
+	phy_modtype_unknown
+};
+
+/* PHY operations */
+struct cphy_ops {
+	int (*reset)(struct cphy *phy, int wait);
+
+	int (*intr_enable)(struct cphy *phy);
+	int (*intr_disable)(struct cphy *phy);
+	int (*intr_clear)(struct cphy *phy);
+	int (*intr_handler)(struct cphy *phy);
+
+	int (*autoneg_enable)(struct cphy *phy);
+	int (*autoneg_restart)(struct cphy *phy);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+	int (*power_down)(struct cphy *phy, int enable);
+
+	u32 mmds;
+};
+enum {
+	EDC_OPT_AEL2005 = 0,
+	EDC_OPT_AEL2005_SIZE = 1084,
+	EDC_TWX_AEL2005 = 1,
+	EDC_TWX_AEL2005_SIZE = 1464,
+	EDC_TWX_AEL2020 = 2,
+	EDC_TWX_AEL2020_SIZE = 1628,
+	EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
+};
+
+/* A PHY instance */
+struct cphy {
+	u8 modtype;			/* PHY module type */
+	short priv;			/* scratch pad */
+	unsigned int caps;		/* PHY capabilities */
+	struct adapter *adapter;	/* associated adapter */
+	const char *desc;		/* PHY description */
+	unsigned long fifo_errors;	/* FIFO over/under-flows */
+	const struct cphy_ops *ops;	/* PHY operations */
+	struct mdio_if_info mdio;
+	u16 phy_cache[EDC_MAX_SIZE];	/* EDC cache */
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
+			       unsigned int *valp)
+{
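+	/* mdio_read() returns the register value or a negative errno;
+	 * convert that into a 0/-errno return code plus *valp
+	 */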
+	int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
+	*valp = (rc >= 0) ? rc : -1;
+	return (rc >= 0) ? 0 : rc;
+}
+
+static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
+				unsigned int val)
+{
+	return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
+				    reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     const struct mdio_ops *mdio_ops,
+			      unsigned int caps, const char *desc)
+{
+	phy->caps = caps;
+	phy->adapter = adapter;
+	phy->desc = desc;
+	phy->ops = phy_ops;
+	if (mdio_ops) {
+		phy->mdio.prtad = phy_addr;
+		phy->mdio.mmds = phy_ops->mmds;
+		phy->mdio.mode_support = mdio_ops->mode_support;
+		phy->mdio.mdio_read = mdio_ops->read;
+		phy->mdio.mdio_write = mdio_ops->write;
+	}
+}
+
+/* Accumulate MAC statistics every 180 seconds.  For 1G we multiply by 10. */
+#define MAC_STATS_ACCUM_SECS 180
+
+#define XGM_REG(reg_addr, idx) \
+	((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
+
+struct addr_val_pair {
+	unsigned int reg_addr;
+	unsigned int val;
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define adapter_info(adap) ((adap)->params.info)
+
+static inline int uses_xaui(const struct adapter *adap)
+{
+	return adapter_info(adap)->caps & SUPPORTED_AUI;
+}
+
+static inline int is_10G(const struct adapter *adap)
+{
+	return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+	return adap->params.offload;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+	return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int is_pcie(const struct adapter *adap)
+{
+	return adap->params.pci.variant == PCI_VARIANT_PCIE;
+}
+
+void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+		      u32 val);
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+		   int n, unsigned int offset);
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			int polarity, int attempts, int delay, u32 *valp);
+static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+			unsigned int set);
+int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
+int t3_phy_lasi_intr_enable(struct cphy *phy);
+int t3_phy_lasi_intr_disable(struct cphy *phy);
+int t3_phy_lasi_intr_clear(struct cphy *phy);
+int t3_phy_lasi_intr_handler(struct cphy *phy);
+
+void t3_intr_enable(struct adapter *adapter);
+void t3_intr_disable(struct adapter *adapter);
+void t3_intr_clear(struct adapter *adapter);
+void t3_xgm_intr_enable(struct adapter *adapter, int idx);
+void t3_xgm_intr_disable(struct adapter *adapter, int idx);
+void t3_port_intr_enable(struct adapter *adapter, int idx);
+void t3_port_intr_disable(struct adapter *adapter, int idx);
+int t3_slow_intr_handler(struct adapter *adapter);
+int t3_phy_intr_handler(struct adapter *adapter);
+
+void t3_link_changed(struct adapter *adapter, int port_id);
+void t3_link_fault(struct adapter *adapter, int port_id);
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
+int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_get_tp_version(struct adapter *adapter, u32 *vers);
+int t3_check_tpsram_version(struct adapter *adapter);
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
+		    unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, const u8 *data);
+int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
+int t3_get_fw_version(struct adapter *adapter, u32 *vers);
+int t3_check_fw_version(struct adapter *adapter);
+int t3_init_hw(struct adapter *adapter, u32 fw_params);
+int t3_reset_adapter(struct adapter *adapter);
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+		    int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
+void t3_led_ready(struct adapter *adapter);
+void t3_fatal_err(struct adapter *adapter);
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+		   const u8 * cpus, const u16 *rspq);
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+			unsigned int n, unsigned int *valp);
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+		   u64 *buf);
+
+int t3_mac_reset(struct cmac *mac);
+void t3b_pcs_reset(struct cmac *mac);
+void t3_mac_disable_exact_filters(struct cmac *mac);
+void t3_mac_enable_exact_filters(struct cmac *mac);
+int t3_mac_enable(struct cmac *mac, int which);
+int t3_mac_disable(struct cmac *mac, int which);
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_num_ucast(struct cmac *mac, int n);
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3b2_mac_watchdog_task(struct cmac *mac);
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+		unsigned int nroutes);
+void t3_mc5_intr_handler(struct mc5 *mc5);
+
+void t3_tp_set_offload_mode(struct adapter *adap, int enable);
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+		  unsigned short alpha[NCCTRL_WIN],
+		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
+void t3_config_trace_filter(struct adapter *adapter,
+			    const struct trace_params *tp, int filter_index,
+			    int invert, int enable);
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
+
+void t3_sge_prep(struct adapter *adap, struct sge_params *p);
+void t3_sge_init(struct adapter *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+		       enum sge_context_type type, int respq, u64 base_addr,
+		       unsigned int size, unsigned int token, int gen,
+		       unsigned int cidx);
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+			int gts_enable, u64 base_addr, unsigned int size,
+			unsigned int esize, unsigned int cong_thres, int gen,
+			unsigned int cidx);
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+			 int irq_vec_idx, u64 base_addr, unsigned int size,
+			 unsigned int fl_thres, int gen, unsigned int cidx);
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+			unsigned int size, int rspq, int ovfl_mode,
+			unsigned int credits, unsigned int credit_thres);
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+		      unsigned int credits);
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+		       const struct mdio_ops *mdio_ops);
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+			    int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
+			    int phy_addr, const struct mdio_ops *mdio_ops);
+#endif				/* __CHELSIO_COMMON_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 0000000..369fe71
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
+#define _CXGB3_OFFLOAD_CTL_DEFS_H
+
+enum {
+	GET_MAX_OUTSTANDING_WR 	= 0,
+	GET_TX_MAX_CHUNK	= 1,
+	GET_TID_RANGE		= 2,
+	GET_STID_RANGE		= 3,
+	GET_RTBL_RANGE		= 4,
+	GET_L2T_CAPACITY	= 5,
+	GET_MTUS		= 6,
+	GET_WR_LEN		= 7,
+	GET_IFF_FROM_MAC	= 8,
+	GET_DDP_PARAMS		= 9,
+	GET_PORTS		= 10,
+
+	ULP_ISCSI_GET_PARAMS	= 11,
+	ULP_ISCSI_SET_PARAMS	= 12,
+
+	RDMA_GET_PARAMS		= 13,
+	RDMA_CQ_OP		= 14,
+	RDMA_CQ_SETUP		= 15,
+	RDMA_CQ_DISABLE		= 16,
+	RDMA_CTRL_QP_SETUP	= 17,
+	RDMA_GET_MEM		= 18,
+	RDMA_GET_MIB		= 19,
+
+	GET_RX_PAGE_INFO	= 50,
+	GET_ISCSI_IPV4ADDR	= 51,
+
+	GET_EMBEDDED_INFO	= 70,
+};
+
+/*
+ * Structure used to describe a TID range.  Valid TIDs are [base, base+num).
+ */
+struct tid_range {
+	unsigned int base;	/* first TID */
+	unsigned int num;	/* number of TIDs in range */
+};
+
+/*
+ * Structure used to request the size and contents of the MTU table.
+ */
+struct mtutab {
+	unsigned int size;	/* # of entries in the MTU table */
+	const unsigned short *mtus;	/* the MTU table values */
+};
+
+struct net_device;
+
+/*
+ * Structure used to request the adapter net_device owning a given MAC address.
+ */
+struct iff_mac {
+	struct net_device *dev;	/* the net_device */
+	const unsigned char *mac_addr;	/* MAC address to lookup */
+	u16 vlan_tag;
+};
+
+/* Structure used to request a port's iSCSI IPv4 address */
+struct iscsi_ipv4addr {
+	struct net_device *dev;	/* the net_device */
+	__be32 ipv4addr;	/* the return iSCSI IPv4 address */
+};
+
+struct pci_dev;
+
+/*
+ * Structure used to request the TCP DDP parameters.
+ */
+struct ddp_params {
+	unsigned int llimit;	/* TDDP region start address */
+	unsigned int ulimit;	/* TDDP region end address */
+	unsigned int tag_mask;	/* TDDP tag mask */
+	struct pci_dev *pdev;
+};
+
+struct adap_ports {
+	unsigned int nports;	/* number of ports on this adapter */
+	struct net_device *lldevs[2];
+};
+
+/*
+ * Structure used to return information to the iscsi layer.
+ */
+struct ulp_iscsi_info {
+	unsigned int offset;
+	unsigned int llimit;
+	unsigned int ulimit;
+	unsigned int tagmask;
+	u8 pgsz_factor[4];
+	unsigned int max_rxsz;
+	unsigned int max_txsz;
+	struct pci_dev *pdev;
+};
+
+/*
+ * Structure used to return information to the RDMA layer.
+ */
+struct rdma_info {
+	unsigned int tpt_base;	/* TPT base address */
+	unsigned int tpt_top;	/* TPT last entry address */
+	unsigned int pbl_base;	/* PBL base address */
+	unsigned int pbl_top;	/* PBL last entry address */
+	unsigned int rqt_base;	/* RQT base address */
+	unsigned int rqt_top;	/* RQT last entry address */
+	unsigned int udbell_len;	/* user doorbell region length */
+	unsigned long udbell_physbase;	/* user doorbell physical start addr */
+	void __iomem *kdb_addr;	/* kernel doorbell register address */
+	struct pci_dev *pdev;	/* associated PCI device */
+};
+
+/*
+ * Structure used to request an operation on an RDMA completion queue.
+ */
+struct rdma_cq_op {
+	unsigned int id;
+	unsigned int op;
+	unsigned int credits;
+};
+
+/*
+ * Structure used to setup RDMA completion queues.
+ */
+struct rdma_cq_setup {
+	unsigned int id;
+	unsigned long long base_addr;
+	unsigned int size;
+	unsigned int credits;
+	unsigned int credit_thres;
+	unsigned int ovfl_mode;
+};
+
+/*
+ * Structure used to setup the RDMA control egress context.
+ */
+struct rdma_ctrlqp_setup {
+	unsigned long long base_addr;
+	unsigned int size;
+};
+
+/*
+ * Offload TX/RX page information.
+ */
+struct ofld_page_info {
+	unsigned int page_size;  /* Page size, should be a power of 2 */
+	unsigned int num;        /* Number of pages */
+};
+
+/*
+ * Structure used to get firmware and protocol engine versions.
+ */
+struct ch_embedded_info {
+	u32 fw_vers;
+	u32 tp_vers;
+};
+#endif				/* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
new file mode 100644
index 0000000..920d918
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_DEFS_H
+#define _CHELSIO_DEFS_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "t3cdev.h"
+
+#include "cxgb3_offload.h"
+
+#define VALIDATE_TID 1
+
+void *cxgb_alloc_mem(unsigned long size);
+void cxgb_free_mem(void *addr);
+
+/*
+ * Map an ATID or STID to their entries in the corresponding TID tables.
+ */
+static inline union active_open_entry *atid2entry(const struct tid_info *t,
+						  unsigned int atid)
+{
+	return &t->atid_tab[atid - t->atid_base];
+}
+
+static inline union listen_entry *stid2entry(const struct tid_info *t,
+					     unsigned int stid)
+{
+	return &t->stid_tab[stid - t->stid_base];
+}
+
+/*
+ * Find the connection corresponding to a TID.
+ */
+static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
+					       unsigned int tid)
+{
+	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+	    &(t->tid_tab[tid]) : NULL;
+
+	return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
+}
+
+/*
+ * Find the connection corresponding to a server TID.
+ */
+static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
+						unsigned int tid)
+{
+	union listen_entry *e;
+
+	if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
+		return NULL;
+
+	e = stid2entry(t, tid);
+	if ((void *)e->next >= (void *)t->tid_tab &&
+	    (void *)e->next < (void *)&t->atid_tab[t->natids])
+		return NULL;
+
+	return &e->t3c_tid;
+}
+
+/*
+ * Find the connection corresponding to an active-open TID.
+ */
+static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
+						unsigned int tid)
+{
+	union active_open_entry *e;
+
+	if (tid < t->atid_base || tid >= t->atid_base + t->natids)
+		return NULL;
+
+	e = atid2entry(t, tid);
+	if ((void *)e->next >= (void *)t->tid_tab &&
+	    (void *)e->next < (void *)&t->atid_tab[t->natids])
+		return NULL;
+
+	return &e->t3c_tid;
+}
+
+int attach_t3cdev(struct t3cdev *dev);
+void detach_t3cdev(struct t3cdev *dev);
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 0000000..b19e437
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHIOCTL_H__
+#define __CHIOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+	CHELSIO_GETMTUTAB 		= 1029,
+	CHELSIO_SETMTUTAB 		= 1030,
+	CHELSIO_SET_PM 			= 1032,
+	CHELSIO_GET_PM			= 1033,
+	CHELSIO_GET_MEM			= 1038,
+	CHELSIO_LOAD_FW			= 1041,
+	CHELSIO_SET_TRACE_FILTER	= 1044,
+	CHELSIO_SET_QSET_PARAMS		= 1045,
+	CHELSIO_GET_QSET_PARAMS		= 1046,
+	CHELSIO_SET_QSET_NUM		= 1047,
+	CHELSIO_GET_QSET_NUM		= 1048,
+};
+
+struct ch_reg {
+	uint32_t cmd;
+	uint32_t addr;
+	uint32_t val;
+};
+
+struct ch_cntxt {
+	uint32_t cmd;
+	uint32_t cntxt_type;
+	uint32_t cntxt_id;
+	uint32_t data[4];
+};
+
+/* context types */
+enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
+
+struct ch_desc {
+	uint32_t cmd;
+	uint32_t queue_num;
+	uint32_t idx;
+	uint32_t size;
+	uint8_t data[128];
+};
+
+struct ch_mem_range {
+	uint32_t cmd;
+	uint32_t mem_id;
+	uint32_t addr;
+	uint32_t len;
+	uint32_t version;
+	uint8_t buf[0];
+};
+
+struct ch_qset_params {
+	uint32_t cmd;
+	uint32_t qset_idx;
+	int32_t txq_size[3];
+	int32_t rspq_size;
+	int32_t fl_size[2];
+	int32_t intr_lat;
+	int32_t polling;
+	int32_t lro;
+	int32_t cong_thres;
+	int32_t  vector;
+	int32_t  qnum;
+};
+
+struct ch_pktsched_params {
+	uint32_t cmd;
+	uint8_t sched;
+	uint8_t idx;
+	uint8_t min;
+	uint8_t max;
+	uint8_t binding;
+};
+
+#ifndef TCB_SIZE
+# define TCB_SIZE   128
+#endif
+
+/* TCB size in 32-bit words */
+#define TCB_WORDS (TCB_SIZE / 4)
+
+enum { MEM_CM, MEM_PMRX, MEM_PMTX };	/* ch_mem_range.mem_id values */
+
+struct ch_mtus {
+	uint32_t cmd;
+	uint32_t nmtus;
+	uint16_t mtus[NMTUS];
+};
+
+struct ch_pm {
+	uint32_t cmd;
+	uint32_t tx_pg_sz;
+	uint32_t tx_num_pg;
+	uint32_t rx_pg_sz;
+	uint32_t rx_num_pg;
+	uint32_t pm_total;
+};
+
+struct ch_tcam {
+	uint32_t cmd;
+	uint32_t tcam_size;
+	uint32_t nservers;
+	uint32_t nroutes;
+	uint32_t nfilters;
+};
+
+struct ch_tcb {
+	uint32_t cmd;
+	uint32_t tcb_index;
+	uint32_t tcb_data[TCB_WORDS];
+};
+
+struct ch_tcam_word {
+	uint32_t cmd;
+	uint32_t addr;
+	uint32_t buf[3];
+};
+
+struct ch_trace {
+	uint32_t cmd;
+	uint32_t sip;
+	uint32_t sip_mask;
+	uint32_t dip;
+	uint32_t dip_mask;
+	uint16_t sport;
+	uint16_t sport_mask;
+	uint16_t dport;
+	uint16_t dport_mask;
+	uint32_t vlan:12;
+	uint32_t vlan_mask:12;
+	uint32_t intf:4;
+	uint32_t intf_mask:4;
+	uint8_t proto;
+	uint8_t proto_mask;
+	uint8_t invert_match:1;
+	uint8_t config_tx:1;
+	uint8_t config_rx:1;
+	uint8_t trace_tx:1;
+	uint8_t trace_rx:1;
+};
+
+#define SIOCCHIOCTL SIOCDEVPRIVATE
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
new file mode 100644
index 0000000..8f7aa53
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -0,0 +1,3438 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "common.h"
+#include "cxgb3_ioctl.h"
+#include "regs.h"
+#include "cxgb3_offload.h"
+#include "version.h"
+
+#include "cxgb3_ctl_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+enum {
+	MAX_TXQ_ENTRIES = 16384,
+	MAX_CTRL_TXQ_ENTRIES = 1024,
+	MAX_RSPQ_ENTRIES = 16384,
+	MAX_RX_BUFFERS = 16384,
+	MAX_RX_JUMBO_BUFFERS = 16384,
+	MIN_TXQ_ENTRIES = 4,
+	MIN_CTRL_TXQ_ENTRIES = 4,
+	MIN_RSPQ_ENTRIES = 32,
+	MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+#define CH_DEVICE(devid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static const struct pci_device_id cxgb3_pci_tbl[] = {
+	CH_DEVICE(0x20, 0),	/* PE9000 */
+	CH_DEVICE(0x21, 1),	/* T302E */
+	CH_DEVICE(0x22, 2),	/* T310E */
+	CH_DEVICE(0x23, 3),	/* T320X */
+	CH_DEVICE(0x24, 1),	/* T302X */
+	CH_DEVICE(0x25, 3),	/* T320E */
+	CH_DEVICE(0x26, 2),	/* T310X */
+	CH_DEVICE(0x30, 2),	/* T3B10 */
+	CH_DEVICE(0x31, 3),	/* T3B20 */
+	CH_DEVICE(0x32, 1),	/* T3B02 */
+	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
+	CH_DEVICE(0x36, 3),	/* S320E-CR */
+	CH_DEVICE(0x37, 7),	/* N320E-G2 */
+	{0,}
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "interrupt scheme: 0 = pin only, 1 = MSI or pin, 2 = MSI-X, MSI, or pin");
+
+/*
+ * The driver enables offload by default.
+ * To disable it, set ofld_disable = 1.
+ */
+
+static int ofld_disable = 0;
+
+module_param(ofld_disable, int, 0644);
+MODULE_PARM_DESC(ofld_disable, "disable offload capabilities at init time (0 = enabled, 1 = disabled)");
+
+/*
+ * We have work elements that we need to cancel when an interface is taken
+ * down.  Normally the work elements would be executed by keventd but that
+ * can deadlock because of linkwatch.  If our close method takes the rtnl
+ * lock and linkwatch is ahead of our work elements in keventd, linkwatch
+ * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
+ * for our work to complete.  Get our own work queue to solve this.
+ */
+struct workqueue_struct *cxgb3_wq;
+
+/**
+ *	link_report - show link status and link speed/duplex
+ *	@dev: the port whose settings are to be reported
+ *
+ *	Shows the link status, speed, and duplex of a port.
+ */
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev))
+		netdev_info(dev, "link down\n");
+	else {
+		const char *s = "10Mbps";
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_config.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+		}
+
+		netdev_info(dev, "link up, %s, %s-duplex\n",
+			    s, p->link_config.duplex == DUPLEX_FULL
+			    ? "full" : "half");
+	}
+}
+
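+/*
+ * Start draining a port's TX FIFO: set the ENDROPPKT bit in the MAC's TX
+ * FIFO configuration and toggle the RX/TX enables so queued frames are
+ * flushed while the link is faulted or down (see t3_os_link_fault()).
+ * disable_tx_fifo_drain() clears the bit again once the link recovers.
+ */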
+static void enable_tx_fifo_drain(struct adapter *adapter,
+				 struct port_info *pi)
+{
+	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
+			 F_ENDROPPKT);
+	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
+	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
+	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
+}
+
+static void disable_tx_fifo_drain(struct adapter *adapter,
+				  struct port_info *pi)
+{
+	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
+			 F_ENDROPPKT, 0);
+}
+
+void t3_os_link_fault(struct adapter *adap, int port_id, int state)
+{
+	struct net_device *dev = adap->port[port_id];
+	struct port_info *pi = netdev_priv(dev);
+
+	if (state == netif_carrier_ok(dev))
+		return;
+
+	if (state) {
+		struct cmac *mac = &pi->mac;
+
+		netif_carrier_on(dev);
+
+		disable_tx_fifo_drain(adap, pi);
+
+		/* Clear local faults */
+		t3_xgm_intr_disable(adap, pi->port_id);
+		t3_read_reg(adap, A_XGM_INT_STATUS +
+				    pi->mac.offset);
+		t3_write_reg(adap,
+			     A_XGM_INT_CAUSE + pi->mac.offset,
+			     F_XGM_INT);
+
+		t3_set_reg_field(adap,
+				 A_XGM_INT_ENABLE +
+				 pi->mac.offset,
+				 F_XGM_INT, F_XGM_INT);
+		t3_xgm_intr_enable(adap, pi->port_id);
+
+		t3_mac_enable(mac, MAC_DIRECTION_TX);
+	} else {
+		netif_carrier_off(dev);
+
+		/* Flush TX FIFO */
+		enable_tx_fifo_drain(adap, pi);
+	}
+	link_report(dev);
+}
+
+/**
+ *	t3_os_link_changed - handle link status changes
+ *	@adapter: the adapter associated with the link change
+ *	@port_id: the port index whose link status has changed
+ *	@link_stat: the new status of the link
+ *	@speed: the new speed setting
+ *	@duplex: the new duplex setting
+ *	@pause: the new flow-control setting
+ *
+ *	This is the OS-dependent handler for link status changes.  The OS
+ *	neutral handler takes care of most of the processing for these events,
+ *	then calls this handler for any OS-specific processing.
+ */
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct net_device *dev = adapter->port[port_id];
+	struct port_info *pi = netdev_priv(dev);
+	struct cmac *mac = &pi->mac;
+
+	/* Skip changes from disabled ports. */
+	if (!netif_running(dev))
+		return;
+
+	if (link_stat != netif_carrier_ok(dev)) {
+		if (link_stat) {
+			disable_tx_fifo_drain(adapter, pi);
+
+			t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+			/* Clear local faults */
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS +
+				    pi->mac.offset);
+			t3_write_reg(adapter,
+				     A_XGM_INT_CAUSE + pi->mac.offset,
+				     F_XGM_INT);
+
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, F_XGM_INT);
+			t3_xgm_intr_enable(adapter, pi->port_id);
+
+			netif_carrier_on(dev);
+		} else {
+			netif_carrier_off(dev);
+
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, 0);
+
+			if (is_10G(adapter))
+				pi->phy.ops->power_down(&pi->phy, 1);
+
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+			t3_mac_disable(mac, MAC_DIRECTION_RX);
+			t3_link_start(&pi->phy, mac, &pi->link_config);
+
+			/* Flush TX FIFO */
+			enable_tx_fifo_drain(adapter, pi);
+		}
+
+		link_report(dev);
+	}
+}
+
+/**
+ *	t3_os_phymod_changed - handle PHY module changes
+ *	@adap: the adapter associated with the PHY module change
+ *	@port_id: the port index whose PHY module changed
+ *
+ *	This is the OS-dependent handler for PHY module changes.  It is
+ *	invoked when a PHY module is removed or inserted for any OS-specific
+ *	processing.
+ */
+void t3_os_phymod_changed(struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->phy.modtype == phy_modtype_none)
+		netdev_info(dev, "PHY module unplugged\n");
+	else
+		netdev_info(dev, "%s PHY module inserted\n",
+			    mod_str[pi->phy.modtype]);
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	t3_mac_set_rx_mode(&pi->mac, dev);
+}
+
+/**
+ *	link_start - enable a port
+ *	@dev: the device to enable
+ *
+ *	Performs the MAC and PHY actions needed to enable a port.
+ */
+static void link_start(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct cmac *mac = &pi->mac;
+
+	t3_mac_reset(mac);
+	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
+	t3_mac_set_mtu(mac, dev->mtu);
+	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
+	t3_mac_set_rx_mode(mac, dev);
+	t3_link_start(&pi->phy, mac, &pi->link_config);
+	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static inline void cxgb_disable_msi(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+}
+
+/*
+ * Interrupt handler for asynchronous events used with MSI-X.
+ */
+static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
+{
+	t3_slow_intr_handler(cookie);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+
+	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+	adap->msix_info[0].desc[n] = 0;
+
+	for_each_port(adap, j) {
+		struct net_device *d = adap->port[j];
+		const struct port_info *pi = netdev_priv(d);
+
+		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+			snprintf(adap->msix_info[msi_idx].desc, n,
+				 "%s-%d", d->name, pi->first_qset + i);
+			adap->msix_info[msi_idx].desc[n] = 0;
+		}
+	}
+}
+
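+/*
+ * Request one data IRQ per SGE queue set, using the MSI-X vectors named by
+ * name_msix_vecs().  On failure, release any vectors already requested.
+ */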
+static int request_msix_data_irqs(struct adapter *adap)
+{
+	int i, j, err, qidx = 0;
+
+	for_each_port(adap, i) {
+		int nqsets = adap2pinfo(adap, i)->nqsets;
+
+		for (j = 0; j < nqsets; ++j) {
+			err = request_irq(adap->msix_info[qidx + 1].vec,
+					  t3_intr_handler(adap,
+							  adap->sge.qs[qidx].
+							  rspq.polling), 0,
+					  adap->msix_info[qidx + 1].desc,
+					  &adap->sge.qs[qidx]);
+			if (err) {
+				while (--qidx >= 0)
+					free_irq(adap->msix_info[qidx + 1].vec,
+						 &adap->sge.qs[qidx]);
+				return err;
+			}
+			qidx++;
+		}
+	}
+	return 0;
+}
+
+static void free_irq_resources(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		int i, n = 0;
+
+		free_irq(adapter->msix_info[0].vec, adapter);
+		for_each_port(adapter, i)
+			n += adap2pinfo(adapter, i)->nqsets;
+
+		for (i = 0; i < n; ++i)
+			free_irq(adapter->msix_info[i + 1].vec,
+				 &adapter->sge.qs[i]);
+	} else
+		free_irq(adapter->pdev->irq, adapter);
+}
+
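+/*
+ * Wait for the firmware to acknowledge @n management messages sent since
+ * @init_cnt was sampled, as observed through response queue 0's offload
+ * packet counter.  Gives up after roughly 100ms.
+ */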
+static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
+			      unsigned long n)
+{
+	int attempts = 10;
+
+	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
+		if (!--attempts)
+			return -ETIMEDOUT;
+		msleep(10);
+	}
+	return 0;
+}
+
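+/*
+ * Initialize parity in the TP's SRAM-backed tables by writing every SMT,
+ * L2T, and routing-table entry once (plus one TCB field), waiting for the
+ * firmware to acknowledge each batch of management replies.
+ */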
+static int init_tp_parity(struct adapter *adap)
+{
+	int i;
+	struct sk_buff *skb;
+	struct cpl_set_tcb_field *greq;
+	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
+
+	t3_tp_set_offload_mode(adap, 1);
+
+	for (i = 0; i < 16; i++) {
+		struct cpl_smt_write_req *req;
+
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
+		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+		memset(req, 0, sizeof(*req));
+		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
+		req->mtu_idx = NMTUS - 1;
+		req->iff = i;
+		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
+	}
+
+	for (i = 0; i < 2048; i++) {
+		struct cpl_l2t_write_req *req;
+
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
+		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+		memset(req, 0, sizeof(*req));
+		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
+		req->params = htonl(V_L2T_W_IDX(i));
+		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
+	}
+
+	for (i = 0; i < 2048; i++) {
+		struct cpl_rte_write_req *req;
+
+		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+		if (!skb)
+			skb = adap->nofail_skb;
+		if (!skb)
+			goto alloc_skb_fail;
+
+		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
+		memset(req, 0, sizeof(*req));
+		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
+		req->l2t_idx = htonl(V_L2T_W_IDX(i));
+		t3_mgmt_tx(adap, skb);
+		if (skb == adap->nofail_skb) {
+			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+			if (!adap->nofail_skb)
+				goto alloc_skb_fail;
+		}
+	}
+
+	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		goto alloc_skb_fail;
+
+	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
+	memset(greq, 0, sizeof(*greq));
+	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
+	greq->mask = cpu_to_be64(1);
+	t3_mgmt_tx(adap, skb);
+
+	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+	if (skb == adap->nofail_skb) {
+		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+	}
+
+	t3_tp_set_offload_mode(adap, 0);
+	return i;
+
+alloc_skb_fail:
+	t3_tp_set_offload_mode(adap, 0);
+	return -ENOMEM;
+}
+
+/**
+ *	setup_rss - configure RSS
+ *	@adap: the adapter
+ *
+ *	Sets up RSS to distribute packets to multiple receive queues.  We
+ *	configure the RSS CPU lookup table to distribute to the number of HW
+ *	receive queues, and the response queue lookup table to narrow that
+ *	down to the response queues actually configured for each port.
+ *	We always configure the RSS mapping for two ports since the mapping
+ *	table has plenty of entries.
+ */
+static void setup_rss(struct adapter *adap)
+{
+	int i;
+	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
+	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
+	u8 cpus[SGE_QSETS + 1];
+	u16 rspq_map[RSS_TABLE_SIZE];
+
+	for (i = 0; i < SGE_QSETS; ++i)
+		cpus[i] = i;
+	cpus[SGE_QSETS] = 0xff;	/* terminator */
+
+	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
+		rspq_map[i] = i % nq0;
+		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+	}
+
+	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
+		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
+		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
+}
+
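+/*
+ * Ring the SGE kernel doorbell for every egress (TX) context of each
+ * initialized queue set, prompting the hardware to re-examine those queues.
+ */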
+static void ring_dbs(struct adapter *adap)
+{
+	int i, j;
+
+	for (i = 0; i < SGE_QSETS; i++) {
+		struct sge_qset *qs = &adap->sge.qs[i];
+
+		if (qs->adap)
+			for (j = 0; j < SGE_TXQ_PER_SET; j++)
+				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+	}
+}
+
+static void init_napi(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; i++) {
+		struct sge_qset *qs = &adap->sge.qs[i];
+
+		if (qs->adap)
+			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
+				       64);
+	}
+
+	/*
+	 * netif_napi_add() can be called only once per napi_struct because it
+	 * adds each new napi_struct to a list.  Be careful not to call it a
+	 * second time, e.g., during EEH recovery, by making a note of it.
+	 */
+	adap->flags |= NAPI_INIT;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.  This includes the handlers of
+ * both netdevices representing interfaces and the dummy ones for the extra
+ * queues.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; i++)
+		if (adap->sge.qs[i].adap)
+			napi_disable(&adap->sge.qs[i].napi);
+}
+
+static void enable_all_napi(struct adapter *adap)
+{
+	int i;
+	for (i = 0; i < SGE_QSETS; i++)
+		if (adap->sge.qs[i].adap)
+			napi_enable(&adap->sge.qs[i].napi);
+}
+
+/**
+ *	setup_sge_qsets - configure SGE Tx/Rx/response queues
+ *	@adap: the adapter
+ *
+ *	Determines how many sets of SGE queues to use and initializes them.
+ *	We support multiple queue sets per port if we have MSI-X, otherwise
+ *	just one queue set per port.
+ */
+static int setup_sge_qsets(struct adapter *adap)
+{
+	int i, j, err, irq_idx = 0, qset_idx = 0;
+	unsigned int ntxq = SGE_TXQ_PER_SET;
+
+	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
+		irq_idx = -1;
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		struct port_info *pi = netdev_priv(dev);
+
+		pi->qs = &adap->sge.qs[pi->first_qset];
+		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+			err = t3_sge_alloc_qset(adap, qset_idx, 1,
+				(adap->flags & USING_MSIX) ? qset_idx + 1 :
+							     irq_idx,
+				&adap->params.sge.qset[qset_idx], ntxq, dev,
+				netdev_get_tx_queue(dev, j));
+			if (err) {
+				t3_free_sge_resources(adap);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static ssize_t attr_show(struct device *d, char *buf,
+			 ssize_t(*format) (struct net_device *, char *))
+{
+	ssize_t len;
+
+	/* Synchronize with ioctls that may shut down the device */
+	rtnl_lock();
+	len = (*format) (to_net_dev(d), buf);
+	rtnl_unlock();
+	return len;
+}
+
+static ssize_t attr_store(struct device *d,
+			  const char *buf, size_t len,
+			  ssize_t(*set) (struct net_device *, unsigned int),
+			  unsigned int min_val, unsigned int max_val)
+{
+	char *endp;
+	ssize_t ret;
+	unsigned int val;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (endp == buf || val < min_val || val > max_val)
+		return -EINVAL;
+
+	rtnl_lock();
+	ret = (*set) (to_net_dev(d), val);
+	if (!ret)
+		ret = len;
+	rtnl_unlock();
+	return ret;
+}
+
+#define CXGB3_SHOW(name, val_expr) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
+{ \
+	struct port_info *pi = netdev_priv(dev); \
+	struct adapter *adap = pi->adapter; \
+	return sprintf(buf, "%u\n", val_expr); \
+} \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+			   char *buf) \
+{ \
+	return attr_show(d, buf, format_##name); \
+}
+
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
+
+	if (adap->flags & FULL_INIT_DONE)
+		return -EBUSY;
+	if (val && adap->params.rev == 0)
+		return -EINVAL;
+	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+	    min_tids)
+		return -EINVAL;
+	adap->params.mc5.nfilters = val;
+	return 0;
+}
+
+static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	return attr_store(d, buf, len, set_nfilters, 0, ~0);
+}
+
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+
+	if (adap->flags & FULL_INIT_DONE)
+		return -EBUSY;
+	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
+	    MC5_MIN_TIDS)
+		return -EINVAL;
+	adap->params.mc5.nservers = val;
+	return 0;
+}
+
+static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	return attr_store(d, buf, len, set_nservers, 0, ~0);
+}
+
+#define CXGB3_ATTR_R(name, val_expr) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+#define CXGB3_ATTR_RW(name, val_expr, store_method) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+
+CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
+CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
+CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
+
+static struct attribute *cxgb3_attrs[] = {
+	&dev_attr_cam_size.attr,
+	&dev_attr_nfilters.attr,
+	&dev_attr_nservers.attr,
+	NULL
+};
+
+static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
+
+static ssize_t tm_attr_show(struct device *d,
+			    char *buf, int sched)
+{
+	struct port_info *pi = netdev_priv(to_net_dev(d));
+	struct adapter *adap = pi->adapter;
+	unsigned int v, addr, bpt, cpt;
+	ssize_t len;
+
+	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+	rtnl_lock();
+	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v >>= 16;
+	bpt = (v >> 8) & 0xff;
+	cpt = v & 0xff;
+	if (!cpt)
+		len = sprintf(buf, "disabled\n");
+	else {
+		v = (adap->params.vpd.cclk * 1000) / cpt;
+		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
+	}
+	rtnl_unlock();
+	return len;
+}
+
+static ssize_t tm_attr_store(struct device *d,
+			     const char *buf, size_t len, int sched)
+{
+	struct port_info *pi = netdev_priv(to_net_dev(d));
+	struct adapter *adap = pi->adapter;
+	unsigned int val;
+	char *endp;
+	ssize_t ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (endp == buf || val > 10000000)
+		return -EINVAL;
+
+	rtnl_lock();
+	ret = t3_config_sched(adap, val, sched);
+	if (!ret)
+		ret = len;
+	rtnl_unlock();
+	return ret;
+}
+
+#define TM_ATTR(name, sched) \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+			   char *buf) \
+{ \
+	return tm_attr_show(d, buf, sched); \
+} \
+static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
+			    const char *buf, size_t len) \
+{ \
+	return tm_attr_store(d, buf, len, sched); \
+} \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+
+TM_ATTR(sched0, 0);
+TM_ATTR(sched1, 1);
+TM_ATTR(sched2, 2);
+TM_ATTR(sched3, 3);
+TM_ATTR(sched4, 4);
+TM_ATTR(sched5, 5);
+TM_ATTR(sched6, 6);
+TM_ATTR(sched7, 7);
+
+static struct attribute *offload_attrs[] = {
+	&dev_attr_sched0.attr,
+	&dev_attr_sched1.attr,
+	&dev_attr_sched2.attr,
+	&dev_attr_sched3.attr,
+	&dev_attr_sched4.attr,
+	&dev_attr_sched5.attr,
+	&dev_attr_sched6.attr,
+	&dev_attr_sched7.attr,
+	NULL
+};
+
+static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
+
+/*
+ * Sends an sk_buff to an offload queue driver
+ * after dealing with any active network taps.
+ */
+static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = t3_offload_tx(tdev, skb);
+	local_bh_enable();
+	return ret;
+}
+
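+/*
+ * Program the source MAC table (SMT) entry for port @idx with the port's
+ * LAN and iSCSI MAC addresses via a CPL_SMT_WRITE_REQ on the offload queue.
+ */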
+static int write_smt_entry(struct adapter *adapter, int idx)
+{
+	struct cpl_smt_write_req *req;
+	struct port_info *pi = netdev_priv(adapter->port[idx]);
+	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
+	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
+	req->iff = idx;
+	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
+	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
+	skb->priority = 1;
+	offload_tx(&adapter->tdev, skb);
+	return 0;
+}
+
+static int init_smt(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i)
+	    write_smt_entry(adapter, i);
+	return 0;
+}
+
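+/* Program the per-port MTUs into the TP's MTU port table register. */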
+static void init_port_mtus(struct adapter *adapter)
+{
+	unsigned int mtus = adapter->port[0]->mtu;
+
+	if (adapter->port[1])
+		mtus |= adapter->port[1]->mtu << 16;
+	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
+}
+
+static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+			      int hi, int port)
+{
+	struct sk_buff *skb;
+	struct mngt_pktsched_wr *req;
+	int ret;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		skb = adap->nofail_skb;
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
+	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
+	req->sched = sched;
+	req->idx = qidx;
+	req->min = lo;
+	req->max = hi;
+	req->binding = port;
+	ret = t3_mgmt_tx(adap, skb);
+	if (skb == adap->nofail_skb) {
+		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					     GFP_KERNEL);
+		if (!adap->nofail_skb)
+			ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
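+/*
+ * Associate each queue set with its port's TX packet scheduler by issuing
+ * one firmware PKTSCHED_SET management command per queue set.
+ */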
+static int bind_qsets(struct adapter *adap)
+{
+	int i, j, err = 0;
+
+	for_each_port(adap, i) {
+		const struct port_info *pi = adap2pinfo(adap, i);
+
+		for (j = 0; j < pi->nqsets; ++j) {
+			int ret = send_pktsched_cmd(adap, 1,
+						    pi->first_qset + j, -1,
+						    -1, i);
+			if (ret)
+				err = ret;
+		}
+	}
+
+	return err;
+}
+
+#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
+	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
+#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
+#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
+	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
+#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
+#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
+#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
+#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
+MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
+MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
+MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
+
+static inline const char *get_edc_fw_name(int edc_idx)
+{
+	const char *fw_name = NULL;
+
+	switch (edc_idx) {
+	case EDC_OPT_AEL2005:
+		fw_name = AEL2005_OPT_EDC_NAME;
+		break;
+	case EDC_TWX_AEL2005:
+		fw_name = AEL2005_TWX_EDC_NAME;
+		break;
+	case EDC_TWX_AEL2020:
+		fw_name = AEL2020_TWX_EDC_NAME;
+		break;
+	}
+	return fw_name;
+}
+
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
+{
+	struct adapter *adapter = phy->adapter;
+	const struct firmware *fw;
+	const char *fw_name;
+	u32 csum;
+	const __be32 *p;
+	u16 *cache = phy->phy_cache;
+	int i, ret = -EINVAL;
+
+	fw_name = get_edc_fw_name(edc_idx);
+	if (fw_name)
+		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
+	if (ret < 0) {
+		dev_err(&adapter->pdev->dev,
+			"could not upgrade firmware: unable to load %s\n",
+			fw_name);
+		return ret;
+	}
+
+	/* check the size, taking the checksum into account */
+	if (fw->size > size + 4) {
+		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
+		       (unsigned int)fw->size, size + 4);
+		ret = -EINVAL;
+	}
+
+	/* compute checksum */
+	p = (const __be32 *)fw->data;
+	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+
+	if (csum != 0xffffffff) {
+		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+		       csum);
+		ret = -EINVAL;
+	}
+
+	for (i = 0; i < size / 4 ; i++) {
+		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
+		*cache++ = be32_to_cpu(p[i]) & 0xffff;
+	}
+
+	release_firmware(fw);
+
+	return ret;
+}
+
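+/*
+ * Fetch the bundled main firmware image with request_firmware() and program
+ * it into the adapter via t3_load_fw().
+ */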
+static int upgrade_fw(struct adapter *adap)
+{
+	int ret;
+	const struct firmware *fw;
+	struct device *dev = &adap->pdev->dev;
+
+	ret = request_firmware(&fw, FW_FNAME, dev);
+	if (ret < 0) {
+		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
+			FW_FNAME);
+		return ret;
+	}
+	ret = t3_load_fw(adap, fw->data, fw->size);
+	release_firmware(fw);
+
+	if (ret == 0)
+		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
+			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+	else
+		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
+			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+
+	return ret;
+}
+
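+/*
+ * Map the chip revision to the suffix character used in the protocol SRAM
+ * firmware file name ('b' or 'c'); returns 0 for revisions without an image.
+ */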
+static inline char t3rev2char(struct adapter *adapter)
+{
+	char rev = 0;
+
+	switch(adapter->params.rev) {
+	case T3_REV_B:
+	case T3_REV_B2:
+		rev = 'b';
+		break;
+	case T3_REV_C:
+		rev = 'c';
+		break;
+	}
+	return rev;
+}
+
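+/*
+ * Fetch the revision-specific protocol (TP) SRAM image, validate it, and
+ * load it into the adapter.
+ */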
+static int update_tpsram(struct adapter *adap)
+{
+	const struct firmware *tpsram;
+	char buf[64];
+	struct device *dev = &adap->pdev->dev;
+	int ret;
+	char rev;
+
+	rev = t3rev2char(adap);
+	if (!rev)
+		return 0;
+
+	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
+
+	ret = request_firmware(&tpsram, buf, dev);
+	if (ret < 0) {
+		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+			buf);
+		return ret;
+	}
+
+	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+	if (ret)
+		goto release_tpsram;
+
+	ret = t3_set_proto_sram(adap, tpsram->data);
+	if (ret == 0)
+		dev_info(dev,
+			 "successful update of protocol engine "
+			 "to %d.%d.%d\n",
+			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+	else
+		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
+			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+	if (ret)
+		dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+	release_firmware(tpsram);
+
+	return ret;
+}
+
+/**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning.  We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+	int i;
+
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+		struct sge_rspq *q = &adap->sge.qs[i].rspq;
+
+		spin_lock_irq(&q->lock);
+		spin_unlock_irq(&q->lock);
+	}
+}
+
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (adapter->params.rev > 0) {
+		t3_set_vlan_accel(adapter, 1 << pi->port_id,
+				  features & NETIF_F_HW_VLAN_CTAG_RX);
+	} else {
+		/* single control for all ports */
+		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
+
+		for_each_port(adapter, i)
+			have_vlans |=
+				adapter->port[i]->features &
+				NETIF_F_HW_VLAN_CTAG_RX;
+
+		t3_set_vlan_accel(adapter, 1, have_vlans);
+	}
+	t3_synchronize_rx(adapter, pi);
+}
+
+/**
+ *	cxgb_up - enable the adapter
+ *	@adap: adapter being enabled
+ *
+ *	Called when the first port is enabled, this function performs the
+ *	actions necessary to make an adapter operational, such as completing
+ *	the initialization of HW modules, and enabling interrupts.
+ *
+ *	Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+	int i, err;
+
+	if (!(adap->flags & FULL_INIT_DONE)) {
+		err = t3_check_fw_version(adap);
+		if (err == -EINVAL) {
+			err = upgrade_fw(adap);
+			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
+				FW_VERSION_MAJOR, FW_VERSION_MINOR,
+				FW_VERSION_MICRO, err ? "failed" : "succeeded");
+		}
+
+		err = t3_check_tpsram_version(adap);
+		if (err == -EINVAL) {
+			err = update_tpsram(adap);
+			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
+				TP_VERSION_MAJOR, TP_VERSION_MINOR,
+				TP_VERSION_MICRO, err ? "failed" : "succeeded");
+		}
+
+		/*
+		 * Clear interrupts now to catch errors if t3_init_hw fails.
+		 * We clear them again later as initialization may trigger
+		 * conditions that can interrupt.
+		 */
+		t3_intr_clear(adap);
+
+		err = t3_init_hw(adap, 0);
+		if (err)
+			goto out;
+
+		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
+		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+		err = setup_sge_qsets(adap);
+		if (err)
+			goto out;
+
+		for_each_port(adap, i)
+			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
+
+		setup_rss(adap);
+		if (!(adap->flags & NAPI_INIT))
+			init_napi(adap);
+
+		t3_start_sge_timers(adap);
+		adap->flags |= FULL_INIT_DONE;
+	}
+
+	t3_intr_clear(adap);
+
+	if (adap->flags & USING_MSIX) {
+		name_msix_vecs(adap);
+		err = request_irq(adap->msix_info[0].vec,
+				  t3_async_intr_handler, 0,
+				  adap->msix_info[0].desc, adap);
+		if (err)
+			goto irq_err;
+
+		err = request_msix_data_irqs(adap);
+		if (err) {
+			free_irq(adap->msix_info[0].vec, adap);
+			goto irq_err;
+		}
+	} else if ((err = request_irq(adap->pdev->irq,
+				      t3_intr_handler(adap,
+						      adap->sge.qs[0].rspq.
+						      polling),
+				      (adap->flags & USING_MSI) ?
+				       0 : IRQF_SHARED,
+				      adap->name, adap)))
+		goto irq_err;
+
+	enable_all_napi(adap);
+	t3_sge_start(adap);
+	t3_intr_enable(adap);
+
+	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
+	    is_offload(adap) && init_tp_parity(adap) == 0)
+		adap->flags |= TP_PARITY_INIT;
+
+	if (adap->flags & TP_PARITY_INIT) {
+		t3_write_reg(adap, A_TP_INT_CAUSE,
+			     F_CMCACHEPERR | F_ARPLUTPERR);
+		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
+	}
+
+	if (!(adap->flags & QUEUES_BOUND)) {
+		int ret = bind_qsets(adap);
+
+		if (ret < 0) {
+			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
+			t3_intr_disable(adap);
+			free_irq_resources(adap);
+			err = ret;
+			goto out;
+		}
+		adap->flags |= QUEUES_BOUND;
+	}
+
+out:
+	return err;
+irq_err:
+	CH_ERR(adap, "request_irq failed, err %d\n", err);
+	goto out;
+}
+
+/*
+ * Release resources when all the ports and offloading have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter, int on_wq)
+{
+	t3_sge_stop(adapter);
+	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
+	t3_intr_disable(adapter);
+	spin_unlock_irq(&adapter->work_lock);
+
+	free_irq_resources(adapter);
+	quiesce_rx(adapter);
+	t3_sge_stop(adapter);
+	if (!on_wq)
+		flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
+}
+
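+/*
+ * (Re)schedule the periodic adapter check task on the driver's private work
+ * queue, using the link poll period if one is configured and the statistics
+ * update period otherwise.
+ */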
+static void schedule_chk_task(struct adapter *adap)
+{
+	unsigned int timeo;
+
+	timeo = adap->params.linkpoll_period ?
+	    (HZ * adap->params.linkpoll_period) / 10 :
+	    adap->params.stats_update_period * HZ;
+	if (timeo)
+		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
+}
+
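+/*
+ * Bring up the offload side of the adapter: ensure the NIC side is up,
+ * switch the TP into offload mode, activate the offload device, and notify
+ * registered ULP clients.
+ */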
+static int offload_open(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct t3cdev *tdev = dev2t3cdev(dev);
+	int adap_up = adapter->open_device_map & PORT_MASK;
+	int err;
+
+	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+		return 0;
+
+	if (!adap_up && (err = cxgb_up(adapter)) < 0)
+		goto out;
+
+	t3_tp_set_offload_mode(adapter, 1);
+	tdev->lldev = adapter->port[0];
+	err = cxgb3_offload_activate(adapter);
+	if (err)
+		goto out;
+
+	init_port_mtus(adapter);
+	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
+		     adapter->params.b_wnd,
+		     adapter->params.rev == 0 ?
+		     adapter->port[0]->mtu : 0xffff);
+	init_smt(adapter);
+
+	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+		dev_dbg(&dev->dev, "cannot create sysfs group\n");
+
+	/* Call back all registered clients */
+	cxgb3_add_clients(tdev);
+
+out:
+	/* restore them in case the offload module has changed them */
+	if (err) {
+		t3_tp_set_offload_mode(adapter, 0);
+		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+		cxgb3_set_dummy_ops(tdev);
+	}
+	return err;
+}
+
+static int offload_close(struct t3cdev *tdev)
+{
+	struct adapter *adapter = tdev2adap(tdev);
+	struct t3c_data *td = T3C_DATA(tdev);
+
+	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+		return 0;
+
+	/* Call back all registered clients */
+	cxgb3_remove_clients(tdev);
+
+	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+
+	/* Flush work scheduled while releasing TIDs */
+	flush_work(&td->tid_release_task);
+
+	tdev->lldev = NULL;
+	cxgb3_set_dummy_ops(tdev);
+	t3_tp_set_offload_mode(adapter, 0);
+	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter, 0);
+
+	cxgb3_offload_deactivate(adapter);
+	return 0;
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int other_ports = adapter->open_device_map & PORT_MASK;
+	int err;
+
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	set_bit(pi->port_id, &adapter->open_device_map);
+	if (is_offload(adapter) && !ofld_disable) {
+		err = offload_open(dev);
+		if (err)
+			pr_warn("Could not initialize offload capabilities\n");
+	}
+
+	netif_set_real_num_tx_queues(dev, pi->nqsets);
+	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+	if (err)
+		return err;
+	link_start(dev);
+	t3_port_intr_enable(adapter, pi->port_id);
+	netif_tx_start_all_queues(dev);
+	if (!other_ports)
+		schedule_chk_task(adapter);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
+	return 0;
+}
+
+static int __cxgb_close(struct net_device *dev, int on_wq)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (!adapter->open_device_map)
+		return 0;
+
+	/* Stop link fault interrupts */
+	t3_xgm_intr_disable(adapter, pi->port_id);
+	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+
+	t3_port_intr_disable(adapter, pi->port_id);
+	netif_tx_stop_all_queues(dev);
+	pi->phy.ops->power_down(&pi->phy, 1);
+	netif_carrier_off(dev);
+	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+
+	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
+	clear_bit(pi->port_id, &adapter->open_device_map);
+	spin_unlock_irq(&adapter->work_lock);
+
+	if (!(adapter->open_device_map & PORT_MASK))
+		cancel_delayed_work_sync(&adapter->adap_check_task);
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter, on_wq);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	return __cxgb_close(dev, 0);
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct net_device_stats *ns = &pi->netstats;
+	const struct mac_stats *pstats;
+
+	spin_lock(&adapter->stats_lock);
+	pstats = t3_mac_update_stats(&pi->mac);
+	spin_unlock(&adapter->stats_lock);
+
+	ns->tx_bytes = pstats->tx_octets;
+	ns->tx_packets = pstats->tx_frames;
+	ns->rx_bytes = pstats->rx_octets;
+	ns->rx_packets = pstats->rx_frames;
+	ns->multicast = pstats->rx_mcast_frames;
+
+	ns->tx_errors = pstats->tx_underrun;
+	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
+	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
+	    pstats->rx_fifo_ovfl;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
+	ns->rx_over_errors = 0;
+	ns->rx_crc_errors = pstats->rx_fcs_errs;
+	ns->rx_frame_errors = pstats->rx_symbol_errs;
+	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
+	ns->rx_missed_errors = pstats->rx_cong_drops;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors = 0;
+	ns->tx_carrier_errors = 0;
+	ns->tx_fifo_errors = pstats->tx_underrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors = 0;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	adapter->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK         ",
+	"TxFramesOK         ",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames      ",
+	"TxUnderrun         ",
+	"TxExtUnderrun      ",
+
+	"TxFrames64         ",
+	"TxFrames65To127    ",
+	"TxFrames128To255   ",
+	"TxFrames256To511   ",
+	"TxFrames512To1023  ",
+	"TxFrames1024To1518 ",
+	"TxFrames1519ToMax  ",
+
+	"RxOctetsOK         ",
+	"RxFramesOK         ",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames      ",
+	"RxFCSErrors        ",
+	"RxSymbolErrors     ",
+	"RxShortErrors      ",
+	"RxJabberErrors     ",
+	"RxLengthErrors     ",
+	"RxFIFOoverflow     ",
+
+	"RxFrames64         ",
+	"RxFrames65To127    ",
+	"RxFrames128To255   ",
+	"RxFrames256To511   ",
+	"RxFrames512To1023  ",
+	"RxFrames1024To1518 ",
+	"RxFrames1519ToMax  ",
+
+	"PhyFIFOErrors      ",
+	"TSO                ",
+	"VLANextractions    ",
+	"VLANinsertions     ",
+	"TxCsumOffload      ",
+	"RxCsumGood         ",
+	"LroAggregated      ",
+	"LroFlushed         ",
+	"LroNoDesc          ",
+	"RxDrops            ",
+
+	"CheckTXEnToggled   ",
+	"CheckResets        ",
+
+	"LinkFaults         ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#define T3_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T3_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	u32 fw_vers = 0;
+	u32 tp_vers = 0;
+
+	spin_lock(&adapter->stats_lock);
+	t3_get_fw_version(adapter, &fw_vers);
+	t3_get_tp_version(adapter, &tp_vers);
+	spin_unlock(&adapter->stats_lock);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
+	if (fw_vers)
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			 "%s %u.%u.%u TP %u.%u.%u",
+			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
+			 G_FW_VERSION_MAJOR(fw_vers),
+			 G_FW_VERSION_MINOR(fw_vers),
+			 G_FW_VERSION_MICRO(fw_vers),
+			 G_TP_VERSION_MAJOR(tp_vers),
+			 G_TP_VERSION_MINOR(tp_vers),
+			 G_TP_VERSION_MICRO(tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
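+/*
+ * Sum a single per-queue SGE statistic across all the queue sets owned by
+ * a port.
+ */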
+static unsigned long collect_sge_port_stats(struct adapter *adapter,
+					    struct port_info *p, int idx)
+{
+	int i;
+	unsigned long tot = 0;
+
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+		tot += adapter->sge.qs[i].port_stats[idx];
+	return tot;
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	const struct mac_stats *s;
+
+	spin_lock(&adapter->stats_lock);
+	s = t3_mac_update_stats(&pi->mac);
+	spin_unlock(&adapter->stats_lock);
+
+	*data++ = s->tx_octets;
+	*data++ = s->tx_frames;
+	*data++ = s->tx_mcast_frames;
+	*data++ = s->tx_bcast_frames;
+	*data++ = s->tx_pause;
+	*data++ = s->tx_underrun;
+	*data++ = s->tx_fifo_urun;
+
+	*data++ = s->tx_frames_64;
+	*data++ = s->tx_frames_65_127;
+	*data++ = s->tx_frames_128_255;
+	*data++ = s->tx_frames_256_511;
+	*data++ = s->tx_frames_512_1023;
+	*data++ = s->tx_frames_1024_1518;
+	*data++ = s->tx_frames_1519_max;
+
+	*data++ = s->rx_octets;
+	*data++ = s->rx_frames;
+	*data++ = s->rx_mcast_frames;
+	*data++ = s->rx_bcast_frames;
+	*data++ = s->rx_pause;
+	*data++ = s->rx_fcs_errs;
+	*data++ = s->rx_symbol_errs;
+	*data++ = s->rx_short;
+	*data++ = s->rx_jabber;
+	*data++ = s->rx_too_long;
+	*data++ = s->rx_fifo_ovfl;
+
+	*data++ = s->rx_frames_64;
+	*data++ = s->rx_frames_65_127;
+	*data++ = s->rx_frames_128_255;
+	*data++ = s->rx_frames_256_511;
+	*data++ = s->rx_frames_512_1023;
+	*data++ = s->rx_frames_1024_1518;
+	*data++ = s->rx_frames_1519_max;
+
+	*data++ = pi->phy.fifo_errors;
+
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+	*data++ = 0;
+	*data++ = 0;
+	*data++ = 0;
+	*data++ = s->rx_cong_drops;
+
+	*data++ = s->num_toggled;
+	*data++ = s->num_resets;
+
+	*data++ = s->link_faults;
+}
+
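+/*
+ * Copy the contiguous block of adapter registers [start, end] into the
+ * register dump buffer at the matching offset.
+ */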
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for (; start <= end; start += sizeof(u32))
+		*p++ = t3_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *ap = pi->adapter;
+
+	/*
+	 * Version scheme:
+	 * bits 0..9: chip version
+	 * bits 10..15: chip revision
+	 * bit 31: set for PCIe cards
+	 */
+	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
+
+	/*
+	 * We skip the MAC statistics registers because they are clear-on-read.
+	 * Also reading multi-register stats would need to synchronize with the
+	 * periodic mac stats accumulation.  Hard to justify the complexity.
+	 */
+	memset(buf, 0, T3_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
+	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
+	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
+	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
+	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
+	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
+		       XGM_REG(A_XGM_SERDES_STAT3, 1));
+	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
+		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+	if (p->link_config.autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+	p->phy.ops->autoneg_restart(&p->phy);
+	return 0;
+}
+
+static int set_phys_id(struct net_device *dev,
+		       enum ethtool_phys_id_state state)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_OFF:
+		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+		break;
+
+	case ETHTOOL_ID_ON:
+	case ETHTOOL_ID_INACTIVE:
+		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+			 F_GPIO0_OUT_VAL);
+	}
+
+	return 0;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		ethtool_cmd_speed_set(cmd, p->link_config.speed);
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy.mdio.prtad;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
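+/*
+ * Translate a speed/duplex pair into the corresponding ethtool SUPPORTED_*
+ * capability bit, or 0 if the combination isn't supported.
+ */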
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg)) {
+		/*
+		 * PHY offers a single speed/duplex.  See if that's what's
+		 * being requested.
+		 */
+		if (cmd->autoneg == AUTONEG_DISABLE) {
+			u32 speed = ethtool_cmd_speed(cmd);
+			int cap = speed_duplex_to_caps(speed, cmd->duplex);
+			if (lc->supported & cap)
+				return 0;
+		}
+		return -EINVAL;
+	}
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		u32 speed = ethtool_cmd_speed(cmd);
+		int cap = speed_duplex_to_caps(speed, cmd->duplex);
+
+		if (!(lc->supported & cap) || (speed == SPEED_1000))
+			return -EINVAL;
+		lc->requested_speed = speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t3_link_start(&p->phy, &p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t3_link_start(&p->phy, &p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
+	}
+	return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	e->rx_pending = q->fl_size;
+	e->rx_mini_pending = q->rspq_size;
+	e->rx_jumbo_pending = q->jumbo_size;
+	e->tx_pending = q->txq_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct qset_params *q;
+	int i;
+
+	if (e->rx_pending > MAX_RX_BUFFERS ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_TXQ_ENTRIES ||
+	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	q = &adapter->params.sge.qset[pi->first_qset];
+	for (i = 0; i < pi->nqsets; ++i, ++q) {
+		q->rspq_size = e->rx_mini_pending;
+		q->fl_size = e->rx_pending;
+		q->jumbo_size = e->rx_jumbo_pending;
+		q->txq_size[0] = e->tx_pending;
+		q->txq_size[1] = e->tx_pending;
+		q->txq_size[2] = e->tx_pending;
+	}
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct qset_params *qsp;
+	struct sge_qset *qs;
+	int i;
+
+	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
+		return -EINVAL;
+
+	for (i = 0; i < pi->nqsets; i++) {
+		qsp = &adapter->params.sge.qset[i];
+		qs = &adapter->sge.qs[i];
+		qsp->coalesce_usecs = c->rx_coalesce_usecs;
+		t3_update_qset_coalesce(qs, qsp);
+	}
+
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct qset_params *q = adapter->params.sge.qset;
+
+	c->rx_coalesce_usecs = q->coalesce_usecs;
+	return 0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 * data)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int i, err = 0;
+
+	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	e->magic = EEPROM_MAGIC;
+	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
+
+	if (!err)
+		memcpy(data, buf + e->offset, e->len);
+	kfree(buf);
+	return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 * data)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	u32 aligned_offset, aligned_len;
+	__le32 *p;
+	u8 *buf;
+	int err;
+
+	if (eeprom->magic != EEPROM_MAGIC)
+		return -EINVAL;
+
+	aligned_offset = eeprom->offset & ~3;
+	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
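+	/*
+	 * EEPROM writes are done in aligned 32-bit words.  If the request is
+	 * not word-aligned, read the partial first/last words and merge the
+	 * user data into them before writing back.
+	 */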
+	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+		buf = kmalloc(aligned_len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
+		if (!err && aligned_len > 4)
+			err = t3_seeprom_read(adapter,
+					      aligned_offset + aligned_len - 4,
+					      (__le32 *) & buf[aligned_len - 4]);
+		if (err)
+			goto out;
+		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+	} else
+		buf = data;
+
+	err = t3_seeprom_wp(adapter, 0);
+	if (err)
+		goto out;
+
+	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
+		err = t3_seeprom_write(adapter, aligned_offset, *p);
+		aligned_offset += 4;
+	}
+
+	if (!err)
+		err = t3_seeprom_wp(adapter, 1);
+out:
+	if (buf != data)
+		kfree(buf);
+	return err;
+}
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+	.get_settings = get_settings,
+	.set_settings = set_settings,
+	.get_drvinfo = get_drvinfo,
+	.get_msglevel = get_msglevel,
+	.set_msglevel = set_msglevel,
+	.get_ringparam = get_sge_param,
+	.set_ringparam = set_sge_param,
+	.get_coalesce = get_coalesce,
+	.set_coalesce = set_coalesce,
+	.get_eeprom_len = get_eeprom_len,
+	.get_eeprom = get_eeprom,
+	.set_eeprom = set_eeprom,
+	.get_pauseparam = get_pauseparam,
+	.set_pauseparam = set_pauseparam,
+	.get_link = ethtool_op_get_link,
+	.get_strings = get_strings,
+	.set_phys_id = set_phys_id,
+	.nway_reset = restart_autoneg,
+	.get_sset_count = get_sset_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len = get_regs_len,
+	.get_regs = get_regs,
+	.get_wol = get_wol,
+};
+
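+/*
+ * The private-ioctl parameter structures below use negative field values to
+ * mean "leave unchanged", so in_range() accepts any negative value.
+ */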
+static int in_range(int val, int lo, int hi)
+{
+	return val < 0 || (val <= hi && val >= lo);
+}
+
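+/*
+ * Handler for the SIOCCHIOCTL private ioctl.  Userspace passes a command word
+ * followed by a command-specific structure through ifr_data.  For
+ * illustration only (a hypothetical caller, not part of this driver):
+ *
+ *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
+ *	struct ifreq ifr = { .ifr_data = (void *)&edata };
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ioctl(sock_fd, SIOCCHIOCTL, &ifr);
+ *	printf("%u queue sets\n", edata.val);
+ */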
+static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	u32 cmd;
+	int ret;
+
+	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case CHELSIO_SET_QSET_PARAMS:{
+		int i;
+		struct qset_params *q;
+		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		if (t.qset_idx >= SGE_QSETS)
+			return -EINVAL;
+		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
+		    !in_range(t.cong_thres, 0, 255) ||
+		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+			      MAX_TXQ_ENTRIES) ||
+		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+			      MAX_CTRL_TXQ_ENTRIES) ||
+		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
+			      MAX_RX_BUFFERS) ||
+		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+			      MAX_RX_JUMBO_BUFFERS) ||
+		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+			      MAX_RSPQ_ENTRIES))
+			return -EINVAL;
+
+		if ((adapter->flags & FULL_INIT_DONE) &&
+			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
+			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
+			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
+			t.polling >= 0 || t.cong_thres >= 0))
+			return -EBUSY;
+
+		/* Allow setting of any available qset when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets += pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx < q1)
+			return -EINVAL;
+		if (t.qset_idx > q1 + nqsets - 1)
+			return -EINVAL;
+
+		q = &adapter->params.sge.qset[t.qset_idx];
+
+		if (t.rspq_size >= 0)
+			q->rspq_size = t.rspq_size;
+		if (t.fl_size[0] >= 0)
+			q->fl_size = t.fl_size[0];
+		if (t.fl_size[1] >= 0)
+			q->jumbo_size = t.fl_size[1];
+		if (t.txq_size[0] >= 0)
+			q->txq_size[0] = t.txq_size[0];
+		if (t.txq_size[1] >= 0)
+			q->txq_size[1] = t.txq_size[1];
+		if (t.txq_size[2] >= 0)
+			q->txq_size[2] = t.txq_size[2];
+		if (t.cong_thres >= 0)
+			q->cong_thres = t.cong_thres;
+		if (t.intr_lat >= 0) {
+			struct sge_qset *qs =
+				&adapter->sge.qs[t.qset_idx];
+
+			q->coalesce_usecs = t.intr_lat;
+			t3_update_qset_coalesce(qs, q);
+		}
+		if (t.polling >= 0) {
+			if (adapter->flags & USING_MSIX)
+				q->polling = t.polling;
+			else {
+				/* No polling with INTx for T3A */
+				if (adapter->params.rev == 0 &&
+					!(adapter->flags & USING_MSI))
+					t.polling = 0;
+
+				for (i = 0; i < SGE_QSETS; i++) {
+					q = &adapter->params.sge.
+						qset[i];
+					q->polling = t.polling;
+				}
+			}
+		}
+
+		if (t.lro >= 0) {
+			if (t.lro)
+				dev->wanted_features |= NETIF_F_GRO;
+			else
+				dev->wanted_features &= ~NETIF_F_GRO;
+			netdev_update_features(dev);
+		}
+
+		break;
+	}
+	case CHELSIO_GET_QSET_PARAMS:{
+		struct qset_params *q;
+		struct ch_qset_params t;
+		int q1 = pi->first_qset;
+		int nqsets = pi->nqsets;
+		int i;
+
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+
+		/* Display qsets for all ports when offload enabled */
+		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+			q1 = 0;
+			for_each_port(adapter, i) {
+				pi = adap2pinfo(adapter, i);
+				nqsets = pi->first_qset + pi->nqsets;
+			}
+		}
+
+		if (t.qset_idx >= nqsets)
+			return -EINVAL;
+
+		q = &adapter->params.sge.qset[q1 + t.qset_idx];
+		t.rspq_size = q->rspq_size;
+		t.txq_size[0] = q->txq_size[0];
+		t.txq_size[1] = q->txq_size[1];
+		t.txq_size[2] = q->txq_size[2];
+		t.fl_size[0] = q->fl_size;
+		t.fl_size[1] = q->jumbo_size;
+		t.polling = q->polling;
+		t.lro = !!(dev->features & NETIF_F_GRO);
+		t.intr_lat = q->coalesce_usecs;
+		t.cong_thres = q->cong_thres;
+		t.qnum = q1;
+
+		if (adapter->flags & USING_MSIX)
+			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+		else
+			t.vector = adapter->pdev->irq;
+
+		if (copy_to_user(useraddr, &t, sizeof(t)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_SET_QSET_NUM:{
+		struct ch_reg edata;
+		unsigned int i, first_qset = 0, other_qsets = 0;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (adapter->flags & FULL_INIT_DONE)
+			return -EBUSY;
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+		if (edata.val < 1 ||
+			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
+			return -EINVAL;
+
+		for_each_port(adapter, i)
+			if (adapter->port[i] && adapter->port[i] != dev)
+				other_qsets += adap2pinfo(adapter, i)->nqsets;
+
+		if (edata.val + other_qsets > SGE_QSETS)
+			return -EINVAL;
+
+		pi->nqsets = edata.val;
+
+		for_each_port(adapter, i)
+			if (adapter->port[i]) {
+				pi = adap2pinfo(adapter, i);
+				pi->first_qset = first_qset;
+				first_qset += pi->nqsets;
+			}
+		break;
+	}
+	case CHELSIO_GET_QSET_NUM:{
+		struct ch_reg edata;
+
+		memset(&edata, 0, sizeof(struct ch_reg));
+
+		edata.cmd = CHELSIO_GET_QSET_NUM;
+		edata.val = pi->nqsets;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_LOAD_FW:{
+		u8 *fw_data;
+		struct ch_mem_range t;
+
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		/* Check t.len sanity ? */
+		fw_data = memdup_user(useraddr + sizeof(t), t.len);
+		if (IS_ERR(fw_data))
+			return PTR_ERR(fw_data);
+
+		ret = t3_load_fw(adapter, fw_data, t.len);
+		kfree(fw_data);
+		if (ret)
+			return ret;
+		break;
+	}
+	case CHELSIO_SETMTUTAB:{
+		struct ch_mtus m;
+		int i;
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (offload_running(adapter))
+			return -EBUSY;
+		if (copy_from_user(&m, useraddr, sizeof(m)))
+			return -EFAULT;
+		if (m.nmtus != NMTUS)
+			return -EINVAL;
+		if (m.mtus[0] < 81)	/* accommodate SACK */
+			return -EINVAL;
+
+		/* MTUs must be in ascending order */
+		for (i = 1; i < NMTUS; ++i)
+			if (m.mtus[i] < m.mtus[i - 1])
+				return -EINVAL;
+
+		memcpy(adapter->params.mtus, m.mtus,
+			sizeof(adapter->params.mtus));
+		break;
+	}
+	case CHELSIO_GET_PM:{
+		struct tp_params *p = &adapter->params.tp;
+		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		m.tx_pg_sz = p->tx_pg_size;
+		m.tx_num_pg = p->tx_num_pgs;
+		m.rx_pg_sz = p->rx_pg_size;
+		m.rx_num_pg = p->rx_num_pgs;
+		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
+		if (copy_to_user(useraddr, &m, sizeof(m)))
+			return -EFAULT;
+		break;
+	}
+	case CHELSIO_SET_PM:{
+		struct ch_pm m;
+		struct tp_params *p = &adapter->params.tp;
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (adapter->flags & FULL_INIT_DONE)
+			return -EBUSY;
+		if (copy_from_user(&m, useraddr, sizeof(m)))
+			return -EFAULT;
+		if (!is_power_of_2(m.rx_pg_sz) ||
+			!is_power_of_2(m.tx_pg_sz))
+			return -EINVAL;	/* not power of 2 */
+		if (!(m.rx_pg_sz & 0x14000))
+			return -EINVAL;	/* not 16KB or 64KB */
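+		/* Tx page size must be 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */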
+		if (!(m.tx_pg_sz & 0x1554000))
+			return -EINVAL;
+		if (m.tx_num_pg == -1)
+			m.tx_num_pg = p->tx_num_pgs;
+		if (m.rx_num_pg == -1)
+			m.rx_num_pg = p->rx_num_pgs;
+		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
+			return -EINVAL;
+		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
+			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
+			return -EINVAL;
+		p->rx_pg_size = m.rx_pg_sz;
+		p->tx_pg_size = m.tx_pg_sz;
+		p->rx_num_pgs = m.rx_num_pg;
+		p->tx_num_pgs = m.tx_num_pg;
+		break;
+	}
+	case CHELSIO_GET_MEM:{
+		struct ch_mem_range t;
+		struct mc7 *mem;
+		u64 buf[32];
+
+		if (!is_offload(adapter))
+			return -EOPNOTSUPP;
+		if (!(adapter->flags & FULL_INIT_DONE))
+			return -EIO;	/* need the memory controllers */
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+		if ((t.addr & 7) || (t.len & 7))
+			return -EINVAL;
+		if (t.mem_id == MEM_CM)
+			mem = &adapter->cm;
+		else if (t.mem_id == MEM_PMRX)
+			mem = &adapter->pmrx;
+		else if (t.mem_id == MEM_PMTX)
+			mem = &adapter->pmtx;
+		else
+			return -EINVAL;
+
+		/*
+		 * Version scheme:
+		 * bits 0..9: chip version
+		 * bits 10..15: chip revision
+		 */
+		t.version = 3 | (adapter->params.rev << 10);
+		if (copy_to_user(useraddr, &t, sizeof(t)))
+			return -EFAULT;
+
+		/*
+		 * Read 256 bytes at a time as len can be large and we don't
+		 * want to use huge intermediate buffers.
+		 */
+		useraddr += sizeof(t);	/* advance to start of buffer */
+		while (t.len) {
+			unsigned int chunk =
+				min_t(unsigned int, t.len, sizeof(buf));
+
+			ret =
+				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
+						buf);
+			if (ret)
+				return ret;
+			if (copy_to_user(useraddr, buf, chunk))
+				return -EFAULT;
+			useraddr += chunk;
+			t.addr += chunk;
+			t.len -= chunk;
+		}
+		break;
+	}
+	case CHELSIO_SET_TRACE_FILTER:{
+		struct ch_trace t;
+		const struct trace_params *tp;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		if (copy_from_user(&t, useraddr, sizeof(t)))
+			return -EFAULT;
+
+		tp = (const struct trace_params *)&t.sip;
+		if (t.config_tx)
+			t3_config_trace_filter(adapter, tp, 0,
+						t.invert_match,
+						t.trace_tx);
+		if (t.config_rx)
+			t3_config_trace_filter(adapter, tp, 1,
+						t.invert_match,
+						t.trace_rx);
+		break;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(req);
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	switch (cmd) {
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		/* Convert phy_id from older PRTAD/DEVAD format */
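+		/* (legacy encoding: PRTAD in bits 8-12, DEVAD in bits 0-4) */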
+		if (is_10G(adapter) &&
+		    !mdio_phy_id_is_c45(data->phy_id) &&
+		    (data->phy_id & 0x1f00) &&
+		    !(data->phy_id & 0xe0e0))
+			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
+						       data->phy_id & 0x1f);
+		/* FALLTHRU */
+	case SIOCGMIIPHY:
+		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
+	case SIOCCHIOCTL:
+		return cxgb_extension_ioctl(dev, req->ifr_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+
+	if (new_mtu < 81)	/* accommodate SACK */
+		return -EINVAL;
+	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	init_port_mtus(adapter);
+	if (adapter->params.rev == 0 && offload_running(adapter))
+		t3_load_mtus(adapter, adapter->params.mtus,
+			     adapter->params.a_wnd, adapter->params.b_wnd,
+			     adapter->port[0]->mtu);
+	return 0;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
+	if (offload_running(adapter))
+		write_smt_entry(adapter, pi->port_id);
+	return 0;
+}
+
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable make sure tx flag is always in same state as rx.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+	return features;
+}
+
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
+{
+	netdev_features_t changed = dev->features ^ features;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		cxgb_vlan_mode(dev, features);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int qidx;
+
+	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
+		struct sge_qset *qs = &adapter->sge.qs[qidx];
+		void *source;
+
+		if (adapter->flags & USING_MSIX)
+			source = qs;
+		else
+			source = adapter;
+
+		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
+	}
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.
+ */
+static void mac_stats_update(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+
+		if (netif_running(dev)) {
+			spin_lock(&adapter->stats_lock);
+			t3_mac_update_stats(&p->mac);
+			spin_unlock(&adapter->stats_lock);
+		}
+	}
+}
+
+static void check_link_status(struct adapter *adapter)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+		int link_fault;
+
+		spin_lock_irq(&adapter->work_lock);
+		link_fault = p->link_fault;
+		spin_unlock_irq(&adapter->work_lock);
+
+		if (link_fault) {
+			t3_link_fault(adapter, i);
+			continue;
+		}
+
+		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
+			t3_xgm_intr_disable(adapter, i);
+			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+
+			t3_link_changed(adapter, i);
+			t3_xgm_intr_enable(adapter, i);
+		}
+	}
+}
+
+static void check_t3b2_mac(struct adapter *adapter)
+{
+	int i;
+
+	if (!rtnl_trylock())	/* synchronize with ifdown */
+		return;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+		int status;
+
+		if (!netif_running(dev))
+			continue;
+
+		status = 0;
+		if (netif_running(dev) && netif_carrier_ok(dev))
+			status = t3b2_mac_watchdog_task(&p->mac);
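+		/* 1: the watchdog toggled the MAC, 2: the MAC needs a reset */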
+		if (status == 1)
+			p->mac.stats.num_toggled++;
+		else if (status == 2) {
+			struct cmac *mac = &p->mac;
+
+			t3_mac_set_mtu(mac, dev->mtu);
+			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+			cxgb_set_rxmode(dev);
+			t3_link_start(&p->phy, mac, &p->link_config);
+			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+			t3_port_intr_enable(adapter, p->port_id);
+			p->mac.stats.num_resets++;
+		}
+	}
+	rtnl_unlock();
+}
+
+
+static void t3_adap_check_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       adap_check_task.work);
+	const struct adapter_params *p = &adapter->params;
+	int port;
+	unsigned int v, status, reset;
+
+	adapter->check_task_cnt++;
+
+	check_link_status(adapter);
+
+	/* Accumulate MAC stats if needed */
+	if (!p->linkpoll_period ||
+	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
+	    p->stats_update_period) {
+		mac_stats_update(adapter);
+		adapter->check_task_cnt = 0;
+	}
+
+	if (p->rev == T3_REV_B2)
+		check_t3b2_mac(adapter);
+
+	/*
+	 * Scan the XGMACs to check for various conditions which we want to
+	 * monitor in a periodic polling manner rather than via an interrupt
+	 * condition.  This is used for conditions which would otherwise flood
+	 * the system with interrupts and we only really need to know that the
+	 * conditions are "happening" ...  For each condition we count the
+	 * detection of the condition and reset it for the next polling loop.
+	 */
+	for_each_port(adapter, port) {
+		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
+		u32 cause;
+
+		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
+		reset = 0;
+		if (cause & F_RXFIFO_OVERFLOW) {
+			mac->stats.rx_fifo_ovfl++;
+			reset |= F_RXFIFO_OVERFLOW;
+		}
+
+		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
+	}
+
+	/*
+	 * We do the same as above for FL_EMPTY interrupts.
+	 */
+	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+	reset = 0;
+
+	if (status & F_FLEMPTY) {
+		struct sge_qset *qs = &adapter->sge.qs[0];
+		int i = 0;
+
+		reset |= F_FLEMPTY;
+
+		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
+		    0xffff;
+
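+		/*
+		 * Each queue set has two free lists; walk the empty-status
+		 * bits in pairs, crediting fl[0] and fl[1] alternately.
+		 */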
+		while (v) {
+			qs->fl[i].empty += (v & 1);
+			if (i)
+				qs++;
+			i ^= 1;
+			v >>= 1;
+		}
+	}
+
+	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
+
+	/* Schedule the next check update if any port is active. */
+	spin_lock_irq(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_chk_task(adapter);
+	spin_unlock_irq(&adapter->work_lock);
+}
+
+static void db_full_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_full_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_empty_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_drop_task);
+	unsigned long delay = 1000;
+	unsigned short r;
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+	/*
+	 * Sleep a while before ringing the driver qset dbs.
+	 * The delay is between 1000-2023 usecs.
+	 */
+	get_random_bytes(&r, 2);
+	delay += r & 1023;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(delay));
+	ring_dbs(adapter);
+}
+
+/*
+ * Processes external (PHY) interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       ext_intr_handler_task);
+	int i;
+
+	/* Disable link fault interrupts */
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+
+		t3_xgm_intr_disable(adapter, i);
+		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+	}
+
+	/* Re-enable link fault interrupts */
+	t3_phy_intr_handler(adapter);
+
+	for_each_port(adapter, i)
+		t3_xgm_intr_enable(adapter, i);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->work_lock);
+	if (adapter->slow_intr_mask) {
+		adapter->slow_intr_mask |= F_T3DBG;
+		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
+		t3_write_reg(adapter, A_PL_INT_ENABLE0,
+			     adapter->slow_intr_mask);
+	}
+	spin_unlock_irq(&adapter->work_lock);
+}
+
+/*
+ * Interrupt-context handler for external (PHY) interrupts.
+ */
+void t3_os_ext_intr_handler(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as they may be slow
+	 * and we use a mutex to protect MDIO registers.  We disable PHY
+	 * interrupts in the meantime and let the task reenable them when
+	 * it's done.
+	 */
+	spin_lock(&adapter->work_lock);
+	if (adapter->slow_intr_mask) {
+		adapter->slow_intr_mask &= ~F_T3DBG;
+		t3_write_reg(adapter, A_PL_INT_ENABLE0,
+			     adapter->slow_intr_mask);
+		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
+	}
+	spin_unlock(&adapter->work_lock);
+}
+
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
+{
+	struct net_device *netdev = adapter->port[port_id];
+	struct port_info *pi = netdev_priv(netdev);
+
+	spin_lock(&adapter->work_lock);
+	pi->link_fault = 1;
+	spin_unlock(&adapter->work_lock);
+}
+
+static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
+{
+	int i, ret = 0;
+
+	if (is_offload(adapter) &&
+	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+		offload_close(&adapter->tdev);
+	}
+
+	/* Stop all ports */
+	for_each_port(adapter, i) {
+		struct net_device *netdev = adapter->port[i];
+
+		if (netif_running(netdev))
+			__cxgb_close(netdev, on_wq);
+	}
+
+	/* Stop SGE timers */
+	t3_stop_sge_timers(adapter);
+
+	adapter->flags &= ~FULL_INIT_DONE;
+
+	if (reset)
+		ret = t3_reset_adapter(adapter);
+
+	pci_disable_device(adapter->pdev);
+
+	return ret;
+}
+
+static int t3_reenable_adapter(struct adapter *adapter)
+{
+	if (pci_enable_device(adapter->pdev)) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		goto err;
+	}
+	pci_set_master(adapter->pdev);
+	pci_restore_state(adapter->pdev);
+	pci_save_state(adapter->pdev);
+
+	/* Free sge resources */
+	t3_free_sge_resources(adapter);
+
+	if (t3_replay_prep_adapter(adapter))
+		goto err;
+
+	return 0;
+err:
+	return -1;
+}
+
+static void t3_resume_ports(struct adapter *adapter)
+{
+	int i;
+
+	/* Restart the ports */
+	for_each_port(adapter, i) {
+		struct net_device *netdev = adapter->port[i];
+
+		if (netif_running(netdev)) {
+			if (cxgb_open(netdev)) {
+				dev_err(&adapter->pdev->dev,
+					"can't bring device back up"
+					" after reset\n");
+				continue;
+			}
+		}
+	}
+
+	if (is_offload(adapter) && !ofld_disable)
+		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+}
+
+/*
+ * Processes a fatal error:
+ * Bring the ports down, reset the chip, bring the ports back up.
+ */
+static void fatal_error_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       fatal_error_handler_task);
+	int err = 0;
+
+	rtnl_lock();
+	err = t3_adapter_error(adapter, 1, 1);
+	if (!err)
+		err = t3_reenable_adapter(adapter);
+	if (!err)
+		t3_resume_ports(adapter);
+
+	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
+	rtnl_unlock();
+}
+
+void t3_fatal_err(struct adapter *adapter)
+{
+	unsigned int fw_status[4];
+
+	if (adapter->flags & FULL_INIT_DONE) {
+		t3_sge_stop(adapter);
+		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
+		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
+		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
+		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
+
+		spin_lock(&adapter->work_lock);
+		t3_intr_disable(adapter);
+		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
+		spin_unlock(&adapter->work_lock);
+	}
+	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
+	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
+		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
+			 fw_status[0], fw_status[1],
+			 fw_status[2], fw_status[3]);
+}
+
+/**
+ * t3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+					     pci_channel_state_t state)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	t3_adapter_error(adapter, 0, 0);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * t3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	if (!t3_reenable_adapter(adapter))
+		return PCI_ERS_RESULT_RECOVERED;
+
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * t3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void t3_io_resume(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+	rtnl_lock();
+	t3_resume_ports(adapter);
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers t3_err_handler = {
+	.error_detected = t3_io_error_detected,
+	.slot_reset = t3_io_slot_reset,
+	.resume = t3_io_resume,
+};
+
+/*
+ * Set the number of qsets based on the number of CPUs and the number of ports,
+ * not to exceed the number of available qsets, assuming there are enough qsets
+ * per port in HW.
+ */
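+/*
+ * Illustration only (assuming SGE_QSETS is 8): a 2-port adapter that got 9
+ * MSI-X vectors starts with 8 qset vectors, halves that to 4 per port, and is
+ * then capped by the CPU count; a 4-port adapter always ends up with 1 queue
+ * set per port.
+ */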
+static void set_nqsets(struct adapter *adap)
+{
+	int i, j = 0;
+	int num_cpus = netif_get_num_default_rss_queues();
+	int hwports = adap->params.nports;
+	int nqsets = adap->msix_nvectors - 1;
+
+	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
+		if (hwports == 2 &&
+		    (hwports * nqsets > SGE_QSETS ||
+		     num_cpus >= nqsets / hwports))
+			nqsets /= hwports;
+		if (nqsets > num_cpus)
+			nqsets = num_cpus;
+		if (nqsets < 1 || hwports == 4)
+			nqsets = 1;
+	} else
+		nqsets = 1;
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = j;
+		pi->nqsets = nqsets;
+		j = pi->first_qset + nqsets;
+
+		dev_info(&adap->pdev->dev,
+			 "Port %d using %d queue sets.\n", i, nqsets);
+	}
+}
+
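+/*
+ * Request one MSI-X vector per queue set plus one for the non-data (slow
+ * path) interrupt; fall back to fewer vectors, but require at least one per
+ * port plus the slow-path vector.
+ */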
+static int cxgb_enable_msix(struct adapter *adap)
+{
+	struct msix_entry entries[SGE_QSETS + 1];
+	int vectors;
+	int i;
+
+	vectors = ARRAY_SIZE(entries);
+	for (i = 0; i < vectors; ++i)
+		entries[i].entry = i;
+
+	vectors = pci_enable_msix_range(adap->pdev, entries,
+					adap->params.nports + 1, vectors);
+	if (vectors < 0)
+		return vectors;
+
+	for (i = 0; i < vectors; ++i)
+		adap->msix_info[i].vec = entries[i].vector;
+	adap->msix_nvectors = vectors;
+
+	return 0;
+}
+
+static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
+{
+	static const char *pci_variant[] = {
+		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
+	};
+
+	int i;
+	char buf[80];
+
+	if (is_pcie(adap))
+		snprintf(buf, sizeof(buf), "%s x%d",
+			 pci_variant[adap->params.pci.variant],
+			 adap->params.pci.width);
+	else
+		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
+			 pci_variant[adap->params.pci.variant],
+			 adap->params.pci.speed, adap->params.pci.width);
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		const struct port_info *pi = netdev_priv(dev);
+
+		if (!test_bit(i, &adap->registered_device_map))
+			continue;
+		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
+			    ai->desc, pi->phy.desc,
+			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
+			    (adap->flags & USING_MSIX) ? " MSI-X" :
+			    (adap->flags & USING_MSI) ? " MSI" : "");
+		if (adap->name == dev->name && adap->params.vpd.mclk)
+			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+			       adap->name, t3_mc7_size(&adap->cm) >> 20,
+			       t3_mc7_size(&adap->pmtx) >> 20,
+			       t3_mc7_size(&adap->pmrx) >> 20,
+			       adap->params.vpd.sn);
+	}
+}
+
+static const struct net_device_ops cxgb_netdev_ops = {
+	.ndo_open		= cxgb_open,
+	.ndo_stop		= cxgb_close,
+	.ndo_start_xmit		= t3_eth_xmit,
+	.ndo_get_stats		= cxgb_get_stats,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= cxgb_set_rxmode,
+	.ndo_do_ioctl		= cxgb_ioctl,
+	.ndo_change_mtu		= cxgb_change_mtu,
+	.ndo_set_mac_address	= cxgb_set_mac_addr,
+	.ndo_fix_features	= cxgb_fix_features,
+	.ndo_set_features	= cxgb_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cxgb_netpoll,
+#endif
+};
+
+static void cxgb3_init_iscsi_mac(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
+	pi->iscsic.mac_addr[3] |= 0x80;
+}
+
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int i, err, pci_using_dac = 0;
+	resource_size_t mmio_start, mmio_len;
+	const struct adapter_info *ai;
+	struct adapter *adapter = NULL;
+	struct port_info *pi;
+
+	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+	if (!cxgb3_wq) {
+		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
+		if (!cxgb3_wq) {
+			pr_err("cannot initialize work queue\n");
+			return -ENOMEM;
+		}
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto out;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		/* Just info, some other driver may have claimed the device. */
+		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+		goto out_disable_device;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+			       "coherent allocations\n");
+			goto out_release_regions;
+		}
+	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+		dev_err(&pdev->dev, "no usable DMA configuration\n");
+		goto out_release_regions;
+	}
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	ai = t3_get_adapter_info(ent->driver_data);
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	adapter->nofail_skb =
+		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+	if (!adapter->nofail_skb) {
+		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
+	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+	if (!adapter->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
+	adapter->pdev = pdev;
+	adapter->name = pci_name(pdev);
+	adapter->msg_enable = dflt_msg_enable;
+	adapter->mmio_len = mmio_len;
+
+	mutex_init(&adapter->mdio_lock);
+	spin_lock_init(&adapter->work_lock);
+	spin_lock_init(&adapter->stats_lock);
+
+	INIT_LIST_HEAD(&adapter->adapter_list);
+	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+	INIT_WORK(&adapter->db_full_task, db_full_task);
+	INIT_WORK(&adapter->db_empty_task, db_empty_task);
+	INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
+	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
+
+	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adapter->port[i] = netdev;
+		pi = netdev_priv(netdev);
+		pi->adapter = adapter;
+		pi->port_id = i;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+		netdev->mem_start = mmio_start;
+		netdev->mem_end = mmio_start + mmio_len - 1;
+		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+		netdev->features |= netdev->hw_features |
+				    NETIF_F_HW_VLAN_CTAG_TX;
+		netdev->vlan_features |= netdev->features & VLAN_FEAT;
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+
+		netdev->netdev_ops = &cxgb_netdev_ops;
+		netdev->ethtool_ops = &cxgb_ethtool_ops;
+	}
+
+	pci_set_drvdata(pdev, adapter);
+	if (t3_prep_adapter(adapter, ai, 1) < 0) {
+		err = -ENODEV;
+		goto out_free_dev;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for_each_port(adapter, i) {
+		err = register_netdev(adapter->port[i]);
+		if (err)
+			dev_warn(&pdev->dev,
+				 "cannot register net device %s, skipping\n",
+				 adapter->port[i]->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i]->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+		}
+	}
+	if (!adapter->registered_device_map) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto out_free_dev;
+	}
+
+	for_each_port(adapter, i)
+		cxgb3_init_iscsi_mac(adapter->port[i]);
+
+	/* Driver's ready. Reflect it on LEDs */
+	t3_led_ready(adapter);
+
+	if (is_offload(adapter)) {
+		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+		cxgb3_adapter_ofld(adapter);
+	}
+
+	/* See what interrupts we'll be using */
+	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else if (msi > 0 && pci_enable_msi(pdev) == 0)
+		adapter->flags |= USING_MSI;
+
+	set_nqsets(adapter);
+
+	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
+				 &cxgb3_attr_group);
+
+	print_port_info(adapter, ai);
+	return 0;
+
+out_free_dev:
+	iounmap(adapter->regs);
+	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
+		if (adapter->port[i])
+			free_netdev(adapter->port[i]);
+
+out_free_adapter:
+	kfree(adapter);
+
+out_release_regions:
+	pci_release_regions(pdev);
+out_disable_device:
+	pci_disable_device(pdev);
+out:
+	return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	if (adapter) {
+		int i;
+
+		t3_sge_stop(adapter);
+		sysfs_remove_group(&adapter->port[0]->dev.kobj,
+				   &cxgb3_attr_group);
+
+		if (is_offload(adapter)) {
+			cxgb3_adapter_unofld(adapter);
+			if (test_bit(OFFLOAD_DEVMAP_BIT,
+				     &adapter->open_device_map))
+				offload_close(&adapter->tdev);
+		}
+
+		for_each_port(adapter, i)
+		    if (test_bit(i, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[i]);
+
+		t3_stop_sge_timers(adapter);
+		t3_free_sge_resources(adapter);
+		cxgb_disable_msi(adapter);
+
+		for_each_port(adapter, i)
+			if (adapter->port[i])
+				free_netdev(adapter->port[i]);
+
+		iounmap(adapter->regs);
+		if (adapter->nofail_skb)
+			kfree_skb(adapter->nofail_skb);
+		kfree(adapter);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+	}
+}
+
+static struct pci_driver driver = {
+	.name = DRV_NAME,
+	.id_table = cxgb3_pci_tbl,
+	.probe = init_one,
+	.remove = remove_one,
+	.err_handler = &t3_err_handler,
+};
+
+static int __init cxgb3_init_module(void)
+{
+	int ret;
+
+	cxgb3_offload_init();
+
+	ret = pci_register_driver(&driver);
+	return ret;
+}
+
+static void __exit cxgb3_cleanup_module(void)
+{
+	pci_unregister_driver(&driver);
+	if (cxgb3_wq)
+		destroy_workqueue(cxgb3_wq);
+}
+
+module_init(cxgb3_init_module);
+module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
new file mode 100644
index 0000000..76684dc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1424 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x10000;
+
+static void cxgb_neigh_update(struct neighbour *neigh);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+			  struct neighbour *neigh, const void *daddr);
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+	const struct adapter *adapter = tdev2adap(tdev);
+
+	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+/**
+ *	cxgb3_register_client - register an offload client
+ *	@client: the client
+ *
+ *	Add the client to the client list and invoke its add callback
+ *	for each activated offload device.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_add_tail(&client->client_list, &client_list);
+
+	if (client->add) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->add(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_register_client);
+
+/**
+ *	cxgb3_unregister_client - unregister an offload client
+ *	@client: the client
+ *
+ *	Remove the client from the client list and invoke its remove
+ *	callback for each activated offload device.
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_del(&client->client_list);
+
+	if (client->remove) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->remove(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ *	cxgb3_add_clients - activate registered clients for an offload device
+ *	@tdev: the offload device
+ *
+ *	Calls back all registered clients once an offload device is activated
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->add)
+			client->add(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ *	cxgb3_remove_clients - deactivates registered clients
+ *			       for an offload device
+ *	@tdev: the offload device
+ *
+ *	Calls back all registered clients once an offload device is deactivated
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->remove)
+			client->remove(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->event_handler)
+			client->event_handler(tdev, event, port);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+					   const unsigned char *mac,
+					   unsigned int vlan)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+
+		if (ether_addr_equal(dev->dev_addr, mac)) {
+			rcu_read_lock();
+			if (vlan && vlan != VLAN_VID_MASK) {
+				dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
+			} else if (netif_is_bond_slave(dev)) {
+				struct net_device *upper_dev;
+
+				while ((upper_dev =
+					netdev_master_upper_dev_get_rcu(dev)))
+					dev = upper_dev;
+			}
+			rcu_read_unlock();
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+			      void *data)
+{
+	int i;
+	int ret = 0;
+	unsigned int val = 0;
+	struct ulp_iscsi_info *uiip = data;
+
+	switch (req) {
+	case ULP_ISCSI_GET_PARAMS:
+		uiip->pdev = adapter->pdev;
+		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+
+		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
+		for (i = 0; i < 4; i++, val >>= 8)
+			uiip->pgsz_factor[i] = val & 0xFF;
+
+		val = t3_read_reg(adapter, A_TP_PARA_REG7);
+		uiip->max_txsz =
+		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
+				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
+		/*
+		 * On tx, the iscsi pdu has to be <= tx page size and has to
+		 * fit into the Tx PM FIFO.
+		 */
+		val = min(adapter->params.tp.tx_pg_size,
+			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
+		uiip->max_txsz = min(val, uiip->max_txsz);
+
+		/* set MaxRxData to 16224 */
+		val = t3_read_reg(adapter, A_TP_PARA_REG2);
+		if ((val >> S_MAXRXDATA) != 0x3f60) {
+			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
+			val |= V_MAXRXDATA(0x3f60);
+			pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
+				adapter->name, val);
+			t3_write_reg(adapter, A_TP_PARA_REG2, val);
+		}
+
+		/*
+		 * On Rx, the iSCSI PDU has to be less than the Rx page size
+		 * and the max Rx data length programmed in TP.
+		 */
+		val = min(adapter->params.tp.rx_pg_size,
+			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
+				S_MAXRXDATA) & M_MAXRXDATA);
+		uiip->max_rxsz = min(val, uiip->max_rxsz);
+		break;
+	case ULP_ISCSI_SET_PARAMS:
+		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+		/* program the ddp page sizes */
+		for (i = 0; i < 4; i++)
+			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
+			pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
+				adapter->name, val, uiip->pgsz_factor[0],
+				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
+				uiip->pgsz_factor[3]);
+			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+		}
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+/* Response queue used for RDMA events. */
+#define ASYNC_NOTIF_RSPQ 0
+
+static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+{
+	int ret = 0;
+
+	switch (req) {
+	case RDMA_GET_PARAMS: {
+		struct rdma_info *rdma = data;
+		struct pci_dev *pdev = adapter->pdev;
+
+		rdma->udbell_physbase = pci_resource_start(pdev, 2);
+		rdma->udbell_len = pci_resource_len(pdev, 2);
+		rdma->tpt_base =
+			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+		rdma->pbl_base =
+			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+		rdma->pdev = pdev;
+		break;
+	}
+	case RDMA_CQ_OP:{
+		unsigned long flags;
+		struct rdma_cq_op *rdma = data;
+
+		/* may be called in any context */
+		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+					rdma->credits);
+		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
+		break;
+	}
+	case RDMA_GET_MEM:{
+		struct ch_mem_range *t = data;
+		struct mc7 *mem;
+
+		if ((t->addr & 7) || (t->len & 7))
+			return -EINVAL;
+		if (t->mem_id == MEM_CM)
+			mem = &adapter->cm;
+		else if (t->mem_id == MEM_PMRX)
+			mem = &adapter->pmrx;
+		else if (t->mem_id == MEM_PMTX)
+			mem = &adapter->pmtx;
+		else
+			return -EINVAL;
+
+		ret =
+			t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
+					(u64 *) t->buf);
+		if (ret)
+			return ret;
+		break;
+	}
+	case RDMA_CQ_SETUP:{
+		struct rdma_cq_setup *rdma = data;
+
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret =
+			t3_sge_init_cqcntxt(adapter, rdma->id,
+					rdma->base_addr, rdma->size,
+					ASYNC_NOTIF_RSPQ,
+					rdma->ovfl_mode, rdma->credits,
+					rdma->credit_thres);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	}
+	case RDMA_CQ_DISABLE:
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	case RDMA_CTRL_QP_SETUP:{
+		struct rdma_ctrlqp_setup *rdma = data;
+
+		spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
+						SGE_CNTXT_RDMA,
+						ASYNC_NOTIF_RSPQ,
+						rdma->base_addr, rdma->size,
+						FW_RI_TID_START, 1, 0);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		break;
+	}
+	case RDMA_GET_MIB: {
+		spin_lock(&adapter->stats_lock);
+		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
+		spin_unlock(&adapter->stats_lock);
+		break;
+	}
+	default:
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
+{
+	struct adapter *adapter = tdev2adap(tdev);
+	struct tid_range *tid;
+	struct mtutab *mtup;
+	struct iff_mac *iffmacp;
+	struct ddp_params *ddpp;
+	struct adap_ports *ports;
+	struct ofld_page_info *rx_page_info;
+	struct tp_params *tp = &adapter->params.tp;
+	int i;
+
+	switch (req) {
+	case GET_MAX_OUTSTANDING_WR:
+		*(unsigned int *)data = FW_WR_NUM;
+		break;
+	case GET_WR_LEN:
+		*(unsigned int *)data = WR_FLITS;
+		break;
+	case GET_TX_MAX_CHUNK:
+		*(unsigned int *)data = 1 << 20;	/* 1MB */
+		break;
+	case GET_TID_RANGE:
+		tid = data;
+		tid->num = t3_mc5_size(&adapter->mc5) -
+		    adapter->params.mc5.nroutes -
+		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+		tid->base = 0;
+		break;
+	case GET_STID_RANGE:
+		tid = data;
+		tid->num = adapter->params.mc5.nservers;
+		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
+		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+		break;
+	case GET_L2T_CAPACITY:
+		*(unsigned int *)data = 2048;
+		break;
+	case GET_MTUS:
+		mtup = data;
+		mtup->size = NMTUS;
+		mtup->mtus = adapter->params.mtus;
+		break;
+	case GET_IFF_FROM_MAC:
+		iffmacp = data;
+		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
+						iffmacp->vlan_tag &
+						VLAN_VID_MASK);
+		break;
+	case GET_DDP_PARAMS:
+		ddpp = data;
+		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
+		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
+		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+		break;
+	case GET_PORTS:
+		ports = data;
+		ports->nports = adapter->params.nports;
+		for_each_port(adapter, i)
+			ports->lldevs[i] = adapter->port[i];
+		break;
+	case ULP_ISCSI_GET_PARAMS:
+	case ULP_ISCSI_SET_PARAMS:
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		return cxgb_ulp_iscsi_ctl(adapter, req, data);
+	case RDMA_GET_PARAMS:
+	case RDMA_CQ_OP:
+	case RDMA_CQ_SETUP:
+	case RDMA_CQ_DISABLE:
+	case RDMA_CTRL_QP_SETUP:
+	case RDMA_GET_MEM:
+	case RDMA_GET_MIB:
+		if (!offload_running(adapter))
+			return -EAGAIN;
+		return cxgb_rdma_ctl(adapter, req, data);
+	case GET_RX_PAGE_INFO:
+		rx_page_info = data;
+		rx_page_info->page_size = tp->rx_pg_size;
+		rx_page_info->num = tp->rx_num_pgs;
+		break;
+	case GET_ISCSI_IPV4ADDR: {
+		struct iscsi_ipv4addr *p = data;
+		struct port_info *pi = netdev_priv(p->dev);
+		p->ipv4addr = pi->iscsi_ipv4addr;
+		break;
+	}
+	case GET_EMBEDDED_INFO: {
+		struct ch_embedded_info *e = data;
+
+		spin_lock(&adapter->stats_lock);
+		t3_get_fw_version(adapter, &e->fw_vers);
+		t3_get_tp_version(adapter, &e->tp_vers);
+		spin_unlock(&adapter->stats_lock);
+		break;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * Dummy handler for Rx offload packets in case we get an offload packet before
+ * proper processing is set up.  Such packets are simply dropped, since it
+ * isn't normal to receive offload packets at this stage.
+ */
+static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
+				int n)
+{
+	while (n--)
+		dev_kfree_skb_any(skbs[n]);
+	return 0;
+}
+
+static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+}
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev)
+{
+	dev->recv = rx_offload_blackhole;
+	dev->neigh_update = dummy_neigh_update;
+}
+
+/*
+ * Free an active-open TID.
+ */
+void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+	union active_open_entry *p = atid2entry(t, atid);
+	void *ctx = p->t3c_tid.ctx;
+
+	spin_lock_bh(&t->atid_lock);
+	p->next = t->afree;
+	t->afree = p;
+	t->atids_in_use--;
+	spin_unlock_bh(&t->atid_lock);
+
+	return ctx;
+}
+
+EXPORT_SYMBOL(cxgb3_free_atid);
+
+/*
+ * Free a server TID and return it to the free pool.
+ */
+void cxgb3_free_stid(struct t3cdev *tdev, int stid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+	union listen_entry *p = stid2entry(t, stid);
+
+	spin_lock_bh(&t->stid_lock);
+	p->next = t->sfree;
+	t->sfree = p;
+	t->stids_in_use--;
+	spin_unlock_bh(&t->stid_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_free_stid);
+
+void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
+		      void *ctx, unsigned int tid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	t->tid_tab[tid].client = client;
+	t->tid_tab[tid].ctx = ctx;
+	atomic_inc(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_insert_tid);
+
+/*
+ * Populate a TID_RELEASE WR.  The skb must already be properly sized.
+ */
+static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
+{
+	struct cpl_tid_release *req;
+
+	skb->priority = CPL_PRIORITY_SETUP;
+	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
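+/*
+ * Work function that drains the deferred TID-release list.  If an skb cannot
+ * be allocated, the pre-allocated nofail_skb is used so that at least one
+ * release can always be sent; entries are re-queued only if even that
+ * fallback is unavailable.
+ */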
+static void t3_process_tid_release_list(struct work_struct *work)
+{
+	struct t3c_data *td = container_of(work, struct t3c_data,
+					   tid_release_task);
+	struct sk_buff *skb;
+	struct t3cdev *tdev = td->dev;
+
+
+	spin_lock_bh(&td->tid_release_lock);
+	while (td->tid_release_list) {
+		struct t3c_tid_entry *p = td->tid_release_list;
+
+		td->tid_release_list = p->ctx;
+		spin_unlock_bh(&td->tid_release_lock);
+
+		skb = alloc_skb(sizeof(struct cpl_tid_release),
+				GFP_KERNEL);
+		if (!skb)
+			skb = td->nofail_skb;
+		if (!skb) {
+			spin_lock_bh(&td->tid_release_lock);
+			p->ctx = (void *)td->tid_release_list;
+			td->tid_release_list = p;
+			break;
+		}
+		mk_tid_release(skb, p - td->tid_maps.tid_tab);
+		cxgb3_ofld_send(tdev, skb);
+		p->ctx = NULL;
+		if (skb == td->nofail_skb)
+			td->nofail_skb =
+				alloc_skb(sizeof(struct cpl_tid_release),
+					GFP_KERNEL);
+		spin_lock_bh(&td->tid_release_lock);
+	}
+	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
+	spin_unlock_bh(&td->tid_release_lock);
+
+	if (!td->nofail_skb)
+		td->nofail_skb =
+			alloc_skb(sizeof(struct cpl_tid_release),
+				GFP_KERNEL);
+}
+
+/* use ctx as a next pointer in the tid release list */
+void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
+{
+	struct t3c_data *td = T3C_DATA(tdev);
+	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
+
+	spin_lock_bh(&td->tid_release_lock);
+	p->ctx = (void *)td->tid_release_list;
+	p->client = NULL;
+	td->tid_release_list = p;
+	if (!p->ctx || td->release_list_incomplete)
+		schedule_work(&td->tid_release_task);
+	spin_unlock_bh(&td->tid_release_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_queue_tid_release);
+
+/*
+ * Remove a tid from the TID table.  A client may defer processing its last
+ * CPL message if it is locked at the time it arrives, and while the message
+ * sits in the client's backlog the TID may be reused for another connection.
+ * To handle this we atomically switch the TID association if it still points
+ * to the original client context.
+ */
+void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
+{
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	BUG_ON(tid >= t->ntids);
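+	/* T3A just drops the association; later revs also notify the HW */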
+	if (tdev->type == T3A)
+		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
+	else {
+		struct sk_buff *skb;
+
+		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+		if (likely(skb)) {
+			mk_tid_release(skb, tid);
+			cxgb3_ofld_send(tdev, skb);
+			t->tid_tab[tid].ctx = NULL;
+		} else
+			cxgb3_queue_tid_release(tdev, tid);
+	}
+	atomic_dec(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_remove_tid);
+
+int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
+		     void *ctx)
+{
+	int atid = -1;
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	spin_lock_bh(&t->atid_lock);
+	if (t->afree &&
+	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
+	    t->ntids) {
+		union active_open_entry *p = t->afree;
+
+		atid = (p - t->atid_tab) + t->atid_base;
+		t->afree = p->next;
+		p->t3c_tid.ctx = ctx;
+		p->t3c_tid.client = client;
+		t->atids_in_use++;
+	}
+	spin_unlock_bh(&t->atid_lock);
+	return atid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_atid);
+
+int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
+		     void *ctx)
+{
+	int stid = -1;
+	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+	spin_lock_bh(&t->stid_lock);
+	if (t->sfree) {
+		union listen_entry *p = t->sfree;
+
+		stid = (p - t->stid_tab) + t->stid_base;
+		t->sfree = p->next;
+		p->t3c_tid.ctx = ctx;
+		p->t3c_tid.client = client;
+		t->stids_in_use++;
+	}
+	spin_unlock_bh(&t->stid_lock);
+	return stid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_stid);
+
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+	const struct port_info *pi = netdev_priv(dev);
+
+	return (struct t3cdev *)pi->adapter;
+}
+
+EXPORT_SYMBOL(dev2t3cdev);
+
+static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_smt_write_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+		       rpl->status, GET_TID(rpl));
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+		       rpl->status, GET_TID(rpl));
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_rte_write_rpl *rpl = cplhdr(skb);
+
+	if (rpl->status != CPL_ERR_NONE)
+		pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+		       rpl->status, GET_TID(rpl));
+
+	return CPL_RET_BUF_DONE;
+}
+
+static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+	unsigned int atid = G_TID(ntohl(rpl->atid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+	    t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
+		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL](dev, skb,
+								   t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_ACT_OPEN_RPL);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int stid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode] (dev, skb,
+							     t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, p->opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, p->opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_pass_accept_req *req = cplhdr(skb);
+	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+	struct t3c_tid_entry *t3c_tid;
+	unsigned int tid = GET_TID(req);
+
+	if (unlikely(tid >= t->ntids)) {
+		pr_err("%s: passive open TID %u too large\n",
+		       dev->name, tid);
+		t3_fatal_err(tdev2adap(dev));
+		return CPL_RET_BUF_DONE;
+	}
+
+	t3c_tid = lookup_stid(t, stid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
+		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_PASS_ACCEPT_REQ);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+/*
+ * Returns an sk_buff for a reply CPL message of size len.  If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated.  The input skb must be of size at least len.  Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+					       gfp_t gfp)
+{
+	if (likely(!skb_cloned(skb))) {
+		BUG_ON(skb->len < len);
+		__skb_trim(skb, len);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+		if (skb)
+			__skb_put(skb, len);
+	}
+	return skb;
+}
+
+static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
+{
+	union opcode_tid *p = cplhdr(skb);
+	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[p->opcode]) {
+		return t3c_tid->client->handlers[p->opcode]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		struct cpl_abort_req_rss *req = cplhdr(skb);
+		struct cpl_abort_rpl *rpl;
+		struct sk_buff *reply_skb;
+		unsigned int tid = GET_TID(req);
+		u8 cmd = req->status;
+
+		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+			goto out;
+
+		reply_skb = cxgb3_get_cpl_reply_skb(skb,
+						    sizeof(struct cpl_abort_rpl),
+						    GFP_ATOMIC);
+
+		if (!reply_skb) {
+			pr_err("do_abort_req_rss: couldn't get skb!\n");
+			goto out;
+		}
+		reply_skb->priority = CPL_PRIORITY_DATA;
+		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+		rpl = cplhdr(reply_skb);
+		rpl->wr.wr_hi =
+		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+		rpl->cmd = cmd;
+		cxgb3_ofld_send(dev, reply_skb);
+out:
+		return CPL_RET_BUF_DONE;
+	}
+}
+
+static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+	struct t3c_tid_entry *t3c_tid;
+	unsigned int tid = GET_TID(req);
+
+	if (unlikely(tid >= t->ntids)) {
+		pr_err("%s: active establish TID %u too large\n",
+		       dev->name, tid);
+		t3_fatal_err(tdev2adap(dev));
+		return CPL_RET_BUF_DONE;
+	}
+
+	t3c_tid = lookup_atid(t, atid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
+		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
+		    (dev, skb, t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, CPL_ACT_ESTABLISH);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
+{
+	struct cpl_trace_pkt *p = cplhdr(skb);
+
+	skb->protocol = htons(0xffff);
+	skb->dev = dev->lldev;
+	skb_pull(skb, sizeof(*p));
+	skb_reset_mac_header(skb);
+	netif_receive_skb(skb);
+	return 0;
+}
+
+/*
+ * That skb had better have come from process_responses(), where we abuse
+ * ->priority and ->csum to carry our data.  NB: if we get to a per-arch
+ * ->csum, things might get really interesting here.
+ */
+
+static inline u32 get_hwtid(struct sk_buff *skb)
+{
+	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
+}
+
+static inline u32 get_opcode(struct sk_buff *skb)
+{
+	return G_OPCODE(ntohl((__force __be32)skb->csum));
+}
+
+static int do_term(struct t3cdev *dev, struct sk_buff *skb)
+{
+	unsigned int hwtid = get_hwtid(skb);
+	unsigned int opcode = get_opcode(skb);
+	struct t3c_tid_entry *t3c_tid;
+
+	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+	    t3c_tid->client->handlers[opcode]) {
+		return t3c_tid->client->handlers[opcode] (dev, skb,
+							  t3c_tid->ctx);
+	} else {
+		pr_err("%s: received clientless CPL command 0x%x\n",
+		       dev->name, opcode);
+		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+	}
+}
+
+static int nb_callback(struct notifier_block *self, unsigned long event,
+		       void *ctx)
+{
+	switch (event) {
+	case (NETEVENT_NEIGH_UPDATE):{
+		cxgb_neigh_update((struct neighbour *)ctx);
+		break;
+	}
+	case (NETEVENT_REDIRECT):{
+		struct netevent_redirect *nr = ctx;
+		cxgb_redirect(nr->old, nr->new, nr->neigh,
+			      nr->daddr);
+		cxgb_neigh_update(nr->neigh);
+		break;
+	}
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = nb_callback
+};
+
+/*
+ * Process a received packet with an unknown/unexpected CPL opcode.
+ */
+static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
+	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+}
+
+/*
+ * Handlers for each CPL opcode
+ */
+static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+
+/*
+ * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
+ * to unregister an existing handler.
+ */
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
+{
+	if (opcode < NUM_CPL_CMDS)
+		cpl_handlers[opcode] = h ? h : do_bad_cpl;
+	else
+		pr_err("T3C: handler registration for opcode %x failed\n",
+		       opcode);
+}
+
+EXPORT_SYMBOL(t3_register_cpl_handler);
+
+/*
+ * T3CDEV's receive method.
+ */
+static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
+{
+	while (n--) {
+		struct sk_buff *skb = *skbs++;
+		unsigned int opcode = get_opcode(skb);
+		int ret = cpl_handlers[opcode] (dev, skb);
+
+#if VALIDATE_TID
+		if (ret & CPL_RET_UNKNOWN_TID) {
+			union opcode_tid *p = cplhdr(skb);
+
+			pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
+			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
+		}
+#endif
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+	}
+	return 0;
+}
+
+/*
+ * Sends an sk_buff to a T3C driver after dealing with any active network taps.
+ */
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
+{
+	int r;
+
+	local_bh_disable();
+	r = dev->send(dev, skb);
+	local_bh_enable();
+	return r;
+}
+
+EXPORT_SYMBOL(cxgb3_ofld_send);
+
+static int is_offloading(struct net_device *dev)
+{
+	struct adapter *adapter;
+	int i;
+
+	read_lock_bh(&adapter_list_lock);
+	list_for_each_entry(adapter, &adapter_list, adapter_list) {
+		for_each_port(adapter, i) {
+			if (dev == adapter->port[i]) {
+				read_unlock_bh(&adapter_list_lock);
+				return 1;
+			}
+		}
+	}
+	read_unlock_bh(&adapter_list_lock);
+	return 0;
+}
+
+static void cxgb_neigh_update(struct neighbour *neigh)
+{
+	struct net_device *dev;
+
+	if (!neigh)
+		return;
+	dev = neigh->dev;
+	if (dev && (is_offloading(dev))) {
+		struct t3cdev *tdev = dev2t3cdev(dev);
+
+		BUG_ON(!tdev);
+		t3_l2t_update(tdev, neigh);
+	}
+}
+
+static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
+{
+	struct sk_buff *skb;
+	struct cpl_set_tcb_field *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb) {
+		pr_err("%s: cannot allocate skb!\n", __func__);
+		return;
+	}
+	skb->priority = CPL_PRIORITY_CONTROL;
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply = 0;
+	req->cpu_idx = 0;
+	req->word = htons(W_TCB_L2T_IX);
+	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
+	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
+	tdev->send(tdev, skb);
+}
+
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+			  struct neighbour *neigh,
+			  const void *daddr)
+{
+	struct net_device *dev;
+	struct tid_info *ti;
+	struct t3cdev *tdev;
+	u32 tid;
+	int update_tcb;
+	struct l2t_entry *e;
+	struct t3c_tid_entry *te;
+
+	dev = neigh->dev;
+
+	if (!is_offloading(dev))
+		return;
+	tdev = dev2t3cdev(dev);
+	BUG_ON(!tdev);
+
+	/* Add new L2T entry */
+	e = t3_l2t_get(tdev, new, dev, daddr);
+	if (!e) {
+		pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
+		return;
+	}
+
+	/* Walk tid table and notify clients of dst change. */
+	ti = &(T3C_DATA(tdev))->tid_maps;
+	for (tid = 0; tid < ti->ntids; tid++) {
+		te = lookup_tid(ti, tid);
+		BUG_ON(!te);
+		if (te && te->ctx && te->client && te->client->redirect) {
+			update_tcb = te->client->redirect(te->ctx, old, new, e);
+			if (update_tcb) {
+				rcu_read_lock();
+				l2t_hold(L2DATA(tdev), e);
+				rcu_read_unlock();
+				set_l2t_ix(tdev, tid, e);
+			}
+		}
+	}
+	l2t_release(tdev, e);
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *cxgb_alloc_mem(unsigned long size)
+{
+	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+	if (!p)
+		p = vzalloc(size);
+	return p;
+}
+
+/*
+ * Free memory allocated through cxgb_alloc_mem().
+ */
+void cxgb_free_mem(void *addr)
+{
+	kvfree(addr);
+}
+
+/*
+ * Allocate and initialize the TID tables.  Returns 0 on success.
+ */
+static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
+			 unsigned int natids, unsigned int nstids,
+			 unsigned int atid_base, unsigned int stid_base)
+{
+	unsigned long size = ntids * sizeof(*t->tid_tab) +
+	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
+
+	t->tid_tab = cxgb_alloc_mem(size);
+	if (!t->tid_tab)
+		return -ENOMEM;
+
+	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
+	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
+	t->ntids = ntids;
+	t->nstids = nstids;
+	t->stid_base = stid_base;
+	t->sfree = NULL;
+	t->natids = natids;
+	t->atid_base = atid_base;
+	t->afree = NULL;
+	t->stids_in_use = t->atids_in_use = 0;
+	atomic_set(&t->tids_in_use, 0);
+	spin_lock_init(&t->stid_lock);
+	spin_lock_init(&t->atid_lock);
+
+	/*
+	 * Setup the free lists for stid_tab and atid_tab.
+	 */
+	if (nstids) {
+		while (--nstids)
+			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
+		t->sfree = t->stid_tab;
+	}
+	if (natids) {
+		while (--natids)
+			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+		t->afree = t->atid_tab;
+	}
+	return 0;
+}
+
+static void free_tid_maps(struct tid_info *t)
+{
+	cxgb_free_mem(t->tid_tab);
+}
+
+static inline void add_adapter(struct adapter *adap)
+{
+	write_lock_bh(&adapter_list_lock);
+	list_add_tail(&adap->adapter_list, &adapter_list);
+	write_unlock_bh(&adapter_list_lock);
+}
+
+static inline void remove_adapter(struct adapter *adap)
+{
+	write_lock_bh(&adapter_list_lock);
+	list_del(&adap->adapter_list);
+	write_unlock_bh(&adapter_list_lock);
+}
+
+int cxgb3_offload_activate(struct adapter *adapter)
+{
+	struct t3cdev *dev = &adapter->tdev;
+	int natids, err;
+	struct t3c_data *t;
+	struct tid_range stid_range, tid_range;
+	struct mtutab mtutab;
+	unsigned int l2t_capacity;
+	struct l2t_data *l2td;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	err = -EOPNOTSUPP;
+	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
+	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
+	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
+	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
+	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
+	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
+		goto out_free;
+
+	err = -ENOMEM;
+	l2td = t3_init_l2t(l2t_capacity);
+	if (!l2td)
+		goto out_free;
+
+	natids = min(tid_range.num / 2, MAX_ATIDS);
+	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
+			    stid_range.num, ATID_BASE, stid_range.base);
+	if (err)
+		goto out_free_l2t;
+
+	t->mtus = mtutab.mtus;
+	t->nmtus = mtutab.size;
+
+	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
+	spin_lock_init(&t->tid_release_lock);
+	INIT_LIST_HEAD(&t->list_node);
+	t->dev = dev;
+
+	RCU_INIT_POINTER(dev->l2opt, l2td);
+	T3C_DATA(dev) = t;
+	dev->recv = process_rx;
+	dev->neigh_update = t3_l2t_update;
+
+	/* Register netevent handler once */
+	if (list_empty(&adapter_list))
+		register_netevent_notifier(&nb);
+
+	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+	t->release_list_incomplete = 0;
+
+	add_adapter(adapter);
+	return 0;
+
+out_free_l2t:
+	t3_free_l2t(l2td);
+out_free:
+	kfree(t);
+	return err;
+}
+
+static void clean_l2_data(struct rcu_head *head)
+{
+	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+	t3_free_l2t(d);
+}
+
+
+void cxgb3_offload_deactivate(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+	struct t3c_data *t = T3C_DATA(tdev);
+	struct l2t_data *d;
+
+	remove_adapter(adapter);
+	if (list_empty(&adapter_list))
+		unregister_netevent_notifier(&nb);
+
+	free_tid_maps(&t->tid_maps);
+	T3C_DATA(tdev) = NULL;
+	rcu_read_lock();
+	d = L2DATA(tdev);
+	rcu_read_unlock();
+	RCU_INIT_POINTER(tdev->l2opt, NULL);
+	call_rcu(&d->rcu_head, clean_l2_data);
+	if (t->nofail_skb)
+		kfree_skb(t->nofail_skb);
+	kfree(t);
+}
+
+static inline void register_tdev(struct t3cdev *tdev)
+{
+	static int unit;
+
+	mutex_lock(&cxgb3_db_lock);
+	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
+	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline void unregister_tdev(struct t3cdev *tdev)
+{
+	mutex_lock(&cxgb3_db_lock);
+	list_del(&tdev->ofld_dev_list);
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline int adap2type(struct adapter *adapter)
+{
+	int type = 0;
+
+	switch (adapter->params.rev) {
+	case T3_REV_A:
+		type = T3A;
+		break;
+	case T3_REV_B:
+	case T3_REV_B2:
+		type = T3B;
+		break;
+	case T3_REV_C:
+		type = T3C;
+		break;
+	}
+	return type;
+}
+
+void cxgb3_adapter_ofld(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+
+	INIT_LIST_HEAD(&tdev->ofld_dev_list);
+
+	cxgb3_set_dummy_ops(tdev);
+	tdev->send = t3_offload_tx;
+	tdev->ctl = cxgb_offload_ctl;
+	tdev->type = adap2type(adapter);
+
+	register_tdev(tdev);
+}
+
+void cxgb3_adapter_unofld(struct adapter *adapter)
+{
+	struct t3cdev *tdev = &adapter->tdev;
+
+	tdev->recv = NULL;
+	tdev->neigh_update = NULL;
+
+	unregister_tdev(tdev);
+}
+
+void __init cxgb3_offload_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_CPL_CMDS; ++i)
+		cpl_handlers[i] = do_bad_cpl;
+
+	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
+	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
+	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
+	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
+	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
+	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
+	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
+	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
+	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
new file mode 100644
index 0000000..929c298
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_H
+#define _CXGB3_OFFLOAD_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+
+#include "l2t.h"
+
+#include "t3cdev.h"
+#include "t3_cpl.h"
+
+struct adapter;
+
+void cxgb3_offload_init(void);
+
+void cxgb3_adapter_ofld(struct adapter *adapter);
+void cxgb3_adapter_unofld(struct adapter *adapter);
+int cxgb3_offload_activate(struct adapter *adapter);
+void cxgb3_offload_deactivate(struct adapter *adapter);
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev);
+
+struct t3cdev *dev2t3cdev(struct net_device *dev);
+
+/*
+ * Client registration.  Users of the T3 driver must register themselves.
+ * The T3 driver will call the add function of every client for each T3
+ * adapter activated, passing up the t3cdev ptr.  Each client fills out an
+ * array of callback functions to process CPL messages.
+ */
+
+void cxgb3_register_client(struct cxgb3_client *client);
+void cxgb3_unregister_client(struct cxgb3_client *client);
+void cxgb3_add_clients(struct t3cdev *tdev);
+void cxgb3_remove_clients(struct t3cdev *tdev);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
+
+typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
+				      struct sk_buff *skb, void *ctx);
+
+enum {
+	OFFLOAD_STATUS_UP,
+	OFFLOAD_STATUS_DOWN,
+	OFFLOAD_PORT_DOWN,
+	OFFLOAD_PORT_UP,
+	OFFLOAD_DB_FULL,
+	OFFLOAD_DB_EMPTY,
+	OFFLOAD_DB_DROP
+};
+
+struct cxgb3_client {
+	char *name;
+	void (*add) (struct t3cdev *);
+	void (*remove) (struct t3cdev *);
+	cxgb3_cpl_handler_func *handlers;
+	int (*redirect)(void *ctx, struct dst_entry *old,
+			struct dst_entry *new, struct l2t_entry *l2t);
+	struct list_head client_list;
+	void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
+};
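+
+/*
+ * Illustrative sketch only (not part of the driver): a hypothetical ULP
+ * module might register itself roughly as follows, assuming it provides
+ * my_add()/my_remove() callbacks and a my_handlers[NUM_CPL_CMDS] table of
+ * cxgb3_cpl_handler_func pointers:
+ *
+ *	static struct cxgb3_client my_client = {
+ *		.name		= "my_ulp",
+ *		.add		= my_add,
+ *		.remove		= my_remove,
+ *		.handlers	= my_handlers,
+ *	};
+ *
+ *	cxgb3_register_client(&my_client);
+ *	...
+ *	cxgb3_unregister_client(&my_client);
+ *
+ * The driver calls ->add() for every activated adapter, and the handler
+ * table is indexed by CPL opcode when messages arrive for the client's TIDs.
+ */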
+
+/*
+ * TID allocation services.
+ */
+int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
+		     void *ctx);
+int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
+		     void *ctx);
+void *cxgb3_free_atid(struct t3cdev *dev, int atid);
+void cxgb3_free_stid(struct t3cdev *dev, int stid);
+void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
+		      void *ctx, unsigned int tid);
+void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
+void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
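+
+/*
+ * Illustrative sketch only: a client performing an active open might use
+ * these services roughly as follows (names hypothetical, error handling
+ * omitted):
+ *
+ *	int atid = cxgb3_alloc_atid(tdev, &my_client, my_ctx);
+ *
+ *	... send the connection request using atid ...
+ *
+ * Once the hardware TID is known (e.g. from CPL_ACT_ESTABLISH):
+ *
+ *	cxgb3_insert_tid(tdev, &my_client, my_ctx, hwtid);
+ *	cxgb3_free_atid(tdev, atid);
+ *
+ * and on teardown:
+ *
+ *	cxgb3_remove_tid(tdev, my_ctx, hwtid);
+ */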
+
+struct t3c_tid_entry {
+	struct cxgb3_client *client;
+	void *ctx;
+};
+
+/* CPL message priority levels */
+enum {
+	CPL_PRIORITY_DATA = 0,	/* data messages */
+	CPL_PRIORITY_SETUP = 1,	/* connection setup messages */
+	CPL_PRIORITY_TEARDOWN = 0,	/* connection teardown messages */
+	CPL_PRIORITY_LISTEN = 1,	/* listen start/stop messages */
+	CPL_PRIORITY_ACK = 1,	/* RX ACK messages */
+	CPL_PRIORITY_CONTROL = 1	/* offload control messages */
+};
+
+/* Flags for return value of CPL message handlers */
+enum {
+	CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
+	CPL_RET_BAD_MSG = 2,  /* bad CPL message (e.g., unknown opcode) */
+	CPL_RET_UNKNOWN_TID = 4	/* unexpected unknown TID */
+};
+
+typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
+
+/*
+ * Returns a pointer to the first byte of the CPL header in an sk_buff that
+ * contains a CPL message.
+ */
+static inline void *cplhdr(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
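+
+/*
+ * Illustrative sketch only: a handler registered here receives the raw CPL
+ * message, typically peeks at the header via cplhdr(), and returns CPL_RET_*
+ * flags describing what to do with the buffer (names hypothetical):
+ *
+ *	static int my_trace_handler(struct t3cdev *dev, struct sk_buff *skb)
+ *	{
+ *		struct cpl_trace_pkt *p = cplhdr(skb);
+ *
+ *		... examine p ...
+ *		return CPL_RET_BUF_DONE;
+ *	}
+ *
+ *	t3_register_cpl_handler(CPL_TRACE_PKT, my_trace_handler);
+ */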
+
+union listen_entry {
+	struct t3c_tid_entry t3c_tid;
+	union listen_entry *next;
+};
+
+union active_open_entry {
+	struct t3c_tid_entry t3c_tid;
+	union active_open_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID, server TID,
+ * and active-open TID tables for an offload device.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+	struct t3c_tid_entry *tid_tab;
+	unsigned int ntids;
+	atomic_t tids_in_use;
+
+	union listen_entry *stid_tab;
+	unsigned int nstids;
+	unsigned int stid_base;
+
+	union active_open_entry *atid_tab;
+	unsigned int natids;
+	unsigned int atid_base;
+
+	/*
+	 * The following members are accessed R/W so we put them in their own
+	 * cache lines.
+	 *
+	 * XXX We could combine the atid fields above with the lock here since
+	 * atids are used once (unlike other tids).  OTOH the above fields are
+	 * usually in cache due to tid_tab.
+	 */
+	spinlock_t atid_lock ____cacheline_aligned_in_smp;
+	union active_open_entry *afree;
+	unsigned int atids_in_use;
+
+	spinlock_t stid_lock ____cacheline_aligned;
+	union listen_entry *sfree;
+	unsigned int stids_in_use;
+};
+
+struct t3c_data {
+	struct list_head list_node;
+	struct t3cdev *dev;
+	unsigned int tx_max_chunk;	/* max payload for TX_DATA */
+	unsigned int max_wrs;	/* max in-flight WRs per connection */
+	unsigned int nmtus;
+	const unsigned short *mtus;
+	struct tid_info tid_maps;
+
+	struct t3c_tid_entry *tid_release_list;
+	spinlock_t tid_release_lock;
+	struct work_struct tid_release_task;
+
+	struct sk_buff *nofail_skb;
+	unsigned int release_list_incomplete;
+};
+
+/*
+ * t3cdev -> t3c_data accessor
+ */
+#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
new file mode 100644
index 0000000..0d9b0e6
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FIRMWARE_EXPORTS_H_
+#define _FIRMWARE_EXPORTS_H_
+
+/* WR OPCODES supported by the firmware.
+ */
+#define	FW_WROPCODE_FORWARD			0x01
+#define FW_WROPCODE_BYPASS			0x05
+
+#define FW_WROPCODE_TUNNEL_TX_PKT		0x03
+
+#define FW_WROPOCDE_ULPTX_DATA_SGL		0x00
+#define FW_WROPCODE_ULPTX_MEM_READ		0x02
+#define FW_WROPCODE_ULPTX_PKT			0x04
+#define FW_WROPCODE_ULPTX_INVALIDATE		0x06
+
+#define FW_WROPCODE_TUNNEL_RX_PKT		0x07
+
+#define FW_WROPCODE_OFLD_GETTCB_RPL		0x08
+#define FW_WROPCODE_OFLD_CLOSE_CON		0x09
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ	0x0A
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL	0x0F
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ	0x0B
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL	0x0C
+#define FW_WROPCODE_OFLD_TX_DATA		0x0D
+#define FW_WROPCODE_OFLD_TX_DATA_ACK		0x0E
+
+#define FW_WROPCODE_RI_RDMA_INIT		0x10
+#define FW_WROPCODE_RI_RDMA_WRITE		0x11
+#define FW_WROPCODE_RI_RDMA_READ_REQ		0x12
+#define FW_WROPCODE_RI_RDMA_READ_RESP		0x13
+#define FW_WROPCODE_RI_SEND			0x14
+#define FW_WROPCODE_RI_TERMINATE		0x15
+#define FW_WROPCODE_RI_RDMA_READ		0x16
+#define FW_WROPCODE_RI_RECEIVE			0x17
+#define FW_WROPCODE_RI_BIND_MW			0x18
+#define FW_WROPCODE_RI_FASTREGISTER_MR		0x19
+#define FW_WROPCODE_RI_LOCAL_INV		0x1A
+#define FW_WROPCODE_RI_MODIFY_QP		0x1B
+#define FW_WROPCODE_RI_BYPASS			0x1C
+
+#define FW_WROPOCDE_RSVD			0x1E
+
+#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR	0x1F
+
+#define FW_WROPCODE_MNGT			0x1D
+#define FW_MNGTOPCODE_PKTSCHED_SET		0x00
+
+/* Maximum size of a WR sent from the host, limited by the SGE.
+ *
+ * Note: WRs coming from ULP or TP are limited only by the CIM.
+ */
+#define FW_WR_SIZE			128
+
+/* Maximum number of outstanding WRs sent from the host. Value must be
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * offload modules to limit the number of WRs per connection.
+ */
+#define FW_T3_WR_NUM			16
+#define FW_N3_WR_NUM			7
+
+#ifndef N3
+# define FW_WR_NUM			FW_T3_WR_NUM
+#else
+# define FW_WR_NUM			FW_N3_WR_NUM
+#endif
+
+/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+ * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+ * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+ *
+ * Ingress Traffic (e.g. DMA completion credit)  for TUNNEL Queue[i] is sent
+ * to RESP Queue[i].
+ */
+#define FW_TUNNEL_NUM			8
+#define FW_TUNNEL_SGEEC_START		8
+#define FW_TUNNEL_TID_START		65544
+
+/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+ * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_CTRL_TID_START.
+ *
+ * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_CTRL_NUM			8
+#define FW_CTRL_SGEEC_START		65528
+#define FW_CTRL_TID_START		65536
+
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+ * OFFLOAD Queues, as the host is responsible for providing the correct TID in
+ * every WR.
+ *
+ * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_OFLD_NUM			8
+#define FW_OFLD_SGEEC_START		0
+
+/*
+ *
+ */
+#define FW_RI_NUM			1
+#define FW_RI_SGEEC_START		65527
+#define FW_RI_TID_START			65552
+
+/*
+ * The RX_PKT_TID
+ */
+#define FW_RX_PKT_NUM			1
+#define FW_RX_PKT_TID_START		65553
+
+/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
+ * by the firmware.
+ */
+#define FW_WRC_NUM			\
+    (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
+
+/*
+ * FW type and version.
+ */
+#define S_FW_VERSION_TYPE		28
+#define M_FW_VERSION_TYPE		0xF
+#define V_FW_VERSION_TYPE(x)		((x) << S_FW_VERSION_TYPE)
+#define G_FW_VERSION_TYPE(x)		\
+    (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
+
+#define S_FW_VERSION_MAJOR		16
+#define M_FW_VERSION_MAJOR		0xFFF
+#define V_FW_VERSION_MAJOR(x)		((x) << S_FW_VERSION_MAJOR)
+#define G_FW_VERSION_MAJOR(x)		\
+    (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
+
+#define S_FW_VERSION_MINOR		8
+#define M_FW_VERSION_MINOR		0xFF
+#define V_FW_VERSION_MINOR(x)		((x) << S_FW_VERSION_MINOR)
+#define G_FW_VERSION_MINOR(x)		\
+    (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
+
+#define S_FW_VERSION_MICRO		0
+#define M_FW_VERSION_MICRO		0xFF
+#define V_FW_VERSION_MICRO(x)		((x) << S_FW_VERSION_MICRO)
+#define G_FW_VERSION_MICRO(x)		\
+    (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
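+
+/* Illustrative sketch only: a 32-bit firmware version word can be composed
+ * and decomposed with the V_/G_ helpers above, e.g.
+ *
+ *	u32 vers = V_FW_VERSION_TYPE(type) | V_FW_VERSION_MAJOR(major) |
+ *		   V_FW_VERSION_MINOR(minor) | V_FW_VERSION_MICRO(micro);
+ *	unsigned int maj = G_FW_VERSION_MAJOR(vers);
+ */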
+
+#endif				/* _FIRMWARE_EXPORTS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
new file mode 100644
index 0000000..5f226ed
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <net/neighbour.h>
+#include "common.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define VLAN_NONE 0xfff
+
+/*
+ * Module locking notes:  There is an RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
+ * under the protection of the table lock; individual entry changes happen
+ * while holding that entry's spinlock.  The table lock nests outside the
+ * entry locks.  Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries.  Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel.  Dropping an L2T entry only decrements its reference count and
+ * therefore can happen in parallel with entry allocation, but no entry
+ * can change state or increment its ref count during allocation as both of
+ * these perform lookups.
+ */
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+	return e->vlan >> 13;
+}
+
+static inline unsigned int arp_hash(u32 key, int ifindex,
+				    const struct l2t_data *d)
+{
+	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
+}
+
+static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+	neigh_hold(n);
+	if (e->neigh)
+		neigh_release(e->neigh);
+	e->neigh = n;
+}
+
+/*
+ * Set up an L2T entry and send any packets waiting in the arp queue.  The
+ * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
+ * entry locked.
+ */
+static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
+				  struct l2t_entry *e)
+{
+	struct cpl_l2t_write_req *req;
+	struct sk_buff *tmp;
+
+	if (!skb) {
+		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+		if (!skb)
+			return -ENOMEM;
+	}
+
+	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+			    V_L2T_W_PRIO(vlan_prio(e)));
+	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+	skb->priority = CPL_PRIORITY_CONTROL;
+	cxgb3_ofld_send(dev, skb);
+
+	skb_queue_walk_safe(&e->arpq, skb, tmp) {
+		__skb_unlink(skb, &e->arpq);
+		cxgb3_ofld_send(dev, skb);
+	}
+	e->state = L2T_STATE_VALID;
+
+	return 0;
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+	__skb_queue_tail(&e->arpq, skb);
+}
+
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+		     struct l2t_entry *e)
+{
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE)
+			e->state = L2T_STATE_VALID;
+		spin_unlock_bh(&e->lock);
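+		/* fall through */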
+	case L2T_STATE_VALID:	/* fast-path, send the packet on */
+		return cxgb3_ofld_send(dev, skb);
+	case L2T_STATE_RESOLVING:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_RESOLVING) {
+			/* ARP already completed */
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		arpq_enqueue(e, skb);
+		spin_unlock_bh(&e->lock);
+
+		/*
+		 * Only the first packet added to the arpq should kick off
+		 * resolution.  However, because the alloc_skb below can fail,
+		 * we allow each packet added to the arpq to retry resolution
+		 * as a way of recovering from transient memory exhaustion.
+		 * A better way would be to use a work request to retry L2T
+		 * entries when there's no memory.
+		 */
+		if (!neigh_event_send(e->neigh, NULL)) {
+			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+					GFP_ATOMIC);
+			if (!skb)
+				break;
+
+			spin_lock_bh(&e->lock);
+			if (!skb_queue_empty(&e->arpq))
+				setup_l2e_send_pending(dev, skb, e);
+			else	/* we lost the race */
+				__kfree_skb(skb);
+			spin_unlock_bh(&e->lock);
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(t3_l2t_send_slow);
+
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
+{
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE) {
+			e->state = L2T_STATE_VALID;
+		}
+		spin_unlock_bh(&e->lock);
+		return;
+	case L2T_STATE_VALID:	/* fast-path, send the packet on */
+		return;
+	case L2T_STATE_RESOLVING:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_RESOLVING) {
+			/* ARP already completed */
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		spin_unlock_bh(&e->lock);
+
+		/*
+		 * Only the first packet added to the arpq should kick off
+		 * resolution.  However, because the alloc_skb below can fail,
+		 * we allow each packet added to the arpq to retry resolution
+		 * as a way of recovering from transient memory exhaustion.
+		 * A better way would be to use a work request to retry L2T
+		 * entries when there's no memory.
+		 */
+		neigh_event_send(e->neigh, NULL);
+	}
+}
+
+EXPORT_SYMBOL(t3_l2t_send_event);
+
+/*
+ * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+	struct l2t_entry *end, *e, **p;
+
+	if (!atomic_read(&d->nfree))
+		return NULL;
+
+	/* there's definitely a free entry */
+	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
+		if (atomic_read(&e->refcnt) == 0)
+			goto found;
+
+	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
+found:
+	d->rover = e + 1;
+	atomic_dec(&d->nfree);
+
+	/*
+	 * The entry we found may be an inactive entry that is
+	 * presently in the hash table.  We need to remove it.
+	 */
+	if (e->state != L2T_STATE_UNUSED) {
+		int hash = arp_hash(e->addr, e->ifindex, d);
+
+		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
+			if (*p == e) {
+				*p = e->next;
+				break;
+			}
+		e->state = L2T_STATE_UNUSED;
+	}
+	return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.  The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor.  We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
+{
+	spin_lock_bh(&e->lock);
+	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
+		if (e->neigh) {
+			neigh_release(e->neigh);
+			e->neigh = NULL;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	atomic_inc(&d->nfree);
+}
+
+EXPORT_SYMBOL(t3_l2e_free);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+	unsigned int nud_state;
+
+	spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
+
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+	nud_state = neigh->nud_state;
+	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+	    !(nud_state & NUD_VALID))
+		e->state = L2T_STATE_RESOLVING;
+	else if (nud_state & NUD_CONNECTED)
+		e->state = L2T_STATE_VALID;
+	else
+		e->state = L2T_STATE_STALE;
+	spin_unlock(&e->lock);
+}
+
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
+			     struct net_device *dev, const void *daddr)
+{
+	struct l2t_entry *e = NULL;
+	struct neighbour *neigh;
+	struct port_info *p;
+	struct l2t_data *d;
+	int hash;
+	u32 addr;
+	int ifidx;
+	int smt_idx;
+
+	rcu_read_lock();
+	neigh = dst_neigh_lookup(dst, daddr);
+	if (!neigh)
+		goto done_rcu;
+
+	addr = *(u32 *) neigh->primary_key;
+	ifidx = neigh->dev->ifindex;
+
+	if (!dev)
+		dev = neigh->dev;
+	p = netdev_priv(dev);
+	smt_idx = p->port_id;
+
+	d = L2DATA(cdev);
+	if (!d)
+		goto done_rcu;
+
+	hash = arp_hash(addr, ifidx, d);
+
+	write_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (e->addr == addr && e->ifindex == ifidx &&
+		    e->smt_idx == smt_idx) {
+			l2t_hold(d, e);
+			if (atomic_read(&e->refcnt) == 1)
+				reuse_entry(e, neigh);
+			goto done_unlock;
+		}
+
+	/* Need to allocate a new entry */
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
+		e->next = d->l2tab[hash].first;
+		d->l2tab[hash].first = e;
+		e->state = L2T_STATE_RESOLVING;
+		e->addr = addr;
+		e->ifindex = ifidx;
+		e->smt_idx = smt_idx;
+		atomic_set(&e->refcnt, 1);
+		neigh_replace(e, neigh);
+		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+			e->vlan = vlan_dev_vlan_id(neigh->dev);
+		else
+			e->vlan = VLAN_NONE;
+		spin_unlock(&e->lock);
+	}
+done_unlock:
+	write_unlock_bh(&d->lock);
+done_rcu:
+	if (neigh)
+		neigh_release(neigh);
+	rcu_read_unlock();
+	return e;
+}
+
+EXPORT_SYMBOL(t3_l2t_get);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head.  If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the offload device.
+ *
+ * XXX: maybe we should abandon the latter behavior and just require a failure
+ * handler.
+ */
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
+{
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(arpq, skb, tmp) {
+		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+		__skb_unlink(skb, arpq);
+		if (cb->arp_failure_handler)
+			cb->arp_failure_handler(dev, skb);
+		else
+			cxgb3_ofld_send(dev, skb);
+	}
+}
+
+/*
+ * Called when the host's ARP layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+	struct sk_buff_head arpq;
+	struct l2t_entry *e;
+	struct l2t_data *d = L2DATA(dev);
+	u32 addr = *(u32 *) neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = arp_hash(addr, ifidx, d);
+
+	read_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (e->addr == addr && e->ifindex == ifidx) {
+			spin_lock(&e->lock);
+			goto found;
+		}
+	read_unlock_bh(&d->lock);
+	return;
+
+found:
+	__skb_queue_head_init(&arpq);
+
+	read_unlock(&d->lock);
+	if (atomic_read(&e->refcnt)) {
+		if (neigh != e->neigh)
+			neigh_replace(e, neigh);
+
+		if (e->state == L2T_STATE_RESOLVING) {
+			if (neigh->nud_state & NUD_FAILED) {
+				skb_queue_splice_init(&e->arpq, &arpq);
+			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
+				setup_l2e_send_pending(dev, NULL, e);
+		} else {
+			e->state = neigh->nud_state & NUD_CONNECTED ?
+			    L2T_STATE_VALID : L2T_STATE_STALE;
+			if (!ether_addr_equal(e->dmac, neigh->ha))
+				setup_l2e_send_pending(dev, NULL, e);
+		}
+	}
+	spin_unlock_bh(&e->lock);
+
+	if (!skb_queue_empty(&arpq))
+		handle_failed_resolution(dev, &arpq);
+}
+
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
+{
+	struct l2t_data *d;
+	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+
+	d = cxgb_alloc_mem(size);
+	if (!d)
+		return NULL;
+
+	d->nentries = l2t_capacity;
+	d->rover = &d->l2tab[1];	/* entry 0 is not used */
+	atomic_set(&d->nfree, l2t_capacity - 1);
+	rwlock_init(&d->lock);
+
+	for (i = 0; i < l2t_capacity; ++i) {
+		d->l2tab[i].idx = i;
+		d->l2tab[i].state = L2T_STATE_UNUSED;
+		__skb_queue_head_init(&d->l2tab[i].arpq);
+		spin_lock_init(&d->l2tab[i].lock);
+		atomic_set(&d->l2tab[i].refcnt, 0);
+	}
+	return d;
+}
+
+void t3_free_l2t(struct l2t_data *d)
+{
+	cxgb_free_mem(d);
+}
+
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
new file mode 100644
index 0000000..8cffcdf
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_L2T_H
+#define _CHELSIO_L2T_H
+
+#include <linux/spinlock.h>
+#include "t3cdev.h"
+#include <linux/atomic.h>
+
+enum {
+	L2T_STATE_VALID,	/* entry is up to date */
+	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,	/* entry needs address resolution */
+	L2T_STATE_UNUSED	/* entry not in use */
+};
+
+struct neighbour;
+struct sk_buff;
+
+/*
+ * Each L2T entry plays multiple roles.  First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution.  Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer.  Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+	u16 state;		/* entry state */
+	u16 idx;		/* entry index */
+	u32 addr;		/* dest IP address */
+	int ifindex;		/* neighbor's net_device's ifindex */
+	u16 smt_idx;		/* SMT index */
+	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+	struct neighbour *neigh;	/* associated neighbour */
+	struct l2t_entry *first;	/* start of hash chain */
+	struct l2t_entry *next;	/* next l2t_entry on chain */
+	struct sk_buff_head arpq;	/* queue of packets awaiting resolution */
+	spinlock_t lock;
+	atomic_t refcnt;	/* entry reference count */
+	u8 dmac[6];		/* neighbour's MAC address */
+};
+
+struct l2t_data {
+	unsigned int nentries;	/* number of entries */
+	struct l2t_entry *rover;	/* starting point for next allocation */
+	atomic_t nfree;		/* number of free entries */
+	rwlock_t lock;
+	struct l2t_entry l2tab[0];
+	struct rcu_head rcu_head;	/* to handle rcu cleanup */
+};
+
+typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+					 struct sk_buff * skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+					   arp_failure_handler_func hnd)
+{
+	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
+
+/*
+ * Getting to the L2 data from an offload device.
+ */
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
+
+#define W_TCB_L2T_IX    0
+#define S_TCB_L2T_IX    7
+#define M_TCB_L2T_IX    0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
+			     struct net_device *dev, const void *daddr);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+		     struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+void t3_free_l2t(struct l2t_data *d);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+			   struct l2t_entry *e)
+{
+	if (likely(e->state == L2T_STATE_VALID))
+		return cxgb3_ofld_send(dev, skb);
+	return t3_l2t_send_slow(dev, skb, e);
+}
+
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
+{
+	struct l2t_data *d;
+
+	rcu_read_lock();
+	d = L2DATA(t);
+
+	if (atomic_dec_and_test(&e->refcnt) && d)
+		t3_l2e_free(d, e);
+
+	rcu_read_unlock();
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (d && atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
+		atomic_dec(&d->nfree);
+}
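+
+/*
+ * Illustrative sketch only: a typical client holds an L2T entry for the
+ * lifetime of a connection and sends its offload packets through it
+ * (names hypothetical, error handling omitted):
+ *
+ *	struct l2t_entry *e = t3_l2t_get(tdev, dst, netdev, daddr);
+ *
+ *	set_arp_failure_handler(skb, my_arp_failure);
+ *	l2t_send(tdev, skb, e);		(queues on e->arpq while resolving)
+ *	...
+ *	l2t_release(tdev, e);		(drop the reference when done)
+ */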
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
new file mode 100644
index 0000000..338301b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+	IDT75P52100 = 4,
+	IDT75N43102 = 5
+};
+
+/* DBGI command mode */
+enum {
+	DBGI_MODE_MBUS = 0,
+	DBGI_MODE_IDT52100 = 5
+};
+
+/* IDT 75P52100 commands */
+#define IDT_CMD_READ   0
+#define IDT_CMD_WRITE  1
+#define IDT_CMD_SEARCH 2
+#define IDT_CMD_LEARN  3
+
+/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
+#define IDT_LAR_ADR0   	0x180006
+#define IDT_LAR_MODE144	0xffff0000
+
+/* IDT SCR and SSR addresses (low 32 bits) */
+#define IDT_SCR_ADR0  0x180000
+#define IDT_SSR0_ADR0 0x180002
+#define IDT_SSR1_ADR0 0x180004
+
+/* IDT GMR base address (low 32 bits) */
+#define IDT_GMR_BASE_ADR0 0x180020
+
+/* IDT data and mask array base addresses (low 32 bits) */
+#define IDT_DATARY_BASE_ADR0 0
+#define IDT_MSKARY_BASE_ADR0 0x80000
+
+/* IDT 75N43102 commands */
+#define IDT4_CMD_SEARCH144 3
+#define IDT4_CMD_WRITE     4
+#define IDT4_CMD_READ      5
+
+/* IDT 75N43102 SCR address (low 32 bits) */
+#define IDT4_SCR_ADR0  0x3
+
+/* IDT 75N43102 GMR base addresses (low 32 bits) */
+#define IDT4_GMR_BASE0 0x10
+#define IDT4_GMR_BASE1 0x20
+#define IDT4_GMR_BASE2 0x30
+
+/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
+#define IDT4_DATARY_BASE_ADR0 0x1000000
+#define IDT4_MSKARY_BASE_ADR0 0x2000000
+
+#define MAX_WRITE_ATTEMPTS 5
+
+#define MAX_ROUTES 2048
+
+/*
+ * Issue a command to the TCAM and wait for its completion.  The address and
+ * any data required by the command must have been set up by the caller.
+ */
+static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
+	return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
+			       F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
+}
+
+static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
+				 u32 v3)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
+}
+
+/*
+ * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
+ * command cmd.  The data to be written must have been set up by the caller.
+ * Returns -1 on failure, 0 on success.
+ */
+static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
+{
+	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
+	if (mc5_cmd_write(adapter, cmd) == 0)
+		return 0;
+	CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
+	       addr_lo);
+	return -1;
+}
+
+static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
+				u32 data_array_base, u32 write_cmd,
+				int addr_shift)
+{
+	unsigned int i;
+	struct adapter *adap = mc5->adapter;
+
+	/*
+	 * We need the size of the TCAM data and mask arrays in terms of
+	 * 72-bit entries.
+	 */
+	unsigned int size72 = mc5->tcam_size;
+	unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
+
+	if (mc5->mode == MC5_MODE_144_BIT) {
+		size72 *= 2;	/* 1 144-bit entry is 2 72-bit entries */
+		server_base *= 2;
+	}
+
+	/* Clear the data array */
+	dbgi_wr_data3(adap, 0, 0, 0);
+	for (i = 0; i < size72; i++)
+		if (mc5_write(adap, data_array_base + (i << addr_shift),
+			      write_cmd))
+			return -1;
+
+	/* Initialize the mask array. */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+	for (i = 0; i < size72; i++) {
+		if (i == server_base)	/* entering server or routing region */
+			t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
+				     mc5->mode == MC5_MODE_144_BIT ?
+				     0xfffffff9 : 0xfffffffd);
+		if (mc5_write(adap, mask_array_base + (i << addr_shift),
+			      write_cmd))
+			return -1;
+	}
+	return 0;
+}
+
+static int init_idt52100(struct mc5 *mc5)
+{
+	int i;
+	struct adapter *adap = mc5->adapter;
+
+	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+		     V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
+
+	/*
+	 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
+	 * GMRs 8-9 for ACK- and AOPEN searches.
+	 */
+	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
+	t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
+	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
+	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
+	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
+
+	/* Set DBGI command mode for IDT TCAM. */
+	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+	/* Set up LAR */
+	dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
+	if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	/* Set up SSRs */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
+	if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
+	    mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	/* Set up GMRs */
+	for (i = 0; i < 32; ++i) {
+		if (i >= 12 && i < 15)
+			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+		else if (i == 15)
+			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+		else
+			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+
+		if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
+			goto err;
+	}
+
+	/* Set up SCR */
+	dbgi_wr_data3(adap, 1, 0, 0);
+	if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
+		goto err;
+
+	return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
+				    IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
+err:
+	return -EIO;
+}
+
+static int init_idt43102(struct mc5 *mc5)
+{
+	int i;
+	struct adapter *adap = mc5->adapter;
+
+	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+		     adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
+		     V_RDLAT(0xd) | V_SRCHLAT(0x12));
+
+	/*
+	 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
+	 * for ACK- and AOPEN searches.
+	 */
+	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
+		     IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
+	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
+	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
+	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
+
+	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
+
+	/* Set DBGI command mode for IDT TCAM. */
+	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+	/* Set up GMRs */
+	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+	for (i = 0; i < 7; ++i)
+		if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
+			goto err;
+
+	for (i = 0; i < 4; ++i)
+		if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
+			goto err;
+
+	dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+	if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
+	    mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
+	    mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
+		goto err;
+
+	dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+	if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
+		goto err;
+
+	/* Set up SCR */
+	dbgi_wr_data3(adap, 0xf0000000, 0, 0);
+	if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
+		goto err;
+
+	return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
+				    IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
+err:
+	return -EIO;
+}
+
+/* Put MC5 in DBGI mode. */
+static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
+{
+	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+}
+
+/* Put MC5 in M-Bus mode. */
+static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
+{
+	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
+		     V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
+		     V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+}
+
+/*
+ * Initialization that requires the OS and protocol layers to already
+ * be initialized goes here.
+ */
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+		unsigned int nroutes)
+{
+	u32 cfg;
+	int err;
+	unsigned int tcam_size = mc5->tcam_size;
+	struct adapter *adap = mc5->adapter;
+
+	if (!tcam_size)
+		return 0;
+
+	if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
+		return -EINVAL;
+
+	/* Reset the TCAM */
+	cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
+	cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
+	t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+	if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
+		CH_ERR(adap, "TCAM reset timed out\n");
+		return -1;
+	}
+
+	t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
+	t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
+		     tcam_size - nroutes - nfilters);
+	t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
+		     tcam_size - nroutes - nfilters - nservers);
+
+	mc5->parity_enabled = 1;
+
+	/* All the TCAM addresses we access have only the low 32 bits non-zero */
+	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
+	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
+
+	mc5_dbgi_mode_enable(mc5);
+
+	switch (mc5->part_type) {
+	case IDT75P52100:
+		err = init_idt52100(mc5);
+		break;
+	case IDT75N43102:
+		err = init_idt43102(mc5);
+		break;
+	default:
+		CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
+		err = -EINVAL;
+		break;
+	}
+
+	mc5_dbgi_mode_disable(mc5);
+	return err;
+}
+
+
+#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
+
+/*
+ * MC5 interrupt handler
+ */
+void t3_mc5_intr_handler(struct mc5 *mc5)
+{
+	struct adapter *adap = mc5->adapter;
+	u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
+
+	if ((cause & F_PARITYERR) && mc5->parity_enabled) {
+		CH_ALERT(adap, "MC5 parity error\n");
+		mc5->stats.parity_err++;
+	}
+
+	if (cause & F_REQQPARERR) {
+		CH_ALERT(adap, "MC5 request queue parity error\n");
+		mc5->stats.reqq_parity_err++;
+	}
+
+	if (cause & F_DISPQPARERR) {
+		CH_ALERT(adap, "MC5 dispatch queue parity error\n");
+		mc5->stats.dispq_parity_err++;
+	}
+
+	if (cause & F_ACTRGNFULL)
+		mc5->stats.active_rgn_full++;
+	if (cause & F_NFASRCHFAIL)
+		mc5->stats.nfa_srch_err++;
+	if (cause & F_UNKNOWNCMD)
+		mc5->stats.unknown_cmd++;
+	if (cause & F_DELACTEMPTY)
+		mc5->stats.del_act_empty++;
+	if (cause & MC5_INT_FATAL)
+		t3_fatal_err(adap);
+
+	t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
+}
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+{
+#define K * 1024
+
+	static unsigned int tcam_part_size[] = {	/* in K 72-bit entries */
+		64 K, 128 K, 256 K, 32 K
+	};
+
+#undef K
+
+	u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
+
+	mc5->adapter = adapter;
+	mc5->mode = (unsigned char)mode;
+	mc5->part_type = (unsigned char)G_TMTYPE(cfg);
+	if (cfg & F_TMTYPEHI)
+		mc5->part_type |= 4;
+
+	mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
+	if (mode == MC5_MODE_144_BIT)
+		mc5->tcam_size /= 2;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
new file mode 100644
index 0000000..81029b8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -0,0 +1,2563 @@
+#define A_SG_CONTROL 0x0
+
+#define S_CONGMODE    29
+#define V_CONGMODE(x) ((x) << S_CONGMODE)
+#define F_CONGMODE    V_CONGMODE(1U)
+
+#define S_TNLFLMODE    28
+#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE)
+#define F_TNLFLMODE    V_TNLFLMODE(1U)
+
+#define S_FATLPERREN    27
+#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
+#define F_FATLPERREN    V_FATLPERREN(1U)
+
+#define S_DROPPKT    20
+#define V_DROPPKT(x) ((x) << S_DROPPKT)
+#define F_DROPPKT    V_DROPPKT(1U)
+
+#define S_EGRGENCTRL    19
+#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
+#define F_EGRGENCTRL    V_EGRGENCTRL(1U)
+
+#define S_USERSPACESIZE    14
+#define M_USERSPACESIZE    0x1f
+#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+
+#define S_HOSTPAGESIZE    11
+#define M_HOSTPAGESIZE    0x7
+#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+
+#define S_FLMODE    9
+#define V_FLMODE(x) ((x) << S_FLMODE)
+#define F_FLMODE    V_FLMODE(1U)
+
+#define S_PKTSHIFT    6
+#define M_PKTSHIFT    0x7
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+
+#define S_ONEINTMULTQ    5
+#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
+#define F_ONEINTMULTQ    V_ONEINTMULTQ(1U)
+
+#define S_BIGENDIANINGRESS    2
+#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
+#define F_BIGENDIANINGRESS    V_BIGENDIANINGRESS(1U)
+
+#define S_ISCSICOALESCING    1
+#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
+#define F_ISCSICOALESCING    V_ISCSICOALESCING(1U)
+
+#define S_GLOBALENABLE    0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE    V_GLOBALENABLE(1U)
+
+#define S_AVOIDCQOVFL    24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL    V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ    23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ    V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL    22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL    V_CQCRDTCTRL(1U)
+
+#define A_SG_KDOORBELL 0x4
+
+#define S_SELEGRCNTX    31
+#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
+#define F_SELEGRCNTX    V_SELEGRCNTX(1U)
+
+#define S_EGRCNTX    0
+#define M_EGRCNTX    0xffff
+#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+
+#define A_SG_GTS 0x8
+
+#define S_RSPQ    29
+#define M_RSPQ    0x7
+#define V_RSPQ(x) ((x) << S_RSPQ)
+#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
+
+#define S_NEWTIMER    16
+#define M_NEWTIMER    0x1fff
+#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+
+#define S_NEWINDEX    0
+#define M_NEWINDEX    0xffff
+#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+
+#define A_SG_CONTEXT_CMD 0xc
+
+#define S_CONTEXT_CMD_OPCODE    28
+#define M_CONTEXT_CMD_OPCODE    0xf
+#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+
+#define S_CONTEXT_CMD_BUSY    27
+#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
+#define F_CONTEXT_CMD_BUSY    V_CONTEXT_CMD_BUSY(1U)
+
+#define S_CQ_CREDIT    20
+
+#define M_CQ_CREDIT    0x7f
+
+#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
+
+#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
+
+#define S_CQ    19
+
+#define V_CQ(x) ((x) << S_CQ)
+#define F_CQ    V_CQ(1U)
+
+#define S_RESPONSEQ    18
+#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
+#define F_RESPONSEQ    V_RESPONSEQ(1U)
+
+#define S_EGRESS    17
+#define V_EGRESS(x) ((x) << S_EGRESS)
+#define F_EGRESS    V_EGRESS(1U)
+
+#define S_FREELIST    16
+#define V_FREELIST(x) ((x) << S_FREELIST)
+#define F_FREELIST    V_FREELIST(1U)
+
+#define S_CONTEXT    0
+#define M_CONTEXT    0xffff
+#define V_CONTEXT(x) ((x) << S_CONTEXT)
+
+#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
+
+#define A_SG_CONTEXT_DATA0 0x10
+
+#define A_SG_CONTEXT_DATA1 0x14
+
+#define A_SG_CONTEXT_DATA2 0x18
+
+#define A_SG_CONTEXT_DATA3 0x1c
+
+#define A_SG_CONTEXT_MASK0 0x20
+
+#define A_SG_CONTEXT_MASK1 0x24
+
+#define A_SG_CONTEXT_MASK2 0x28
+
+#define A_SG_CONTEXT_MASK3 0x2c
+
+#define A_SG_RSPQ_CREDIT_RETURN 0x30
+
+#define S_CREDITS    0
+#define M_CREDITS    0xffff
+#define V_CREDITS(x) ((x) << S_CREDITS)
+
+#define A_SG_DATA_INTR 0x34
+
+#define S_ERRINTR    31
+#define V_ERRINTR(x) ((x) << S_ERRINTR)
+#define F_ERRINTR    V_ERRINTR(1U)
+
+#define A_SG_HI_DRB_HI_THRSH 0x38
+
+#define A_SG_HI_DRB_LO_THRSH 0x3c
+
+#define A_SG_LO_DRB_HI_THRSH 0x40
+
+#define A_SG_LO_DRB_LO_THRSH 0x44
+
+#define A_SG_RSPQ_FL_STATUS 0x4c
+
+#define S_RSPQ0DISABLED    8
+
+#define S_FL0EMPTY    16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY    V_FL0EMPTY(1U)
+
+#define A_SG_EGR_RCQ_DRB_THRSH 0x54
+
+#define S_HIRCQDRBTHRSH    16
+#define M_HIRCQDRBTHRSH    0x7ff
+#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+
+#define S_LORCQDRBTHRSH    0
+#define M_LORCQDRBTHRSH    0x7ff
+#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+
+#define A_SG_EGR_CNTX_BADDR 0x58
+
+#define A_SG_INT_CAUSE 0x5c
+
+#define S_HIRCQPARITYERROR    31
+#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR)
+#define F_HIRCQPARITYERROR    V_HIRCQPARITYERROR(1U)
+
+#define S_LORCQPARITYERROR    30
+#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR)
+#define F_LORCQPARITYERROR    V_LORCQPARITYERROR(1U)
+
+#define S_HIDRBPARITYERROR    29
+#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR)
+#define F_HIDRBPARITYERROR    V_HIDRBPARITYERROR(1U)
+
+#define S_LODRBPARITYERROR    28
+#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR)
+#define F_LODRBPARITYERROR    V_LODRBPARITYERROR(1U)
+
+#define S_FLPARITYERROR    22
+#define M_FLPARITYERROR    0x3f
+#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR)
+#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR)
+
+#define S_ITPARITYERROR    20
+#define M_ITPARITYERROR    0x3
+#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR)
+#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR)
+
+#define S_IRPARITYERROR    19
+#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR)
+#define F_IRPARITYERROR    V_IRPARITYERROR(1U)
+
+#define S_RCPARITYERROR    18
+#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR)
+#define F_RCPARITYERROR    V_RCPARITYERROR(1U)
+
+#define S_OCPARITYERROR    17
+#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR)
+#define F_OCPARITYERROR    V_OCPARITYERROR(1U)
+
+#define S_CPPARITYERROR    16
+#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR)
+#define F_CPPARITYERROR    V_CPPARITYERROR(1U)
+
+#define S_R_REQ_FRAMINGERROR    15
+#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR)
+#define F_R_REQ_FRAMINGERROR    V_R_REQ_FRAMINGERROR(1U)
+
+#define S_UC_REQ_FRAMINGERROR    14
+#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR)
+#define F_UC_REQ_FRAMINGERROR    V_UC_REQ_FRAMINGERROR(1U)
+
+#define S_HICTLDRBDROPERR    13
+#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
+#define F_HICTLDRBDROPERR    V_HICTLDRBDROPERR(1U)
+
+#define S_LOCTLDRBDROPERR    12
+#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
+#define F_LOCTLDRBDROPERR    V_LOCTLDRBDROPERR(1U)
+
+#define S_HIPIODRBDROPERR    11
+#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
+#define F_HIPIODRBDROPERR    V_HIPIODRBDROPERR(1U)
+
+#define S_LOPIODRBDROPERR    10
+#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
+#define F_LOPIODRBDROPERR    V_LOPIODRBDROPERR(1U)
+
+#define S_HIPRIORITYDBFULL    7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL    V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY   6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY    V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL    5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL    V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY   4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY    V_LOPRIORITYDBEMPTY(1U)
+
+#define S_RSPQDISABLED    3
+#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
+#define F_RSPQDISABLED    V_RSPQDISABLED(1U)
+
+#define S_RSPQCREDITOVERFOW    2
+#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
+#define F_RSPQCREDITOVERFOW    V_RSPQCREDITOVERFOW(1U)
+
+#define S_FLEMPTY    1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY    V_FLEMPTY(1U)
+
+#define A_SG_INT_ENABLE 0x60
+
+#define A_SG_CMDQ_CREDIT_TH 0x64
+
+#define S_TIMEOUT    8
+#define M_TIMEOUT    0xffffff
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+
+#define S_THRESHOLD    0
+#define M_THRESHOLD    0xff
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+
+#define A_SG_TIMER_TICK 0x68
+
+#define A_SG_CQ_CONTEXT_BADDR 0x6c
+
+#define A_SG_OCO_BASE 0x70
+
+#define S_BASE1    16
+#define M_BASE1    0xffff
+#define V_BASE1(x) ((x) << S_BASE1)
+
+#define A_SG_DRB_PRI_THRESH 0x74
+
+#define A_PCIX_INT_ENABLE 0x80
+
+#define S_MSIXPARERR    22
+#define M_MSIXPARERR    0x7
+
+#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+
+#define S_CFPARERR    18
+#define M_CFPARERR    0xf
+
+#define V_CFPARERR(x) ((x) << S_CFPARERR)
+
+#define S_RFPARERR    14
+#define M_RFPARERR    0xf
+
+#define V_RFPARERR(x) ((x) << S_RFPARERR)
+
+#define S_WFPARERR    12
+#define M_WFPARERR    0x3
+
+#define V_WFPARERR(x) ((x) << S_WFPARERR)
+
+#define S_PIOPARERR    11
+#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
+#define F_PIOPARERR    V_PIOPARERR(1U)
+
+#define S_DETUNCECCERR    10
+#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
+#define F_DETUNCECCERR    V_DETUNCECCERR(1U)
+
+#define S_DETCORECCERR    9
+#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
+#define F_DETCORECCERR    V_DETCORECCERR(1U)
+
+#define S_RCVSPLCMPERR    8
+#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
+#define F_RCVSPLCMPERR    V_RCVSPLCMPERR(1U)
+
+#define S_UNXSPLCMP    7
+#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
+#define F_UNXSPLCMP    V_UNXSPLCMP(1U)
+
+#define S_SPLCMPDIS    6
+#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
+#define F_SPLCMPDIS    V_SPLCMPDIS(1U)
+
+#define S_DETPARERR    5
+#define V_DETPARERR(x) ((x) << S_DETPARERR)
+#define F_DETPARERR    V_DETPARERR(1U)
+
+#define S_SIGSYSERR    4
+#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
+#define F_SIGSYSERR    V_SIGSYSERR(1U)
+
+#define S_RCVMSTABT    3
+#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
+#define F_RCVMSTABT    V_RCVMSTABT(1U)
+
+#define S_RCVTARABT    2
+#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
+#define F_RCVTARABT    V_RCVTARABT(1U)
+
+#define S_SIGTARABT    1
+#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
+#define F_SIGTARABT    V_SIGTARABT(1U)
+
+#define S_MSTDETPARERR    0
+#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
+#define F_MSTDETPARERR    V_MSTDETPARERR(1U)
+
+#define A_PCIX_INT_CAUSE 0x84
+
+#define A_PCIX_CFG 0x88
+
+#define S_DMASTOPEN    19
+#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define F_DMASTOPEN    V_DMASTOPEN(1U)
+
+#define S_CLIDECEN    18
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN    V_CLIDECEN(1U)
+
+#define A_PCIX_MODE 0x8c
+
+#define S_PCLKRANGE    6
+#define M_PCLKRANGE    0x3
+#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
+#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
+
+#define S_PCIXINITPAT    2
+#define M_PCIXINITPAT    0xf
+#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
+#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+
+#define S_64BIT    0
+#define V_64BIT(x) ((x) << S_64BIT)
+#define F_64BIT    V_64BIT(1U)
+
+#define A_PCIE_INT_ENABLE 0x80
+
+#define S_BISTERR    15
+#define M_BISTERR    0xff
+
+#define V_BISTERR(x) ((x) << S_BISTERR)
+
+#define S_TXPARERR    18
+#define V_TXPARERR(x) ((x) << S_TXPARERR)
+#define F_TXPARERR    V_TXPARERR(1U)
+
+#define S_RXPARERR    17
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR    V_RXPARERR(1U)
+
+#define S_RETRYLUTPARERR    16
+#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR)
+#define F_RETRYLUTPARERR    V_RETRYLUTPARERR(1U)
+
+#define S_RETRYBUFPARERR    15
+#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR)
+#define F_RETRYBUFPARERR    V_RETRYBUFPARERR(1U)
+
+#define S_PCIE_MSIXPARERR    12
+#define M_PCIE_MSIXPARERR    0x7
+
+#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+
+#define S_PCIE_CFPARERR    11
+#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
+#define F_PCIE_CFPARERR    V_PCIE_CFPARERR(1U)
+
+#define S_PCIE_RFPARERR    10
+#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
+#define F_PCIE_RFPARERR    V_PCIE_RFPARERR(1U)
+
+#define S_PCIE_WFPARERR    9
+#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
+#define F_PCIE_WFPARERR    V_PCIE_WFPARERR(1U)
+
+#define S_PCIE_PIOPARERR    8
+#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
+#define F_PCIE_PIOPARERR    V_PCIE_PIOPARERR(1U)
+
+#define S_UNXSPLCPLERRC    7
+#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
+#define F_UNXSPLCPLERRC    V_UNXSPLCPLERRC(1U)
+
+#define S_UNXSPLCPLERRR    6
+#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
+#define F_UNXSPLCPLERRR    V_UNXSPLCPLERRR(1U)
+
+#define S_PEXERR    0
+#define V_PEXERR(x) ((x) << S_PEXERR)
+#define F_PEXERR    V_PEXERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x84
+
+#define S_PCIE_DMASTOPEN    24
+#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
+#define F_PCIE_DMASTOPEN    V_PCIE_DMASTOPEN(1U)
+
+#define A_PCIE_CFG 0x88
+
+#define S_ENABLELINKDWNDRST    21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST    20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+
+#define S_PCIE_CLIDECEN    16
+#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+#define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
+
+#define S_CRSTWRMMODE    0
+#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
+#define F_CRSTWRMMODE    V_CRSTWRMMODE(1U)
+
+#define A_PCIE_MODE 0x8c
+
+#define S_NUMFSTTRNSEQRX    10
+#define M_NUMFSTTRNSEQRX    0xff
+#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
+#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+
+#define A_PCIE_PEX_CTRL0 0x98
+
+#define S_NUMFSTTRNSEQ    22
+#define M_NUMFSTTRNSEQ    0xff
+#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
+#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
+
+#define S_REPLAYLMT    2
+#define M_REPLAYLMT    0xfffff
+
+#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+
+#define A_PCIE_PEX_CTRL1 0x9c
+
+#define S_T3A_ACKLAT    0
+#define M_T3A_ACKLAT    0x7ff
+
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+
+#define S_ACKLAT    0
+#define M_ACKLAT    0x1fff
+
+#define V_ACKLAT(x) ((x) << S_ACKLAT)
+
+#define A_PCIE_PEX_ERR 0xa4
+
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN    27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN    V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN    26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN    V_GPIO10_OEN(1U)
+
+#define S_GPIO7_OEN    23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN    V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN    22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN    V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN    21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN    V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN    20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN    V_GPIO4_OEN(1U)
+
+#define S_GPIO2_OEN    18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN    V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN    17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN    V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN    16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN    V_GPIO0_OEN(1U)
+
+#define S_GPIO10_OUT_VAL    10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL    V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL    7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL    V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL    6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL    V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL    5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL    V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL    4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL    V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL    2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL    V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL    1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL    V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL    0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL    V_GPIO0_OUT_VAL(1U)
+
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_GPIO11    11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11    V_GPIO11(1U)
+
+#define S_GPIO10    10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10    V_GPIO10(1U)
+
+#define S_GPIO9    9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9    V_GPIO9(1U)
+
+#define S_GPIO7    7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7    V_GPIO7(1U)
+
+#define S_GPIO6    6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6    V_GPIO6(1U)
+
+#define S_GPIO5    5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5    V_GPIO5(1U)
+
+#define S_GPIO4    4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4    V_GPIO4(1U)
+
+#define S_GPIO3    3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3    V_GPIO3(1U)
+
+#define S_GPIO2    2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2    V_GPIO2(1U)
+
+#define S_GPIO1    1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1    V_GPIO1(1U)
+
+#define S_GPIO0    0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0    V_GPIO0(1U)
+
+#define A_T3DBG_INT_CAUSE 0xdc
+
+#define A_T3DBG_GPIO_ACT_LOW 0xf0
+
+#define MC7_PMRX_BASE_ADDR 0x100
+
+#define A_MC7_CFG 0x100
+
+#define S_IFEN    13
+#define V_IFEN(x) ((x) << S_IFEN)
+#define F_IFEN    V_IFEN(1U)
+
+#define S_TERM150    11
+#define V_TERM150(x) ((x) << S_TERM150)
+#define F_TERM150    V_TERM150(1U)
+
+#define S_SLOW    10
+#define V_SLOW(x) ((x) << S_SLOW)
+#define F_SLOW    V_SLOW(1U)
+
+#define S_WIDTH    8
+#define M_WIDTH    0x3
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_BKS    6
+#define V_BKS(x) ((x) << S_BKS)
+#define F_BKS    V_BKS(1U)
+
+#define S_ORG    5
+#define V_ORG(x) ((x) << S_ORG)
+#define F_ORG    V_ORG(1U)
+
+#define S_DEN    2
+#define M_DEN    0x7
+#define V_DEN(x) ((x) << S_DEN)
+#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
+
+#define S_RDY    1
+#define V_RDY(x) ((x) << S_RDY)
+#define F_RDY    V_RDY(1U)
+
+#define S_CLKEN    0
+#define V_CLKEN(x) ((x) << S_CLKEN)
+#define F_CLKEN    V_CLKEN(1U)
+
+#define A_MC7_MODE 0x104
+
+#define S_BUSY    31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY    V_BUSY(1U)
+
+#define A_MC7_EXT_MODE1 0x108
+
+#define A_MC7_EXT_MODE2 0x10c
+
+#define A_MC7_EXT_MODE3 0x110
+
+#define A_MC7_PRE 0x114
+
+#define A_MC7_REF 0x118
+
+#define S_PREREFDIV    1
+#define M_PREREFDIV    0x3fff
+#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+
+#define S_PERREFEN    0
+#define V_PERREFEN(x) ((x) << S_PERREFEN)
+#define F_PERREFEN    V_PERREFEN(1U)
+
+#define A_MC7_DLL 0x11c
+
+#define S_DLLENB    1
+#define V_DLLENB(x) ((x) << S_DLLENB)
+#define F_DLLENB    V_DLLENB(1U)
+
+#define S_DLLRST    0
+#define V_DLLRST(x) ((x) << S_DLLRST)
+#define F_DLLRST    V_DLLRST(1U)
+
+#define A_MC7_PARM 0x120
+
+#define S_ACTTOPREDLY    26
+#define M_ACTTOPREDLY    0xf
+#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+
+#define S_ACTTORDWRDLY    23
+#define M_ACTTORDWRDLY    0x7
+#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+
+#define S_PRECYC    20
+#define M_PRECYC    0x7
+#define V_PRECYC(x) ((x) << S_PRECYC)
+
+#define S_REFCYC    13
+#define M_REFCYC    0x7f
+#define V_REFCYC(x) ((x) << S_REFCYC)
+
+#define S_BKCYC    8
+#define M_BKCYC    0x1f
+#define V_BKCYC(x) ((x) << S_BKCYC)
+
+#define S_WRTORDDLY    4
+#define M_WRTORDDLY    0xf
+#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+
+#define S_RDTOWRDLY    0
+#define M_RDTOWRDLY    0xf
+#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+
+#define A_MC7_CAL 0x128
+
+#define S_CAL_FAULT    30
+#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
+#define F_CAL_FAULT    V_CAL_FAULT(1U)
+
+#define S_SGL_CAL_EN    20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN    V_SGL_CAL_EN(1U)
+
+#define A_MC7_ERR_ADDR 0x12c
+
+#define A_MC7_ECC 0x130
+
+#define S_ECCCHKEN    1
+#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
+#define F_ECCCHKEN    V_ECCCHKEN(1U)
+
+#define S_ECCGENEN    0
+#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
+#define F_ECCGENEN    V_ECCGENEN(1U)
+
+#define A_MC7_CE_ADDR 0x134
+
+#define A_MC7_CE_DATA0 0x138
+
+#define A_MC7_CE_DATA1 0x13c
+
+#define A_MC7_CE_DATA2 0x140
+
+#define S_DATA    0
+#define M_DATA    0xff
+
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
+#define A_MC7_UE_ADDR 0x144
+
+#define A_MC7_UE_DATA0 0x148
+
+#define A_MC7_UE_DATA1 0x14c
+
+#define A_MC7_UE_DATA2 0x150
+
+#define A_MC7_BD_ADDR 0x154
+
+#define S_ADDR    3
+
+#define M_ADDR    0x1fffffff
+
+#define A_MC7_BD_DATA0 0x158
+
+#define A_MC7_BD_DATA1 0x15c
+
+#define A_MC7_BD_OP 0x164
+
+#define S_OP    0
+
+#define V_OP(x) ((x) << S_OP)
+#define F_OP    V_OP(1U)
+
+#define A_MC7_BIST_ADDR_BEG 0x168
+
+#define A_MC7_BIST_ADDR_END 0x16c
+
+#define A_MC7_BIST_DATA 0x170
+
+#define A_MC7_BIST_OP 0x174
+
+#define S_CONT    3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT    V_CONT(1U)
+
+#define A_MC7_INT_ENABLE 0x178
+
+#define S_AE    17
+#define V_AE(x) ((x) << S_AE)
+#define F_AE    V_AE(1U)
+
+#define S_PE    2
+#define M_PE    0x7fff
+
+#define V_PE(x) ((x) << S_PE)
+
+#define G_PE(x) (((x) >> S_PE) & M_PE)
+
+#define S_UE    1
+#define V_UE(x) ((x) << S_UE)
+#define F_UE    V_UE(1U)
+
+#define S_CE    0
+#define V_CE(x) ((x) << S_CE)
+#define F_CE    V_CE(1U)
+
+#define A_MC7_INT_CAUSE 0x17c
+
+#define MC7_PMTX_BASE_ADDR 0x180
+
+#define MC7_CM_BASE_ADDR 0x200
+
+#define A_CIM_BOOT_CFG 0x280
+
+#define S_BOOTADDR    2
+#define M_BOOTADDR    0x3fffffff
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x28c
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x290
+
+#define A_CIM_HOST_INT_ENABLE 0x298
+
+#define S_DTAGPARERR    28
+#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR)
+#define F_DTAGPARERR    V_DTAGPARERR(1U)
+
+#define S_ITAGPARERR    27
+#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR)
+#define F_ITAGPARERR    V_ITAGPARERR(1U)
+
+#define S_IBQTPPARERR    26
+#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR)
+#define F_IBQTPPARERR    V_IBQTPPARERR(1U)
+
+#define S_IBQULPPARERR    25
+#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
+#define F_IBQULPPARERR    V_IBQULPPARERR(1U)
+
+#define S_IBQSGEHIPARERR    24
+#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
+#define F_IBQSGEHIPARERR    V_IBQSGEHIPARERR(1U)
+
+#define S_IBQSGELOPARERR    23
+#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
+#define F_IBQSGELOPARERR    V_IBQSGELOPARERR(1U)
+
+#define S_OBQULPLOPARERR    22
+#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR)
+#define F_OBQULPLOPARERR    V_OBQULPLOPARERR(1U)
+
+#define S_OBQULPHIPARERR    21
+#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR)
+#define F_OBQULPHIPARERR    V_OBQULPHIPARERR(1U)
+
+#define S_OBQSGEPARERR    20
+#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
+#define F_OBQSGEPARERR    V_OBQSGEPARERR(1U)
+
+#define S_DCACHEPARERR    19
+#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR)
+#define F_DCACHEPARERR    V_DCACHEPARERR(1U)
+
+#define S_ICACHEPARERR    18
+#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR)
+#define F_ICACHEPARERR    V_ICACHEPARERR(1U)
+
+#define S_DRAMPARERR    17
+#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
+#define F_DRAMPARERR    V_DRAMPARERR(1U)
+
+#define A_CIM_HOST_INT_CAUSE 0x29c
+
+#define S_BLKWRPLINT    12
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT    V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT    11
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT    V_BLKRDPLINT(1U)
+
+#define S_BLKWRCTLINT    10
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT    V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT    9
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT    V_BLKRDCTLINT(1U)
+
+#define S_BLKWRFLASHINT    8
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT    V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT    7
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT    V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT    6
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT    V_SGLWRFLASHINT(1U)
+
+#define S_WRBLKFLASHINT    5
+#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
+#define F_WRBLKFLASHINT    V_WRBLKFLASHINT(1U)
+
+#define S_BLKWRBOOTINT    4
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT    V_BLKWRBOOTINT(1U)
+
+#define S_FLASHRANGEINT    2
+#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
+#define F_FLASHRANGEINT    V_FLASHRANGEINT(1U)
+
+#define S_SDRAMRANGEINT    1
+#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
+#define F_SDRAMRANGEINT    V_SDRAMRANGEINT(1U)
+
+#define S_RSVDSPACEINT    0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT    V_RSVDSPACEINT(1U)
+
+#define A_CIM_HOST_ACC_CTRL 0x2b0
+
+#define S_HOSTBUSY    17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY    V_HOSTBUSY(1U)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
+
+#define A_CIM_IBQ_DBG_CFG 0x2c0
+
+#define S_IBQDBGADDR    16
+#define M_IBQDBGADDR    0x1ff
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGQID    3
+#define M_IBQDBGQID    0x3
+#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
+#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
+
+#define S_IBQDBGWR    2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR    V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY    1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY    V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN    0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN    V_IBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x2c8
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_RXFBARBPRIO    25
+#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
+#define F_RXFBARBPRIO    V_RXFBARBPRIO(1U)
+
+#define S_TXFBARBPRIO    24
+#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
+#define F_TXFBARBPRIO    V_TXFBARBPRIO(1U)
+
+#define S_NICMODE    14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE    V_NICMODE(1U)
+
+#define S_IPV6ENABLE    15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE    V_IPV6ENABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_VLANEXTRACTIONENABLE    12
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_TXPACINGENABLE    24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE    V_TXPACINGENABLE(1U)
+
+#define S_PATHMTU    15
+#define V_PATHMTU(x) ((x) << S_PATHMTU)
+#define F_PATHMTU    V_PATHMTU(1U)
+
+#define S_IPCHECKSUMOFFLOAD    13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD    V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD    12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD    V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD    11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD    V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_IPTTL    0
+#define M_IPTTL    0xff
+#define V_IPTTL(x) ((x) << S_IPTTL)
+
+#define A_TP_CMM_MM_BASE 0x314
+
+#define A_TP_CMM_TIMER_BASE 0x318
+
+#define S_CMTIMERMAXNUM    28
+#define M_CMTIMERMAXNUM    0x3
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+
+#define A_TP_PMM_SIZE 0x31c
+
+#define A_TP_PMM_TX_BASE 0x320
+
+#define A_TP_PMM_RX_BASE 0x328
+
+#define A_TP_PMM_RX_PAGE_SIZE 0x32c
+
+#define A_TP_PMM_RX_MAX_PAGE 0x330
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
+
+#define A_TP_PMM_TX_MAX_PAGE 0x338
+
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_MTUDEFAULT    16
+#define M_MTUDEFAULT    0xffff
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+
+#define S_MTUENABLE    10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE    V_MTUENABLE(1U)
+
+#define S_SACKRX    8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX    V_SACKRX(1U)
+
+#define S_SACKMODE    4
+
+#define M_SACKMODE    0x3
+
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+
+#define S_WINDOWSCALEMODE    2
+#define M_WINDOWSCALEMODE    0x3
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE    0
+
+#define M_TIMESTAMPSMODE    0x3
+
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_AUTOSTATE3    30
+#define M_AUTOSTATE3    0x3
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+
+#define S_AUTOSTATE2    28
+#define M_AUTOSTATE2    0x3
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+
+#define S_AUTOSTATE1    26
+#define M_AUTOSTATE1    0x3
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD    5
+#define M_BYTETHRESHOLD    0xfffff
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD    3
+#define M_MSSTHRESHOLD    0x3
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL    2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL    V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE    1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE    V_AUTOENABLE(1U)
+
+#define S_DACK_MODE    0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE    V_DACK_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TXTOSQUEUEMAPMODE    26
+#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
+#define F_TXTOSQUEUEMAPMODE    V_TXTOSQUEUEMAPMODE(1U)
+
+#define S_ENABLEEPCMDAFULL    23
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL    V_ENABLEEPCMDAFULL(1U)
+
+#define S_MODULATEUNIONMODE    22
+#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
+#define F_MODULATEUNIONMODE    V_MODULATEUNIONMODE(1U)
+
+#define S_TXDEFERENABLE    20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE    V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE    19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE    V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATDACK    16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK    V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE    15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE    V_TXCONGESTIONMODE(1U)
+
+#define S_ENABLEOCSPIFULL    30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL    V_ENABLEOCSPIFULL(1U)
+
+#define S_LOCKTID    28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID    V_LOCKTID(1U)
+
+#define S_TABLELATENCYDELTA    0
+#define M_TABLELATENCYDELTA    0xf
+#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
+#define G_TABLELATENCYDELTA(x) \
+	(((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
+
+#define A_TP_PC_CONFIG2 0x34c
+
+#define S_DISBLEDAPARBIT0    15
+#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0)
+#define F_DISBLEDAPARBIT0    V_DISBLEDAPARBIT0(1U)
+
+#define S_ENABLEARPMISS    13
+#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
+#define F_ENABLEARPMISS    V_ENABLEARPMISS(1U)
+
+#define S_ENABLENONOFDTNLSYN    12
+#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN)
+#define F_ENABLENONOFDTNLSYN    V_ENABLENONOFDTNLSYN(1U)
+
+#define S_ENABLEIPV6RSS    11
+#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
+#define F_ENABLEIPV6RSS    V_ENABLEIPV6RSS(1U)
+
+#define S_CHDRAFULL    4
+#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
+#define F_CHDRAFULL    V_CHDRAFULL(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x350
+
+#define A_TP_TCP_BACKOFF_REG1 0x354
+
+#define A_TP_TCP_BACKOFF_REG2 0x358
+
+#define A_TP_TCP_BACKOFF_REG3 0x35c
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_MAXRXDATA    16
+#define M_MAXRXDATA    0xffff
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+
+#define S_RXCOALESCESIZE    0
+#define M_RXCOALESCESIZE    0xffff
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_TXDATAACKIDX    16
+#define M_TXDATAACKIDX    0xf
+
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+
+#define S_TXPACEAUTOSTRICT    10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT    V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED    9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED    V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO    8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO    V_TXPACEAUTO(1U)
+
+#define S_RXCOALESCEENABLE    1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE    V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN    0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN    V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define A_TP_PARA_REG5 0x374
+
+#define S_RXDDPOFFINIT    3
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT    V_RXDDPOFFINIT(1U)
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_T3A_ENABLEESND    13
+#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
+#define F_T3A_ENABLEESND    V_T3A_ENABLEESND(1U)
+
+#define S_ENABLEESND    11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND    V_ENABLEESND(1U)
+
+#define A_TP_PARA_REG7 0x37c
+
+#define S_PMMAXXFERLEN1    16
+#define M_PMMAXXFERLEN1    0xffff
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0    0
+#define M_PMMAXXFERLEN0    0xffff
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_TIMERRESOLUTION    16
+#define M_TIMERRESOLUTION    0xff
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION    8
+#define M_TIMESTAMPRESOLUTION    0xff
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION    0
+#define M_DELAYEDACKRESOLUTION    0xff
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x394
+
+#define A_TP_RXT_MIN 0x398
+
+#define A_TP_RXT_MAX 0x39c
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define A_TP_KEEP_IDLE 0x3a8
+
+#define A_TP_KEEP_INTVL 0x3ac
+
+#define A_TP_INIT_SRTT 0x3b0
+
+#define A_TP_DACK_TIMER 0x3b4
+
+#define A_TP_FINWAIT2_TIMER 0x3b8
+
+#define A_TP_SHIFT_CNT 0x3c0
+
+#define S_SYNSHIFTMAX    24
+
+#define M_SYNSHIFTMAX    0xff
+
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1    20
+
+#define M_RXTSHIFTMAXR1    0xf
+
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2    16
+
+#define M_RXTSHIFTMAXR2    0xf
+
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX    12
+#define M_PERSHIFTBACKOFFMAX    0xf
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX    8
+#define M_PERSHIFTMAX    0xf
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAX    0
+
+#define M_KEEPALIVEMAX    0xff
+
+#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+
+#define A_TP_MTU_PORT_TABLE 0x3d0
+
+#define A_TP_CCTRL_TABLE 0x3dc
+
+#define A_TP_MTU_TABLE 0x3e4
+
+#define A_TP_RSS_MAP_TABLE 0x3e8
+
+#define A_TP_RSS_LKP_TABLE 0x3ec
+
+#define A_TP_RSS_CONFIG 0x3f0
+
+#define S_TNL4TUPEN    29
+#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
+#define F_TNL4TUPEN    V_TNL4TUPEN(1U)
+
+#define S_TNL2TUPEN    28
+#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
+#define F_TNL2TUPEN    V_TNL2TUPEN(1U)
+
+#define S_TNLPRTEN    26
+#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
+#define F_TNLPRTEN    V_TNLPRTEN(1U)
+
+#define S_TNLMAPEN    25
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN    V_TNLMAPEN(1U)
+
+#define S_TNLLKPEN    24
+#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
+#define F_TNLLKPEN    V_TNLLKPEN(1U)
+
+#define S_RRCPLMAPEN    7
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN    V_RRCPLMAPEN(1U)
+
+#define S_RRCPLCPUSIZE    4
+#define M_RRCPLCPUSIZE    0x7
+#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+
+#define S_RQFEEDBACKENABLE    3
+#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
+#define F_RQFEEDBACKENABLE    V_RQFEEDBACKENABLE(1U)
+
+#define S_HASHTOEPLITZ    2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ    V_HASHTOEPLITZ(1U)
+
+#define S_DISABLE    0
+
+#define A_TP_TM_PIO_ADDR 0x418
+
+#define A_TP_TM_PIO_DATA 0x41c
+
+#define A_TP_TX_MOD_QUE_TABLE 0x420
+
+#define A_TP_TX_RESOURCE_LIMIT 0x424
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+
+#define S_TX_MOD_QUEUE_REQ_MAP    0
+#define M_TX_MOD_QUEUE_REQ_MAP    0xff
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+
+#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+
+#define A_TP_MOD_RATE_LIMIT 0x438
+
+#define A_TP_PIO_ADDR 0x440
+
+#define A_TP_PIO_DATA 0x444
+
+#define A_TP_RESET 0x44c
+
+#define S_FLSTINITENABLE    1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE    V_FLSTINITENABLE(1U)
+
+#define S_TPRESET    0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET    V_TPRESET(1U)
+
+#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+
+#define A_TP_CMM_MM_TX_FLST_BASE 0x464
+
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+
+#define A_TP_MIB_INDEX 0x450
+
+#define A_TP_MIB_RDATA 0x454
+
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+
+#define A_TP_INT_ENABLE 0x470
+
+#define S_FLMTXFLSTEMPTY    30
+#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
+#define F_FLMTXFLSTEMPTY    V_FLMTXFLSTEMPTY(1U)
+
+#define S_FLMRXFLSTEMPTY    29
+#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
+#define F_FLMRXFLSTEMPTY    V_FLMRXFLSTEMPTY(1U)
+
+#define S_ARPLUTPERR    26
+#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
+#define F_ARPLUTPERR    V_ARPLUTPERR(1U)
+
+#define S_CMCACHEPERR    24
+#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
+#define F_CMCACHEPERR    V_CMCACHEPERR(1U)
+
+#define A_TP_INT_CAUSE 0x474
+
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE    0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE    V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_TX_TRC_KEY0 0x20
+
+#define A_TP_RX_TRC_KEY0 0x120
+
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0RCVD    0
+#define M_TXDROPCNTCH0RCVD    0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
+			       M_TXDROPCNTCH0RCVD)
+
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
+#define A_ULPRX_CTL 0x500
+
+#define S_ROUND_ROBIN    4
+#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
+#define F_ROUND_ROBIN    V_ROUND_ROBIN(1U)
+
+#define A_ULPRX_INT_ENABLE 0x504
+
+#define S_DATASELFRAMEERR0    7
+#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0)
+#define F_DATASELFRAMEERR0    V_DATASELFRAMEERR0(1U)
+
+#define S_DATASELFRAMEERR1    6
+#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1)
+#define F_DATASELFRAMEERR1    V_DATASELFRAMEERR1(1U)
+
+#define S_PCMDMUXPERR    5
+#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR)
+#define F_PCMDMUXPERR    V_PCMDMUXPERR(1U)
+
+#define S_ARBFPERR    4
+#define V_ARBFPERR(x) ((x) << S_ARBFPERR)
+#define F_ARBFPERR    V_ARBFPERR(1U)
+
+#define S_ARBPF0PERR    3
+#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR)
+#define F_ARBPF0PERR    V_ARBPF0PERR(1U)
+
+#define S_ARBPF1PERR    2
+#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR)
+#define F_ARBPF1PERR    V_ARBPF1PERR(1U)
+
+#define S_PARERRPCMD    1
+#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD)
+#define F_PARERRPCMD    V_PARERRPCMD(1U)
+
+#define S_PARERRDATA    0
+#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
+#define F_PARERRDATA    V_PARERRDATA(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
+
+#define A_ULPRX_ISCSI_LLIMIT 0x50c
+
+#define A_ULPRX_ISCSI_ULIMIT 0x510
+
+#define A_ULPRX_ISCSI_TAGMASK 0x514
+
+#define A_ULPRX_ISCSI_PSZ 0x518
+
+#define A_ULPRX_TDDP_LLIMIT 0x51c
+
+#define A_ULPRX_TDDP_ULIMIT 0x520
+#define A_ULPRX_TDDP_PSZ 0x528
+
+#define S_HPZ0    0
+#define M_HPZ0    0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULPRX_STAG_LLIMIT 0x52c
+
+#define A_ULPRX_STAG_ULIMIT 0x530
+
+#define A_ULPRX_RQ_LLIMIT 0x534
+
+#define A_ULPRX_RQ_ULIMIT 0x538
+
+#define A_ULPRX_PBL_LLIMIT 0x53c
+
+#define A_ULPRX_PBL_ULIMIT 0x540
+
+#define A_ULPRX_TDDP_TAGMASK 0x524
+
+#define A_ULPTX_CONFIG 0x580
+
+#define S_CFG_CQE_SOP_MASK    1
+#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK)
+#define F_CFG_CQE_SOP_MASK    V_CFG_CQE_SOP_MASK(1U)
+
+#define S_CFG_RR_ARB    0
+#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
+#define F_CFG_RR_ARB    V_CFG_RR_ARB(1U)
+
+#define A_ULPTX_INT_ENABLE 0x584
+
+#define S_PBL_BOUND_ERR_CH1    1
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1    V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0    0
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0    V_PBL_BOUND_ERR_CH0(1U)
+
+#define A_ULPTX_INT_CAUSE 0x588
+
+#define A_ULPTX_TPT_LLIMIT 0x58c
+
+#define A_ULPTX_TPT_ULIMIT 0x590
+
+#define A_ULPTX_PBL_LLIMIT 0x594
+
+#define A_ULPTX_PBL_ULIMIT 0x598
+
+#define A_ULPTX_DMA_WEIGHT 0x5ac
+
+#define S_D1_WEIGHT    16
+#define M_D1_WEIGHT    0xffff
+#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+
+#define S_D0_WEIGHT    0
+#define M_D0_WEIGHT    0xffff
+#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+
+#define A_PM1_RX_CFG 0x5c0
+#define A_PM1_RX_MODE 0x5c4
+
+#define A_PM1_RX_INT_ENABLE 0x5d8
+
+#define S_ZERO_E_CMD_ERROR    18
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR    V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR    17
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR    V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR    16
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR    V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR    15
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR    V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR    14
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR    V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR    13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR    V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR    12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR    V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR    11
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR    V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR    10
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR    V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR    9
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR    V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR    8
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR    V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR    7
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR    V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR    6
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR    V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR    3
+#define M_IESPI_PAR_ERROR    0x7
+
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+
+#define S_OCSPI_PAR_ERROR    0
+#define M_OCSPI_PAR_ERROR    0x7
+
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+
+#define A_PM1_RX_INT_CAUSE 0x5dc
+
+#define A_PM1_TX_CFG 0x5e0
+#define A_PM1_TX_MODE 0x5e4
+
+#define A_PM1_TX_INT_ENABLE 0x5f8
+
+#define S_ZERO_C_CMD_ERROR    18
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR    V_ZERO_C_CMD_ERROR(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR    17
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR    V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR    16
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR    V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR    15
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR    V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR    14
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR    V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR    13
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR    V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR    12
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR    V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR    11
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR    V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR    10
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR    V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR    9
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR    V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR    8
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR    V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR    7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR    V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR    6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR    V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR    3
+#define M_ICSPI_PAR_ERROR    0x7
+
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+
+#define S_OESPI_PAR_ERROR    0
+#define M_OESPI_PAR_ERROR    0x7
+
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+
+#define A_PM1_TX_INT_CAUSE 0x5fc
+
+#define A_MPS_CFG 0x600
+
+#define S_TPRXPORTEN    4
+#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
+#define F_TPRXPORTEN    V_TPRXPORTEN(1U)
+
+#define S_TPTXPORT1EN    3
+#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
+#define F_TPTXPORT1EN    V_TPTXPORT1EN(1U)
+
+#define S_TPTXPORT0EN    2
+#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
+#define F_TPTXPORT0EN    V_TPTXPORT0EN(1U)
+
+#define S_PORT1ACTIVE    1
+#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
+#define F_PORT1ACTIVE    V_PORT1ACTIVE(1U)
+
+#define S_PORT0ACTIVE    0
+#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
+#define F_PORT0ACTIVE    V_PORT0ACTIVE(1U)
+
+#define S_ENFORCEPKT    11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT    V_ENFORCEPKT(1U)
+
+#define A_MPS_INT_ENABLE 0x61c
+
+#define S_MCAPARERRENB    6
+#define M_MCAPARERRENB    0x7
+
+#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+
+#define S_RXTPPARERRENB    4
+#define M_RXTPPARERRENB    0x3
+
+#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+
+#define S_TX1TPPARERRENB    2
+#define M_TX1TPPARERRENB    0x3
+
+#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+
+#define S_TX0TPPARERRENB    0
+#define M_TX0TPPARERRENB    0x3
+
+#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+
+#define A_MPS_INT_CAUSE 0x620
+
+#define S_MCAPARERR    6
+#define M_MCAPARERR    0x7
+
+#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+
+#define S_RXTPPARERR    4
+#define M_RXTPPARERR    0x3
+
+#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+
+#define S_TX1TPPARERR    2
+#define M_TX1TPPARERR    0x3
+
+#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+
+#define S_TX0TPPARERR    0
+#define M_TX0TPPARERR    0x3
+
+#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+
+#define A_CPL_SWITCH_CNTRL 0x640
+
+#define A_CPL_INTR_ENABLE 0x650
+
+#define S_CIM_OP_MAP_PERR    5
+#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
+#define F_CIM_OP_MAP_PERR    V_CIM_OP_MAP_PERR(1U)
+
+#define S_CIM_OVFL_ERROR    4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR    V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR    3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR    V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR    2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR    V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR    1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR    V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR    0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR    V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x654
+
+#define A_CPL_MAP_TBL_DATA 0x65c
+
+#define A_SMB_GLOBAL_TIME_CFG 0x660
+
+#define A_I2C_CFG 0x6a0
+
+#define S_I2C_CLKDIV    0
+#define M_I2C_CLKDIV    0xfff
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+
+#define A_MI1_CFG 0x6b0
+
+#define S_CLKDIV    5
+#define M_CLKDIV    0xff
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+
+#define S_ST    3
+
+#define M_ST    0x3
+
+#define V_ST(x) ((x) << S_ST)
+
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN    2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN    V_PREEN(1U)
+
+#define S_MDIINV    1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV    V_MDIINV(1U)
+
+#define S_MDIEN    0
+#define V_MDIEN(x) ((x) << S_MDIEN)
+#define F_MDIEN    V_MDIEN(1U)
+
+#define A_MI1_ADDR 0x6b4
+
+#define S_PHYADDR    5
+#define M_PHYADDR    0x1f
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+
+#define S_REGADDR    0
+#define M_REGADDR    0x1f
+#define V_REGADDR(x) ((x) << S_REGADDR)
+
+#define A_MI1_DATA 0x6b8
+
+#define A_MI1_OP 0x6bc
+
+#define S_MDI_OP    0
+#define M_MDI_OP    0x3
+#define V_MDI_OP(x) ((x) << S_MDI_OP)
+
+#define A_SF_DATA 0x6d8
+
+#define A_SF_OP 0x6dc
+
+#define S_BYTECNT    1
+#define M_BYTECNT    0x3
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+
+#define A_PL_INT_ENABLE0 0x6e0
+
+#define S_T3DBG    23
+#define V_T3DBG(x) ((x) << S_T3DBG)
+#define F_T3DBG    V_T3DBG(1U)
+
+#define S_XGMAC0_1    20
+#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
+#define F_XGMAC0_1    V_XGMAC0_1(1U)
+
+#define S_XGMAC0_0    19
+#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
+#define F_XGMAC0_0    V_XGMAC0_0(1U)
+
+#define S_MC5A    18
+#define V_MC5A(x) ((x) << S_MC5A)
+#define F_MC5A    V_MC5A(1U)
+
+#define S_CPL_SWITCH    12
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH    V_CPL_SWITCH(1U)
+
+#define S_MPS0    11
+#define V_MPS0(x) ((x) << S_MPS0)
+#define F_MPS0    V_MPS0(1U)
+
+#define S_PM1_TX    10
+#define V_PM1_TX(x) ((x) << S_PM1_TX)
+#define F_PM1_TX    V_PM1_TX(1U)
+
+#define S_PM1_RX    9
+#define V_PM1_RX(x) ((x) << S_PM1_RX)
+#define F_PM1_RX    V_PM1_RX(1U)
+
+#define S_ULP2_TX    8
+#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
+#define F_ULP2_TX    V_ULP2_TX(1U)
+
+#define S_ULP2_RX    7
+#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
+#define F_ULP2_RX    V_ULP2_RX(1U)
+
+#define S_TP1    6
+#define V_TP1(x) ((x) << S_TP1)
+#define F_TP1    V_TP1(1U)
+
+#define S_CIM    5
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM    V_CIM(1U)
+
+#define S_MC7_CM    4
+#define V_MC7_CM(x) ((x) << S_MC7_CM)
+#define F_MC7_CM    V_MC7_CM(1U)
+
+#define S_MC7_PMTX    3
+#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
+#define F_MC7_PMTX    V_MC7_PMTX(1U)
+
+#define S_MC7_PMRX    2
+#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
+#define F_MC7_PMRX    V_MC7_PMRX(1U)
+
+#define S_PCIM0    1
+#define V_PCIM0(x) ((x) << S_PCIM0)
+#define F_PCIM0    V_PCIM0(1U)
+
+#define S_SGE3    0
+#define V_SGE3(x) ((x) << S_SGE3)
+#define F_SGE3    V_SGE3(1U)
+
+#define A_PL_INT_CAUSE0 0x6e4
+
+#define A_PL_RST 0x6f0
+
+#define S_FATALPERREN    4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN    V_FATALPERREN(1U)
+
+#define S_CRSTWRM    1
+#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
+#define F_CRSTWRM    V_CRSTWRM(1U)
+
+#define A_PL_REV 0x6f4
+
+#define A_PL_CLI 0x6f8
+
+#define A_MC5_DB_CONFIG 0x704
+
+#define S_TMTYPEHI    30
+#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
+#define F_TMTYPEHI    V_TMTYPEHI(1U)
+
+#define S_TMPARTSIZE    28
+#define M_TMPARTSIZE    0x3
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
+#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
+
+#define S_TMTYPE    26
+#define M_TMTYPE    0x3
+#define V_TMTYPE(x) ((x) << S_TMTYPE)
+#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+
+#define S_COMPEN    17
+#define V_COMPEN(x) ((x) << S_COMPEN)
+#define F_COMPEN    V_COMPEN(1U)
+
+#define S_PRTYEN    6
+#define V_PRTYEN(x) ((x) << S_PRTYEN)
+#define F_PRTYEN    V_PRTYEN(1U)
+
+#define S_MBUSEN    5
+#define V_MBUSEN(x) ((x) << S_MBUSEN)
+#define F_MBUSEN    V_MBUSEN(1U)
+
+#define S_DBGIEN    4
+#define V_DBGIEN(x) ((x) << S_DBGIEN)
+#define F_DBGIEN    V_DBGIEN(1U)
+
+#define S_TMRDY    2
+#define V_TMRDY(x) ((x) << S_TMRDY)
+#define F_TMRDY    V_TMRDY(1U)
+
+#define S_TMRST    1
+#define V_TMRST(x) ((x) << S_TMRST)
+#define F_TMRST    V_TMRST(1U)
+
+#define S_TMMODE    0
+#define V_TMMODE(x) ((x) << S_TMMODE)
+#define F_TMMODE    V_TMMODE(1U)
+
+#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+
+#define A_MC5_DB_FILTER_TABLE 0x710
+
+#define A_MC5_DB_SERVER_INDEX 0x714
+
+#define A_MC5_DB_RSP_LATENCY 0x720
+
+#define S_RDLAT    16
+#define M_RDLAT    0x1f
+#define V_RDLAT(x) ((x) << S_RDLAT)
+
+#define S_LRNLAT    8
+#define M_LRNLAT    0x1f
+#define V_LRNLAT(x) ((x) << S_LRNLAT)
+
+#define S_SRCHLAT    0
+#define M_SRCHLAT    0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+
+#define A_MC5_DB_PART_ID_INDEX 0x72c
+
+#define A_MC5_DB_INT_ENABLE 0x740
+
+#define S_DELACTEMPTY    18
+#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
+#define F_DELACTEMPTY    V_DELACTEMPTY(1U)
+
+#define S_DISPQPARERR    17
+#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
+#define F_DISPQPARERR    V_DISPQPARERR(1U)
+
+#define S_REQQPARERR    16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR    V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD    15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD    V_UNKNOWNCMD(1U)
+
+#define S_NFASRCHFAIL    8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL    V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL    7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL    V_ACTRGNFULL(1U)
+
+#define S_PARITYERR    6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR    V_PARITYERR(1U)
+
+#define A_MC5_DB_INT_CAUSE 0x744
+
+#define A_MC5_DB_DBGI_CONFIG 0x774
+
+#define A_MC5_DB_DBGI_REQ_CMD 0x778
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+
+#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
+
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+
+#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
+
+#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+
+#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+
+#define S_DBGIRSPVALID    0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID    V_DBGIRSPVALID(1U)
+
+#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
+
+#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
+
+#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+
+#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+
+#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+
+#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+
+#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+
+#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+
+#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+
+#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+
+#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+
+#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+
+#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+
+#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+
+#define A_MC5_DB_DATA_READ_CMD 0x7f8
+
+#define XGMAC0_0_BASE_ADDR 0x800
+
+#define A_XGM_TX_CTRL 0x800
+
+#define S_TXEN    0
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN    V_TXEN(1U)
+
+#define A_XGM_TX_CFG 0x804
+
+#define S_TXPAUSEEN    0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN    V_TXPAUSEEN(1U)
+
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
+#define A_XGM_RX_CTRL 0x80c
+
+#define S_RXEN    0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN    V_RXEN(1U)
+
+#define A_XGM_RX_CFG 0x810
+
+#define S_DISPAUSEFRAMES    9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES    V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES    8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES    V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO    7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO    V_ENJUMBO(1U)
+
+#define S_RMFCS    6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS    V_RMFCS(1U)
+
+#define S_ENHASHMCAST    2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST    V_ENHASHMCAST(1U)
+
+#define S_COPYALLFRAMES    0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES    V_COPYALLFRAMES(1U)
+
+#define S_DISBCAST    1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST    V_DISBCAST(1U)
+
+#define A_XGM_RX_HASH_LOW 0x814
+
+#define A_XGM_RX_HASH_HIGH 0x818
+
+#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
+
+#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
+
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+
+#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
+
+#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
+
+#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_LINKFAULTCHANGE    9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE    V_LINKFAULTCHANGE(1U)
+
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+
+#define A_XGM_STAT_CTRL 0x880
+
+#define S_CLRSTATS    2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS    V_CLRSTATS(1U)
+
+#define A_XGM_RXFIFO_CFG 0x884
+
+#define S_RXFIFO_EMPTY    31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY    V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFOPAUSEHWM    17
+#define M_RXFIFOPAUSEHWM    0xfff
+
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM    5
+#define M_RXFIFOPAUSELWM    0xfff
+
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_RXSTRFRWRD    1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD    V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES    0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES    V_DISERRFRAMES(1U)
+
+#define A_XGM_TXFIFO_CFG 0x888
+
+#define S_UNDERUNFIX    22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX    V_UNDERUNFIX(1U)
+
+#define S_TXIPG    13
+#define M_TXIPG    0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH    4
+#define M_TXFIFOTHRESH    0x1ff
+
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+
+#define S_ENDROPPKT    21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT    V_ENDROPPKT(1U)
+
+#define A_XGM_SERDES_CTRL 0x890
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_SERDESRESET_    24
+#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
+#define F_SERDESRESET_    V_SERDESRESET_(1U)
+
+#define S_RXENABLE    4
+#define V_RXENABLE(x) ((x) << S_RXENABLE)
+#define F_RXENABLE    V_RXENABLE(1U)
+
+#define S_TXENABLE    3
+#define V_TXENABLE(x) ((x) << S_TXENABLE)
+#define F_TXENABLE    V_TXENABLE(1U)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define A_XGM_RGMII_IMP 0x89c
+
+#define S_XGM_IMPSETUPDATE    6
+#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
+#define F_XGM_IMPSETUPDATE    V_XGM_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD    3
+#define M_RGMIIIMPPD    0x7
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU    0
+#define M_RGMIIIMPPU    0x7
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+
+#define S_CALRESET    8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET    V_CALRESET(1U)
+
+#define S_CALUPDATE    7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE    V_CALUPDATE(1U)
+
+#define A_XGM_XAUI_IMP 0x8a0
+
+#define S_CALBUSY    31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY    V_CALBUSY(1U)
+
+#define S_XGM_CALFAULT    29
+#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
+#define F_XGM_CALFAULT    V_XGM_CALFAULT(1U)
+
+#define S_CALIMP    24
+#define M_CALIMP    0x1f
+#define V_CALIMP(x) ((x) << S_CALIMP)
+#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
+
+#define S_XAUIIMP    0
+#define M_XAUIIMP    0x7
+#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+
+#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
+
+#define S_RXMAXFRAMERSIZE    17
+#define M_RXMAXFRAMERSIZE    0x3fff
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENFRAMER    14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER    V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE    0
+#define M_RXMAXPKTSIZE    0x3fff
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_XGM_RESET_CTRL 0x8ac
+
+#define S_XGMAC_STOP_EN    4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN    V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_    3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_    V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_    2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_    V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_    1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_    V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_    0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_    V_MAC_RESET_(1U)
+
+#define A_XGM_PORT_CFG 0x8b8
+
+#define S_CLKDIVRESET_    3
+#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
+#define F_CLKDIVRESET_    V_CLKDIVRESET_(1U)
+
+#define S_PORTSPEED    1
+#define M_PORTSPEED    0x3
+
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+
+#define S_ENRGMII    0
+#define V_ENRGMII(x) ((x) << S_ENRGMII)
+#define F_ENRGMII    V_ENRGMII(1U)
+
+#define A_XGM_INT_ENABLE 0x8d4
+
+#define S_TXFIFO_PRTY_ERR    17
+#define M_TXFIFO_PRTY_ERR    0x7
+
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+
+#define S_RXFIFO_PRTY_ERR    14
+#define M_RXFIFO_PRTY_ERR    0x7
+
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN    13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN    V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW    12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW    V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDES_LOS    4
+#define M_SERDES_LOS    0xf
+
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+
+#define S_XAUIPCSCTCERR    3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR    V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE    2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE    V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_XGM_INT    0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT    V_XGM_INT(1U)
+
+#define A_XGM_INT_CAUSE 0x8d8
+
+#define A_XGM_XAUI_ACT_CTRL 0x8dc
+
+#define S_TXACTENABLE    1
+#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
+#define F_TXACTENABLE    V_TXACTENABLE(1U)
+
+#define S_RESET3    23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3    V_RESET3(1U)
+
+#define S_RESET2    22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2    V_RESET2(1U)
+
+#define S_RESET1    21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1    V_RESET1(1U)
+
+#define S_RESET0    20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0    V_RESET0(1U)
+
+#define S_PWRDN3    19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3    V_PWRDN3(1U)
+
+#define S_PWRDN2    18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2    V_PWRDN2(1U)
+
+#define S_PWRDN1    17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1    V_PWRDN1(1U)
+
+#define S_PWRDN0    16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0    V_PWRDN0(1U)
+
+#define S_RESETPLL23    15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23    V_RESETPLL23(1U)
+
+#define S_RESETPLL01    14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01    V_RESETPLL01(1U)
+
+#define A_XGM_SERDES_STAT0 0x8f0
+#define A_XGM_SERDES_STAT1 0x8f4
+#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_LOWSIG0    0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0    V_LOWSIG0(1U)
+
+#define A_XGM_SERDES_STAT3 0x8fc
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
+
+#define A_XGM_STAT_TX_BYTE_HIGH 0x904
+
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
+
+#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
+
+#define A_XGM_STAT_TX_BCAST 0x910
+
+#define A_XGM_STAT_TX_MCAST 0x914
+
+#define A_XGM_STAT_TX_PAUSE 0x918
+
+#define A_XGM_STAT_TX_64B_FRAMES 0x91c
+
+#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
+
+#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
+
+#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
+
+#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
+
+#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
+
+#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
+
+#define A_XGM_STAT_TX_ERR_FRAMES 0x938
+
+#define A_XGM_STAT_RX_BYTES_LOW 0x93c
+
+#define A_XGM_STAT_RX_BYTES_HIGH 0x940
+
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+
+#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
+
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+
+#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
+
+#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
+
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
+
+#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
+
+#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
+
+#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
+
+#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
+
+#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
+
+#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
+
+#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+
+#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+
+#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+
+#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+
+#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+
+#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+
+#define A_XGM_SERDES_STATUS0 0x98c
+
+#define A_XGM_SERDES_STATUS1 0x990
+
+#define S_CMULOCK    31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK    V_CMULOCK(1U)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT    16
+#define M_TXSPI4SOPCNT    0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+
+#define XGMAC0_1_BASE_ADDR 0xa00
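The definitions above follow the driver's fixed naming scheme: for a field FOO, S_FOO is its bit offset, M_FOO its mask (multi-bit fields only), V_FOO(x) shifts a value into place, F_FOO is the single-bit form, and G_FOO(x) extracts the field from a register word.  A minimal read-modify-write sketch of how these compose, assuming the t3_read_reg()/t3_write_reg() accessors declared elsewhere in this tree:

	/* Sketch: rewrite the Tx inter-packet-gap field of A_XGM_TXFIFO_CFG. */
	u32 cfg = t3_read_reg(adapter, A_XGM_TXFIFO_CFG);
	unsigned int ipg = G_TXIPG(cfg);	/* extract the current TXIPG */

	cfg &= ~V_TXIPG(M_TXIPG);		/* clear the field */
	cfg |= V_TXIPG(ipg + 1);		/* shift in a new value */
	t3_write_reg(adapter, A_XGM_TXFIFO_CFG, cfg);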
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
new file mode 100644
index 0000000..e4b5b05
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -0,0 +1,3305 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/arp.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+#define USE_GTS 0
+
+#define SGE_RX_SM_BUF_SIZE 1536
+
+#define SGE_RX_COPY_THRES  256
+#define SGE_RX_PULL_LEN    128
+
+#define SGE_PG_RSVD SMP_CACHE_BYTES
+/*
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
+ * directly.
+ */
+#define FL0_PG_CHUNK_SIZE  2048
+#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
+
+#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
+ * Period of the Tx buffer reclaim timer.  This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
+
+/* WR size in bytes */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Types of Tx queues in each queue set.  Order here matters, do not change.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+/* Values for sge_txq.flags */
+enum {
+	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
+	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
+};
+
+struct tx_desc {
+	__be64 flit[TX_DESC_FLITS];
+};
+
+struct rx_desc {
+	__be32 addr_lo;
+	__be32 len_gen;
+	__be32 gen2;
+	__be32 addr_hi;
+};
+
+struct tx_sw_desc {		/* SW state per Tx descriptor */
+	struct sk_buff *skb;
+	u8 eop;       /* set if last descriptor for packet */
+	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
+	u8 fragidx;   /* first page fragment associated with descriptor */
+	s8 sflit;     /* start flit of first SGL entry in descriptor */
+};
+
+struct rx_sw_desc {                /* SW state per Rx descriptor */
+	union {
+		struct sk_buff *skb;
+		struct fl_pg_chunk pg_chunk;
+	};
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct rsp_desc {		/* response queue descriptor */
+	struct rss_header rss_hdr;
+	__be32 flags;
+	__be32 len_cq;
+	u8 imm_data[47];
+	u8 intr_gen;
+};
+
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+	struct pci_dev *pdev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
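As the comment says, this structure is overlaid on skb->head rather than allocated on its own.  A hedged sketch of the expected setup (the real users are the deferred-unmap helpers for offload Tx later in this file):

	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;	/* lives at skb->head */
	dui->pdev = pdev;
	/* dui->addr[] is then filled with the DMA address of the linear data
	 * and of each page fragment as the SGL is built, so the buffers can
	 * be unmapped later without re-reading the HW descriptors.
	 */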
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
+ */
+static u8 flit_desc_map[] = {
+	0,
+#if SGE_NUM_GENBITS == 1
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
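The table can be sanity-checked against the formula quoted above.  With SGE_NUM_GENBITS == 2 the per-descriptor capacity WR_FLITS works out to 15 (that value comes from headers outside this file, so treat it as an assumption here), and the closed form reproduces the table rows:

	/* Sketch only: the driver uses the lookup table, not this function. */
	static unsigned int flits_to_desc_formula(unsigned int flits)
	{
		const unsigned int wr_flits = 15;	/* assumed: SGE_NUM_GENBITS == 2 */

		if (flits < 2)
			return flits;
		return 1 + (flits - 2) / (wr_flits - 1);
	}

	/* e.g. 15 flits -> 1 descriptor, 16 -> 2, 29 -> 2, 30 -> 3, matching
	 * the SGE_NUM_GENBITS == 2 rows of flit_desc_map[].
	 */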
+
+static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
+{
+	return container_of(q, struct sge_qset, fl[qidx]);
+}
+
+static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
+{
+	return container_of(q, struct sge_qset, rspq);
+}
+
+static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
+{
+	return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+/**
+ *	refill_rspq - replenish an SGE response queue
+ *	@adapter: the adapter
+ *	@q: the response queue to replenish
+ *	@credits: how many new responses to make available
+ *
+ *	Replenishes a response queue by making the supplied number of responses
+ *	available to HW.
+ */
+static inline void refill_rspq(struct adapter *adapter,
+			       const struct sge_rspq *q, unsigned int credits)
+{
+	rmb();
+	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
+		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ *	need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ *	Returns true if the platform needs sk_buff unmapping.  Since the
+ *	result is a compile-time constant, the compiler optimizes away the
+ *	unmapping code when this returns false.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+/**
+ *	unmap_skb - unmap a packet main body and its page fragments
+ *	@skb: the packet
+ *	@q: the Tx queue containing Tx descriptors for the packet
+ *	@cidx: index of Tx descriptor
+ *	@pdev: the PCI device
+ *
+ *	Unmap the main body of an sk_buff and its page fragments, if any.
+ *	Because of the fairly complicated structure of our SGLs and the desire
+ *	to conserve space for metadata, the information necessary to unmap an
+ *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ *	descriptors (the physical addresses of the various data buffers), and
+ *	the SW descriptor state (assorted indices).  The send functions
+ *	initialize the indices for the first packet descriptor so we can unmap
+ *	the buffers held in the first Tx descriptor here, and we have enough
+ *	information at this point to set the state for the next Tx descriptor.
+ *
+ *	Note that it is possible to clean up the first descriptor of a packet
+ *	before the send routines have written the next descriptors, but this
+ *	race does not cause any problem.  We just end up writing the unmapping
+ *	info for the descriptor first.
+ */
+static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
+			     unsigned int cidx, struct pci_dev *pdev)
+{
+	const struct sg_ent *sgp;
+	struct tx_sw_desc *d = &q->sdesc[cidx];
+	int nfrags, frag_idx, curflit, j = d->addr_idx;
+
+	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+	frag_idx = d->fragidx;
+
+	if (frag_idx == 0 && skb_headlen(skb)) {
+		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+		j = 1;
+	}
+
+	curflit = d->sflit + 1 + j;
+	nfrags = skb_shinfo(skb)->nr_frags;
+
+	while (frag_idx < nfrags && curflit < WR_FLITS) {
+		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
+			       PCI_DMA_TODEVICE);
+		j ^= 1;
+		if (j == 0) {
+			sgp++;
+			curflit++;
+		}
+		curflit++;
+		frag_idx++;
+	}
+
+	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
+		d = cidx + 1 == q->size ? q->sdesc : d + 1;
+		d->fragidx = frag_idx;
+		d->addr_idx = j;
+		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+	}
+}
+
+/**
+ *	free_tx_desc - reclaims Tx descriptors and their buffers
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *
+ *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ *	Tx buffers.  Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
+			 unsigned int n)
+{
+	struct tx_sw_desc *d;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	const int need_unmap = need_skb_unmap() &&
+			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
+	d = &q->sdesc[cidx];
+	while (n--) {
+		if (d->skb) {	/* an SGL is present */
+			if (need_unmap)
+				unmap_skb(d->skb, q, cidx, pdev);
+			if (d->eop) {
+				dev_consume_skb_any(d->skb);
+				d->skb = NULL;
+			}
+		}
+		++d;
+		if (++cidx == q->size) {
+			cidx = 0;
+			d = q->sdesc;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed Tx descriptors
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim completed descriptors from
+ *	@chunk: maximum number of descriptors to reclaim
+ *
+ *	Reclaims Tx descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the Tx
+ *	queue's lock held.
+ */
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+						struct sge_txq *q,
+						unsigned int chunk)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	reclaim = min(chunk, reclaim);
+	if (reclaim) {
+		free_tx_desc(adapter, q, reclaim);
+		q->cleaned += reclaim;
+		q->in_use -= reclaim;
+	}
+	return q->processed - q->cleaned;
+}
+
+/**
+ *	should_restart_tx - are there enough resources to restart a Tx queue?
+ *	@q: the Tx queue
+ *
+ *	Checks if there are enough descriptors to restart a suspended Tx queue.
+ */
+static inline int should_restart_tx(const struct sge_txq *q)
+{
+	unsigned int r = q->processed - q->cleaned;
+
+	return q->in_use - r < (q->size >> 1);
+}
+
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+			  struct rx_sw_desc *d)
+{
+	if (q->use_pages && d->pg_chunk.page) {
+		(*d->pg_chunk.p_cnt)--;
+		if (!*d->pg_chunk.p_cnt)
+			pci_unmap_page(pdev,
+				       d->pg_chunk.mapping,
+				       q->alloc_size, PCI_DMA_FROMDEVICE);
+
+		put_page(d->pg_chunk.page);
+		d->pg_chunk.page = NULL;
+	} else {
+		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
+				 q->buf_size, PCI_DMA_FROMDEVICE);
+		kfree_skb(d->skb);
+		d->skb = NULL;
+	}
+}
+
+/**
+ *	free_rx_bufs - free the Rx buffers on an SGE free list
+ *	@pdev: the PCI device associated with the adapter
+ *	@q: the SGE free list to clean up
+ *
+ *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
+ *	this queue should be stopped before calling this function.
+ */
+static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
+{
+	unsigned int cidx = q->cidx;
+
+	while (q->credits--) {
+		struct rx_sw_desc *d = &q->sdesc[cidx];
+
+		clear_rx_desc(pdev, q, d);
+		if (++cidx == q->size)
+			cidx = 0;
+	}
+
+	if (q->pg_chunk.page) {
+		__free_pages(q->pg_chunk.page, q->order);
+		q->pg_chunk.page = NULL;
+	}
+}
+
+/**
+ *	add_one_rx_buf - add a packet buffer to a free-buffer list
+ *	@va:  buffer start VA
+ *	@len: the buffer length
+ *	@d: the HW Rx descriptor to write
+ *	@sd: the SW Rx descriptor to write
+ *	@gen: the generation bit value
+ *	@pdev: the PCI device associated with the adapter
+ *
+ *	Add a buffer of the given length to the supplied HW and SW Rx
+ *	descriptors.
+ */
+static inline int add_one_rx_buf(void *va, unsigned int len,
+				 struct rx_desc *d, struct rx_sw_desc *sd,
+				 unsigned int gen, struct pci_dev *pdev)
+{
+	dma_addr_t mapping;
+
+	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+		return -ENOMEM;
+
+	dma_unmap_addr_set(sd, dma_addr, mapping);
+
+	d->addr_lo = cpu_to_be32(mapping);
+	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+	dma_wmb();
+	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+	return 0;
+}
+
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+				   unsigned int gen)
+{
+	d->addr_lo = cpu_to_be32(mapping);
+	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+	dma_wmb();
+	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+	return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+			  struct rx_sw_desc *sd, gfp_t gfp,
+			  unsigned int order)
+{
+	if (!q->pg_chunk.page) {
+		dma_addr_t mapping;
+
+		q->pg_chunk.page = alloc_pages(gfp, order);
+		if (unlikely(!q->pg_chunk.page))
+			return -ENOMEM;
+		q->pg_chunk.va = page_address(q->pg_chunk.page);
+		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+				    SGE_PG_RSVD;
+		q->pg_chunk.offset = 0;
+		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
+		q->pg_chunk.mapping = mapping;
+	}
+	sd->pg_chunk = q->pg_chunk;
+
+	prefetch(sd->pg_chunk.p_cnt);
+
+	q->pg_chunk.offset += q->buf_size;
+	if (q->pg_chunk.offset == (PAGE_SIZE << order))
+		q->pg_chunk.page = NULL;
+	else {
+		q->pg_chunk.va += q->buf_size;
+		get_page(q->pg_chunk.page);
+	}
+
+	if (sd->pg_chunk.offset == 0)
+		*sd->pg_chunk.p_cnt = 1;
+	else
+		*sd->pg_chunk.p_cnt += 1;
+
+	return 0;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+	if (q->pend_cred >= q->credits / 4) {
+		q->pend_cred = 0;
+		wmb();
+		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+	}
+}
+
+/**
+ *	refill_fl - refill an SGE free-buffer list
+ *	@adap: the adapter
+ *	@q: the free-list to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for allocating new buffers
+ *
+ *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must ensure that
+ *	@n does not exceed the queue's capacity.
+ */
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+{
+	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+	struct rx_desc *d = &q->desc[q->pidx];
+	unsigned int count = 0;
+
+	while (n--) {
+		dma_addr_t mapping;
+		int err;
+
+		if (q->use_pages) {
+			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+						    q->order))) {
+nomem:				q->alloc_failed++;
+				break;
+			}
+			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
+			dma_unmap_addr_set(sd, dma_addr, mapping);
+
+			add_one_rx_chunk(mapping, d, q->gen);
+			pci_dma_sync_single_for_device(adap->pdev, mapping,
+						q->buf_size - SGE_PG_RSVD,
+						PCI_DMA_FROMDEVICE);
+		} else {
+			void *buf_start;
+
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+			if (!skb)
+				goto nomem;
+
+			sd->skb = skb;
+			buf_start = skb->data;
+			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+					     q->gen, adap->pdev);
+			if (unlikely(err)) {
+				clear_rx_desc(adap->pdev, q, sd);
+				break;
+			}
+		}
+
+		d++;
+		sd++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			q->gen ^= 1;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+		count++;
+	}
+
+	q->credits += count;
+	q->pend_cred += count;
+	ring_fl_db(adap, q);
+
+	return count;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+		  GFP_ATOMIC | __GFP_COMP);
+}
+
+/**
+ *	recycle_rx_buf - recycle a receive buffer
+ *	@adap: the adapter
+ *	@q: the SGE free list
+ *	@idx: index of buffer to recycle
+ *
+ *	Recycles the specified buffer on the given free list by adding it at
+ *	the next available slot on the list.
+ */
+static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
+			   unsigned int idx)
+{
+	struct rx_desc *from = &q->desc[idx];
+	struct rx_desc *to = &q->desc[q->pidx];
+
+	q->sdesc[q->pidx] = q->sdesc[idx];
+	to->addr_lo = from->addr_lo;	/* already big endian */
+	to->addr_hi = from->addr_hi;	/* likewise */
+	dma_wmb();
+	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
+	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
+
+	if (++q->pidx == q->size) {
+		q->pidx = 0;
+		q->gen ^= 1;
+	}
+
+	q->credits++;
+	q->pend_cred++;
+	ring_fl_db(adap, q);
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@pdev: the PCI device
+ *	@nelem: the number of descriptors
+ *	@elem_size: the size of each descriptor
+ *	@sw_size: the size of the SW state associated with each ring element
+ *	@phys: the physical address of the allocated ring
+ *	@metadata: address of the array holding the SW state for the ring
+ *
+ *	Allocates resources for an SGE descriptor ring, such as Tx queues,
+ *	free buffer lists, or response queues.  Each SGE ring requires
+ *	space for its HW descriptors plus, optionally, space for the SW state
+ *	associated with each HW entry (the metadata).  The function returns
+ *	three values: the virtual address for the HW ring (the return value
+ *	of the function), the physical address of the HW ring, and the address
+ *	of the SW ring.
+ */
+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+			size_t sw_size, dma_addr_t * phys, void *metadata)
+{
+	size_t len = nelem * elem_size;
+	void *s = NULL;
+	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+
+	if (!p)
+		return NULL;
+	if (sw_size && metadata) {
+		s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+		if (!s) {
+			dma_free_coherent(&pdev->dev, len, p, *phys);
+			return NULL;
+		}
+		*(void **)metadata = s;
+	}
+	memset(p, 0, len);
+	return p;
+}
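A hedged usage sketch (the variable names are illustrative; the queue-set initialization code later in this file makes calls of this shape): the return value is the HW ring's kernel virtual address, while the ring's bus address and the SW metadata array come back through @phys and @metadata:

	struct rx_desc *desc;
	struct rx_sw_desc *sdesc;
	dma_addr_t phys;

	desc = alloc_ring(adap->pdev, nelem, sizeof(struct rx_desc),
			  sizeof(struct rx_sw_desc), &phys, &sdesc);
	if (!desc)
		return -ENOMEM;
	/* desc: coherent HW ring, phys: its DMA address, sdesc: per-entry SW state */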
+
+/**
+ *	t3_reset_qset - reset an SGE queue set
+ *	@q: the queue set
+ *
+ *	Reset the qset structure.  The NAPI structure is preserved in the
+ *	event of the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+	if (q->adap &&
+	    !(q->adap->flags & NAPI_INIT)) {
+		memset(q, 0, sizeof(*q));
+		return;
+	}
+
+	q->adap = NULL;
+	memset(&q->rspq, 0, sizeof(q->rspq));
+	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+	q->txq_stopped = 0;
+	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+	q->rx_reclaim_timer.function = NULL;
+	q->nomem = 0;
+	napi_free_frags(&q->napi);
+}
+
+
+/**
+ *	t3_free_qset - free the resources of an SGE queue set
+ *	@adapter: the adapter owning the queue set
+ *	@q: the queue set
+ *
+ *	Release the HW and SW resources associated with an SGE queue set, such
+ *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
+ *	queue set must be quiesced prior to calling this.
+ */
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+{
+	int i;
+	struct pci_dev *pdev = adapter->pdev;
+
+	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
+		if (q->fl[i].desc) {
+			spin_lock_irq(&adapter->sge.reg_lock);
+			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
+			spin_unlock_irq(&adapter->sge.reg_lock);
+			free_rx_bufs(pdev, &q->fl[i]);
+			kfree(q->fl[i].sdesc);
+			dma_free_coherent(&pdev->dev,
+					  q->fl[i].size *
+					  sizeof(struct rx_desc), q->fl[i].desc,
+					  q->fl[i].phys_addr);
+		}
+
+	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
+		if (q->txq[i].desc) {
+			spin_lock_irq(&adapter->sge.reg_lock);
+			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
+			spin_unlock_irq(&adapter->sge.reg_lock);
+			if (q->txq[i].sdesc) {
+				free_tx_desc(adapter, &q->txq[i],
+					     q->txq[i].in_use);
+				kfree(q->txq[i].sdesc);
+			}
+			dma_free_coherent(&pdev->dev,
+					  q->txq[i].size *
+					  sizeof(struct tx_desc),
+					  q->txq[i].desc, q->txq[i].phys_addr);
+			__skb_queue_purge(&q->txq[i].sendq);
+		}
+
+	if (q->rspq.desc) {
+		spin_lock_irq(&adapter->sge.reg_lock);
+		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
+		spin_unlock_irq(&adapter->sge.reg_lock);
+		dma_free_coherent(&pdev->dev,
+				  q->rspq.size * sizeof(struct rsp_desc),
+				  q->rspq.desc, q->rspq.phys_addr);
+	}
+
+	t3_reset_qset(q);
+}
+
+/**
+ *	init_qset_cntxt - initialize an SGE queue set context info
+ *	@qs: the queue set
+ *	@id: the queue set id
+ *
+ *	Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
+{
+	qs->rspq.cntxt_id = id;
+	qs->fl[0].cntxt_id = 2 * id;
+	qs->fl[1].cntxt_id = 2 * id + 1;
+	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits needed for a scatter/gather list that
+ *	can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
+	return (3 * n) / 2 + (n & 1);
+}
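A worked example of the packing behind this formula: each SGL entry is 12 bytes (a 32-bit length plus a 64-bit address) and entries are stored two per sg_ent, so a full pair takes 3 flits (one flit holding both lengths, two address flits) and a trailing odd entry takes 2:

	/* sgl_len(5) = (3 * 5) / 2 + (5 & 1) = 7 + 1 = 8 flits
	 *            = 2 full pairs (3 flits each) + 1 odd entry (2 flits)
	 */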
+
+/**
+ *	flits_to_desc - returns the num of Tx descriptors for the given flits
+ *	@n: the number of flits
+ *
+ *	Calculates the number of Tx descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
+	return flit_desc_map[n];
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->skb->data);
+	fl->credits--;
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+					    dma_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+					    dma_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres) &&
+	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+		      GFP_ATOMIC | __GFP_COMP) == 0)
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
+/**
+ *	get_packet_pg - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@q: the response queue the packet is being delivered to
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list populated with page chunks.
+ *	If the packet is small we make a copy and recycle the original buffer,
+ *	otherwise we attach the original buffer as a page fragment to a fresh
+ *	sk_buff.  If a positive drop threshold is supplied packets are dropped
+ *	and their buffers recycled if (a) the number of remaining buffers is
+ *	under the threshold and the packet is too big to copy, or (b) there's
+ *	no system memory.
+ *
+ * 	Note: this function is similar to @get_packet but deals with Rx buffers
+ * 	that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+				     struct sge_rspq *q, unsigned int len,
+				     unsigned int drop_thres)
+{
+	struct sk_buff *newskb, *skb;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
+
+	newskb = skb = q->pg_skb;
+	if (!skb && (len <= SGE_RX_COPY_THRES)) {
+		newskb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(newskb != NULL)) {
+			__skb_put(newskb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(newskb->data, sd->pg_chunk.va, len);
+			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+						       len,
+						       PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			return NULL;
+recycle:
+		fl->credits--;
+		recycle_rx_buf(adap, fl, fl->cidx);
+		q->rx_recycle_buf++;
+		return newskb;
+	}
+
+	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
+		goto recycle;
+
+	prefetch(sd->pg_chunk.p_cnt);
+
+	if (!skb)
+		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
+	if (unlikely(!newskb)) {
+		if (!drop_thres)
+			return NULL;
+		goto recycle;
+	}
+
+	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+				    PCI_DMA_FROMDEVICE);
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+		pci_unmap_page(adap->pdev,
+			       sd->pg_chunk.mapping,
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
+	if (!skb) {
+		__skb_put(newskb, SGE_RX_PULL_LEN);
+		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+				   len - SGE_RX_PULL_LEN);
+		newskb->len = len;
+		newskb->data_len = len - SGE_RX_PULL_LEN;
+		newskb->truesize += newskb->data_len;
+	} else {
+		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+				   sd->pg_chunk.page,
+				   sd->pg_chunk.offset, len);
+		newskb->len += len;
+		newskb->data_len += len;
+		newskb->truesize += len;
+	}
+
+	fl->credits--;
+	/*
+	 * We do not refill FLs here; we let the caller do it so the refill
+	 * can overlap a prefetch.
+	 */
+	return newskb;
+}
+
+/**
+ *	get_imm_packet - return the next ingress packet buffer from a response
+ *	@resp: the response descriptor containing the packet data
+ *
+ *	Return a packet containing the immediate data of the given response.
+ */
+static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
+{
+	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
+
+	if (skb) {
+		__skb_put(skb, IMMED_PKT_SIZE);
+		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+	}
+	return skb;
+}
+
+/**
+ *	calc_tx_descs - calculate the number of Tx descriptors for a packet
+ *	@skb: the packet
+ *
+ * 	Returns the number of Tx descriptors needed for the given Ethernet
+ * 	packet.  Ethernet packets require addition of WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+	unsigned int flits;
+
+	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+		return 1;
+
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+	if (skb_shinfo(skb)->gso_size)
+		flits++;
+	return flits_to_desc(flits);
+}
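Putting sgl_len() and flits_to_desc() together, a small worked example (the packet shape is chosen purely for illustration): a non-TSO packet with linear data plus three page fragments maps four buffers.

	/* flits = sgl_len(3 + 1) + 2 = 6 + 2 = 8
	 * descriptors = flits_to_desc(8) = 1
	 *
	 * i.e. the packet fits in a single Tx descriptor; a TSO packet adds
	 * one more flit for the LSO header.
	 */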
+
+/**
+ *	make_sgl - populate a scatter/gather list for a packet
+ *	@skb: the packet
+ *	@sgp: the SGL to populate
+ *	@start: start address of skb main body data to include in the SGL
+ *	@len: length of skb main body data to include in the SGL
+ *	@pdev: the PCI device
+ *
+ *	Generates a scatter/gather list for the buffers that make up a packet
+ *	and returns the SGL size in 8-byte words.  The caller must size the SGL
+ *	appropriately.
+ */
+static inline unsigned int make_sgl(const struct sk_buff *skb,
+				    struct sg_ent *sgp, unsigned char *start,
+				    unsigned int len, struct pci_dev *pdev)
+{
+	dma_addr_t mapping;
+	unsigned int i, j = 0, nfrags;
+
+	if (len) {
+		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
+		sgp->len[0] = cpu_to_be32(len);
+		sgp->addr[0] = cpu_to_be64(mapping);
+		j = 1;
+	}
+
+	nfrags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+					   DMA_TO_DEVICE);
+		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
+		sgp->addr[j] = cpu_to_be64(mapping);
+		j ^= 1;
+		if (j == 0)
+			++sgp;
+	}
+	if (j)
+		sgp->len[j] = 0;
+	return ((nfrags + (len != 0)) * 3) / 2 + j;
+}
+
+/**
+ *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ *	@adap: the adapter
+ *	@q: the Tx queue
+ *
+ *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
+ *	where the HW may go to sleep just after we check; in that case the
+ *	interrupt handler will detect the outstanding Tx packet and ring the
+ *	doorbell for us.
+ *
+ *	When GTS is disabled we unconditionally ring the doorbell.
+ */
+static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+#if USE_GTS
+	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+		set_bit(TXQ_LAST_PKT_DB, &q->flags);
+		t3_write_reg(adap, A_SG_KDOORBELL,
+			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	}
+#else
+	wmb();			/* write descriptors before telling HW */
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
+static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
+#endif
+}
+
+/**
+ *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
+ *	@ndesc: number of Tx descriptors spanned by the SGL
+ *	@skb: the packet corresponding to the WR
+ *	@d: first Tx descriptor to be written
+ *	@pidx: index of above descriptors
+ *	@q: the SGE Tx queue
+ *	@sgl: the SGL
+ *	@flits: number of flits to the start of the SGL in the first descriptor
+ *	@sgl_flits: the SGL size in flits
+ *	@gen: the Tx descriptor generation
+ *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
+ *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
+ *
+ *	Write a work request header and an associated SGL.  If the SGL is
+ *	small enough to fit into one Tx descriptor it has already been written
+ *	and we just need to write the WR header.  Otherwise we distribute the
+ *	SGL across the number of descriptors it spans.
+ */
+static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
+			     struct tx_desc *d, unsigned int pidx,
+			     const struct sge_txq *q,
+			     const struct sg_ent *sgl,
+			     unsigned int flits, unsigned int sgl_flits,
+			     unsigned int gen, __be32 wr_hi,
+			     __be32 wr_lo)
+{
+	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
+	struct tx_sw_desc *sd = &q->sdesc[pidx];
+
+	sd->skb = skb;
+	if (need_skb_unmap()) {
+		sd->fragidx = 0;
+		sd->addr_idx = 0;
+		sd->sflit = flits;
+	}
+
+	if (likely(ndesc == 1)) {
+		sd->eop = 1;
+		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+				   V_WR_SGLSFLT(flits)) | wr_hi;
+		dma_wmb();
+		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+				   V_WR_GEN(gen)) | wr_lo;
+		wr_gen2(d, gen);
+	} else {
+		unsigned int ogen = gen;
+		const u64 *fp = (const u64 *)sgl;
+		struct work_request_hdr *wp = wrp;
+
+		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+				   V_WR_SGLSFLT(flits)) | wr_hi;
+
+		while (sgl_flits) {
+			unsigned int avail = WR_FLITS - flits;
+
+			if (avail > sgl_flits)
+				avail = sgl_flits;
+			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
+			sgl_flits -= avail;
+			ndesc--;
+			if (!sgl_flits)
+				break;
+
+			fp += avail;
+			d++;
+			sd->eop = 0;
+			sd++;
+			if (++pidx == q->size) {
+				pidx = 0;
+				gen ^= 1;
+				d = q->desc;
+				sd = q->sdesc;
+			}
+
+			sd->skb = skb;
+			wrp = (struct work_request_hdr *)d;
+			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+					   V_WR_SGLSFLT(1)) | wr_hi;
+			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+							sgl_flits + 1)) |
+					   V_WR_GEN(gen)) | wr_lo;
+			wr_gen2(d, gen);
+			flits = 1;
+		}
+		sd->eop = 1;
+		wrp->wr_hi |= htonl(F_WR_EOP);
+		dma_wmb();
+		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+		wr_gen2((struct tx_desc *)wp, ogen);
+		WARN_ON(ndesc != 0);
+	}
+}
+
+/**
+ *	write_tx_pkt_wr - write a TX_PKT work request
+ *	@adap: the adapter
+ *	@skb: the packet to send
+ *	@pi: the egress interface
+ *	@pidx: index of the first Tx descriptor to write
+ *	@gen: the generation value to use
+ *	@q: the Tx queue
+ *	@ndesc: number of descriptors the packet will occupy
+ *	@compl: the value of the COMPL bit to use
+ *
+ *	Generate a TX_PKT work request to send the supplied packet.
+ */
+static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+			    const struct port_info *pi,
+			    unsigned int pidx, unsigned int gen,
+			    struct sge_txq *q, unsigned int ndesc,
+			    unsigned int compl)
+{
+	unsigned int flits, sgl_flits, cntrl, tso_info;
+	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+	struct tx_desc *d = &q->desc[pidx];
+	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
+
+	cpl->len = htonl(skb->len);
+	cntrl = V_TXPKT_INTF(pi->port_id);
+
+	if (skb_vlan_tag_present(skb))
+		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
+
+	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+	if (tso_info) {
+		int eth_type;
+		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+
+		d->flit[2] = 0;
+		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+		hdr->cntrl = htonl(cntrl);
+		eth_type = skb_network_offset(skb) == ETH_HLEN ?
+		    CPL_ETH_II : CPL_ETH_II_VLAN;
+		tso_info |= V_LSO_ETH_TYPE(eth_type) |
+		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
+		hdr->lso_info = htonl(tso_info);
+		flits = 3;
+	} else {
+		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
+		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+		cpl->cntrl = htonl(cntrl);
+
+		if (skb->len <= WR_LEN - sizeof(*cpl)) {
+			q->sdesc[pidx].skb = NULL;
+			if (!skb->data_len)
+				skb_copy_from_linear_data(skb, &d->flit[2],
+							  skb->len);
+			else
+				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
+
+			flits = (skb->len + 7) / 8 + 2;
+			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
+					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
+					      | F_WR_SOP | F_WR_EOP | compl);
+			dma_wmb();
+			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
+					      V_WR_TID(q->token));
+			wr_gen2(d, gen);
+			dev_consume_skb_any(skb);
+			return;
+		}
+
+		flits = 2;
+	}
+
+	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+
+	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
+			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
+			 htonl(V_WR_TID(q->token)));
+}
+
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+				    struct sge_qset *qs, struct sge_txq *q)
+{
+	netif_tx_stop_queue(txq);
+	set_bit(TXQ_ETH, &qs->txq_stopped);
+	q->stops++;
+}
+
+/**
+ *	t3_eth_xmit - add a packet to the Ethernet Tx queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
+ */
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int qidx;
+	unsigned int ndesc, pidx, credits, gen, compl;
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct netdev_queue *txq;
+	struct sge_qset *qs;
+	struct sge_txq *q;
+
+	/*
+	 * The chip min packet length is 9 octets but play safe and reject
+	 * anything shorter than an Ethernet header.
+	 */
+	if (unlikely(skb->len < ETH_HLEN)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	qidx = skb_get_queue_mapping(skb);
+	qs = &pi->qs[qidx];
+	q = &qs->txq[TXQ_ETH];
+	txq = netdev_get_tx_queue(dev, qidx);
+
+	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+	credits = q->size - q->in_use;
+	ndesc = calc_tx_descs(skb);
+
+	if (unlikely(credits < ndesc)) {
+		t3_stop_tx_queue(txq, qs, q);
+		dev_err(&adap->pdev->dev,
+			"%s: Tx ring %u full while queue awake!\n",
+			dev->name, q->cntxt_id & 7);
+		return NETDEV_TX_BUSY;
+	}
+
+	q->in_use += ndesc;
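+	/*
+	 * If this packet leaves fewer than stop_thres descriptors free,
+	 * stop the queue now, then re-check: a concurrent reclaim may have
+	 * freed enough entries to let the queue restart immediately.
+	 */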
+	if (unlikely(credits - ndesc < q->stop_thres)) {
+		t3_stop_tx_queue(txq, qs, q);
+
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+			q->restarts++;
+			netif_tx_start_queue(txq);
+		}
+	}
+
+	gen = q->gen;
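+	/*
+	 * Ask the hardware for a completion roughly every eight descriptors:
+	 * bit 3 of the unacknowledged-descriptor count is shifted into the
+	 * COMPL position of the work request header.
+	 */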
+	q->unacked += ndesc;
+	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+	q->unacked &= 7;
+	pidx = q->pidx;
+	q->pidx += ndesc;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->gen ^= 1;
+	}
+
+	/* update port statistics */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+	if (skb_shinfo(skb)->gso_size)
+		qs->port_stats[SGE_PSTAT_TSO]++;
+	if (skb_vlan_tag_present(skb))
+		qs->port_stats[SGE_PSTAT_VLANINS]++;
+
+	/*
+	 * We do not use Tx completion interrupts to free DMAd Tx packets.
+	 * This is good for performance but means that we rely on new Tx
+	 * packets arriving to run the destructors of completed packets,
+	 * which open up space in their sockets' send queues.  Sometimes
+	 * we do not get such new packets causing Tx to stall.  A single
+	 * UDP transmitter is a good example of this situation.  We have
+	 * a clean up timer that periodically reclaims completed packets
+	 * but it doesn't run often enough (nor do we want it to) to prevent
+	 * lengthy stalls.  A solution to this problem is to run the
+	 * destructor early, after the packet is queued but before it's DMAd.
+ *	A downside is that we lie to socket memory accounting, but the amount
+	 * of extra memory is reasonable (limited by the number of Tx
+	 * descriptors), the packets do actually get freed quickly by new
+	 * packets almost always, and for protocols like TCP that wait for
+	 * acks to really free up the data the extra memory is even less.
+	 * On the positive side we run the destructors on the sending CPU
+	 * rather than on a potentially different completing CPU, usually a
+	 * good thing.  We also run them without holding our Tx queue lock,
+	 * unlike what reclaim_completed_tx() would otherwise do.
+	 *
+	 * Run the destructor before telling the DMA engine about the packet
+	 * to make sure it doesn't complete and get freed prematurely.
+	 */
+	if (likely(!skb_shared(skb)))
+		skb_orphan(skb);
+
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+	check_ring_tx_db(adap, q);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	write_imm - write a packet into a Tx descriptor as immediate data
+ *	@d: the Tx descriptor to write
+ *	@skb: the packet
+ *	@len: the length of packet data to write as immediate data
+ *	@gen: the generation bit value to write
+ *
+ *	Writes a packet as immediate data into a Tx descriptor.  The packet
+ *	contains a work request at its beginning.  We must write the packet
+ *	carefully so the SGE doesn't read it accidentally before it's written
+ *	in its entirety.
+ */
+static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
+			     unsigned int len, unsigned int gen)
+{
+	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
+	struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+	if (likely(!skb->data_len))
+		memcpy(&to[1], &from[1], len - sizeof(*from));
+	else
+		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
+	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+					V_WR_BCNTLFLT(len & 7));
+	dma_wmb();
+	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+					V_WR_LEN((len + 7) / 8));
+	wr_gen2(d, gen);
+	kfree_skb(skb);
+}
+
+/**
+ *	check_desc_avail - check descriptor availability on a send queue
+ *	@adap: the adapter
+ *	@q: the send queue
+ *	@skb: the packet needing the descriptors
+ *	@ndesc: the number of Tx descriptors needed
+ *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ *	Checks if the requested number of Tx descriptors is available on an
+ *	SGE send queue.  If the queue is already suspended or not enough
+ *	descriptors are available the packet is queued for later transmission.
+ *	Must be called with the Tx queue locked.
+ *
+ *	Returns 0 if enough descriptors are available, 1 if there aren't
+ *	enough descriptors and the packet has been queued, and 2 if the caller
+ *	needs to retry because there weren't enough descriptors at the
+ *	beginning of the call but some freed up in the mean time.
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+				   struct sk_buff *skb, unsigned int ndesc,
+				   unsigned int qid)
+{
+	if (unlikely(!skb_queue_empty(&q->sendq))) {
+	      addq_exit:__skb_queue_tail(&q->sendq, skb);
+		return 1;
+	}
+	if (unlikely(q->size - q->in_use < ndesc)) {
+		struct sge_qset *qs = txq_to_qset(q, qid);
+
+		set_bit(qid, &qs->txq_stopped);
+		smp_mb__after_atomic();
+
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(qid, &qs->txq_stopped))
+			return 2;
+
+		q->stops++;
+		goto addq_exit;
+	}
+	return 0;
+}
+
+/**
+ *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ *	@q: the SGE control Tx queue
+ *
+ *	This is a variant of reclaim_completed_tx() that is used for Tx queues
+ *	that send only immediate data (presently just the control queues) and
+ *	thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	q->in_use -= reclaim;
+	q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+	return skb->len <= WR_LEN;
+}
+
+/**
+ *	ctrl_xmit - send a packet through an SGE control Tx queue
+ *	@adap: the adapter
+ *	@q: the control queue
+ *	@skb: the packet
+ *
+ *	Send a packet through an SGE control Tx queue.  Packets sent through
+ *	a control queue must fit entirely as immediate data in a single Tx
+ *	descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+		     struct sk_buff *skb)
+{
+	int ret;
+	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+	if (unlikely(!immediate(skb))) {
+		WARN_ON(1);
+		dev_kfree_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+	wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+	spin_lock(&q->lock);
+      again:reclaim_completed_tx_imm(q);
+
+	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+	if (unlikely(ret)) {
+		if (ret == 1) {
+			spin_unlock(&q->lock);
+			return NET_XMIT_CN;
+		}
+		goto again;
+	}
+
+	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+	q->in_use++;
+	if (++q->pidx >= q->size) {
+		q->pidx = 0;
+		q->gen ^= 1;
+	}
+	spin_unlock(&q->lock);
+	wmb();
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ctrlq - restart a suspended control queue
+ *	@qs: the queue set containing the control queue
+ *
+ *	Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct sge_txq *q = &qs->txq[TXQ_CTRL];
+
+	spin_lock(&q->lock);
+      again:reclaim_completed_tx_imm(q);
+
+	while (q->in_use < q->size &&
+	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+		if (++q->pidx >= q->size) {
+			q->pidx = 0;
+			q->gen ^= 1;
+		}
+		q->in_use++;
+	}
+
+	if (!skb_queue_empty(&q->sendq)) {
+		set_bit(TXQ_CTRL, &qs->txq_stopped);
+		smp_mb__after_atomic();
+
+		if (should_restart_tx(q) &&
+		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+			goto again;
+		q->stops++;
+	}
+
+	spin_unlock(&q->lock);
+	wmb();
+	t3_write_reg(qs->adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/*
+ * Send a management message through control queue 0
+ */
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+	local_bh_disable();
+	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+	local_bh_enable();
+
+	return ret;
+}
+
+/**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	int i;
+	const dma_addr_t *p;
+	const struct skb_shared_info *si;
+	const struct deferred_unmap_info *dui;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	p = dui->addr;
+
+	if (skb_tail_pointer(skb) - skb_transport_header(skb))
+		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
+				 skb_transport_header(skb), PCI_DMA_TODEVICE);
+
+	si = skb_shinfo(skb);
+	for (i = 0; i < si->nr_frags; i++)
+		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
+			       PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+				     const struct sg_ent *sgl, int sgl_flits)
+{
+	dma_addr_t *p;
+	struct deferred_unmap_info *dui;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	dui->pdev = pdev;
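+	/*
+	 * Each complete SGL entry spans 3 flits and carries two DMA
+	 * addresses; a trailing partial entry carries one.  Record the
+	 * addresses so deferred_unmap_destructor() can unmap the buffers
+	 * when the skb is finally freed.
+	 */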
+	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+		*p++ = be64_to_cpu(sgl->addr[0]);
+		*p++ = be64_to_cpu(sgl->addr[1]);
+	}
+	if (sgl_flits)
+		*p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
+ *	write_ofld_wr - write an offload work request
+ *	@adap: the adapter
+ *	@skb: the packet to send
+ *	@q: the Tx queue
+ *	@pidx: index of the first Tx descriptor to write
+ *	@gen: the generation value to use
+ *	@ndesc: number of descriptors the packet will occupy
+ *
+ *	Write an offload work request to send the supplied packet.  The packet
+ *	data already carry the work request with most fields populated.
+ */
+static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+			  struct sge_txq *q, unsigned int pidx,
+			  unsigned int gen, unsigned int ndesc)
+{
+	unsigned int sgl_flits, flits;
+	struct work_request_hdr *from;
+	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+	struct tx_desc *d = &q->desc[pidx];
+
+	if (immediate(skb)) {
+		q->sdesc[pidx].skb = NULL;
+		write_imm(d, skb, skb->len, gen);
+		return;
+	}
+
+	/* Only TX_DATA builds SGLs */
+
+	from = (struct work_request_hdr *)skb->data;
+	memcpy(&d->flit[1], &from[1],
+	       skb_transport_offset(skb) - sizeof(*from));
+
+	flits = skb_transport_offset(skb) / 8;
+	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+			     skb_tail_pointer(skb) -
+			     skb_transport_header(skb),
+			     adap->pdev);
+	if (need_skb_unmap()) {
+		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+		skb->destructor = deferred_unmap_destructor;
+	}
+
+	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+			 gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ *	@skb: the packet
+ *
+ * 	Returns the number of Tx descriptors needed for the given offload
+ * 	packet.  These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (skb->len <= WR_LEN)
+		return 1;	/* packet fits as immediate data */
+
+	flits = skb_transport_offset(skb) / 8;	/* headers */
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb_tail_pointer(skb) != skb_transport_header(skb))
+		cnt++;
+	return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ *	ofld_xmit - send a packet through an offload queue
+ *	@adap: the adapter
+ *	@q: the Tx offload queue
+ *	@skb: the packet
+ *
+ *	Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+		     struct sk_buff *skb)
+{
+	int ret;
+	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+	spin_lock(&q->lock);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+	if (unlikely(ret)) {
+		if (ret == 1) {
+			skb->priority = ndesc;	/* save for restart */
+			spin_unlock(&q->lock);
+			return NET_XMIT_CN;
+		}
+		goto again;
+	}
+
+	gen = q->gen;
+	q->in_use += ndesc;
+	pidx = q->pidx;
+	q->pidx += ndesc;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->gen ^= 1;
+	}
+	spin_unlock(&q->lock);
+
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+	check_ring_tx_db(adap, q);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_offloadq - restart a suspended offload queue
+ *	@qs: the queue set containing the offload queue
+ *
+ *	Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct sge_txq *q = &qs->txq[TXQ_OFLD];
+	const struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+
+	spin_lock(&q->lock);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+	while ((skb = skb_peek(&q->sendq)) != NULL) {
+		unsigned int gen, pidx;
+		unsigned int ndesc = skb->priority;
+
+		if (unlikely(q->size - q->in_use < ndesc)) {
+			set_bit(TXQ_OFLD, &qs->txq_stopped);
+			smp_mb__after_atomic();
+
+			if (should_restart_tx(q) &&
+			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+				goto again;
+			q->stops++;
+			break;
+		}
+
+		gen = q->gen;
+		q->in_use += ndesc;
+		pidx = q->pidx;
+		q->pidx += ndesc;
+		if (q->pidx >= q->size) {
+			q->pidx -= q->size;
+			q->gen ^= 1;
+		}
+		__skb_unlink(skb, &q->sendq);
+		spin_unlock(&q->lock);
+
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+		spin_lock(&q->lock);
+	}
+	spin_unlock(&q->lock);
+
+#if USE_GTS
+	set_bit(TXQ_RUNNING, &q->flags);
+	set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+	wmb();
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ *	queue_set - return the queue set a packet should use
+ *	@skb: the packet
+ *
+ *	Maps a packet to the SGE queue set it should use.  The desired queue
+ *	set is carried in bits 1-3 in the packet's priority.
+ */
+static inline int queue_set(const struct sk_buff *skb)
+{
+	return skb->priority >> 1;
+}
+
+/**
+ *	is_ctrl_pkt - return whether an offload packet is a control packet
+ *	@skb: the packet
+ *
+ *	Determines whether an offload packet should use an OFLD or a CTRL
+ *	Tx queue.  This is indicated by bit 0 in the packet's priority.
+ */
+static inline int is_ctrl_pkt(const struct sk_buff *skb)
+{
+	return skb->priority & 1;
+}
+
+/**
+ *	t3_offload_tx - send an offload packet
+ *	@tdev: the offload device to send to
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  We use the packet priority to select the
+ *	appropriate Tx queue as follows: bit 0 indicates whether the packet
+ *	should be sent as regular or control, bits 1-3 select the queue set.
+ */
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+	struct adapter *adap = tdev2adap(tdev);
+	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
+
+	if (unlikely(is_ctrl_pkt(skb)))
+		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
+
+	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
+}
+
+/**
+ *	offload_enqueue - add an offload packet to an SGE offload receive queue
+ *	@q: the SGE response queue
+ *	@skb: the packet
+ *
+ *	Add a new offload packet to an SGE response queue's offload packet
+ *	queue.  If the packet is the first on the queue it schedules the RX
+ *	softirq to process the queue.
+ */
+static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+{
+	int was_empty = skb_queue_empty(&q->rx_queue);
+
+	__skb_queue_tail(&q->rx_queue, skb);
+
+	if (was_empty) {
+		struct sge_qset *qs = rspq_to_qset(q);
+
+		napi_schedule(&qs->napi);
+	}
+}
+
+/**
+ *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
+ *	@tdev: the offload device that will be receiving the packets
+ *	@q: the SGE response queue that assembled the bundle
+ *	@skbs: the partial bundle
+ *	@n: the number of packets in the bundle
+ *
+ *	Delivers a (partial) bundle of Rx offload packets to an offload device.
+ */
+static inline void deliver_partial_bundle(struct t3cdev *tdev,
+					  struct sge_rspq *q,
+					  struct sk_buff *skbs[], int n)
+{
+	if (n) {
+		q->offload_bundles++;
+		tdev->recv(tdev, skbs, n);
+	}
+}
+
+/**
+ *	ofld_poll - NAPI handler for offload packets in interrupt mode
+ *	@napi: the napi instance doing the polling
+ *	@budget: polling budget
+ *
+ *	The NAPI handler for offload packets when a response queue is serviced
+ *	by the hard interrupt handler, i.e., when it's operating in non-polling
+ *	mode.  Creates small packet batches and sends them through the offload
+ *	receive handler.  Batches need to be of modest size as we do prefetches
+ *	on the packets in each.
+ */
+static int ofld_poll(struct napi_struct *napi, int budget)
+{
+	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+	struct sge_rspq *q = &qs->rspq;
+	struct adapter *adapter = qs->adap;
+	int work_done = 0;
+
+	while (work_done < budget) {
+		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+		struct sk_buff_head queue;
+		int ngathered;
+
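+		/*
+		 * Move everything currently on the offload Rx queue onto a
+		 * private list under the lock, then hand the packets to the
+		 * offload handler in RX_BUNDLE_SIZE batches with the lock
+		 * released.
+		 */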
+		spin_lock_irq(&q->lock);
+		__skb_queue_head_init(&queue);
+		skb_queue_splice_init(&q->rx_queue, &queue);
+		if (skb_queue_empty(&queue)) {
+			napi_complete(napi);
+			spin_unlock_irq(&q->lock);
+			return work_done;
+		}
+		spin_unlock_irq(&q->lock);
+
+		ngathered = 0;
+		skb_queue_walk_safe(&queue, skb, tmp) {
+			if (work_done >= budget)
+				break;
+			work_done++;
+
+			__skb_unlink(skb, &queue);
+			prefetch(skb->data);
+			skbs[ngathered] = skb;
+			if (++ngathered == RX_BUNDLE_SIZE) {
+				q->offload_bundles++;
+				adapter->tdev.recv(&adapter->tdev, skbs,
+						   ngathered);
+				ngathered = 0;
+			}
+		}
+		if (!skb_queue_empty(&queue)) {
+			/* splice remaining packets back onto Rx queue */
+			spin_lock_irq(&q->lock);
+			skb_queue_splice(&queue, &q->rx_queue);
+			spin_unlock_irq(&q->lock);
+		}
+		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+	}
+
+	return work_done;
+}
+
+/**
+ *	rx_offload - process a received offload packet
+ *	@tdev: the offload device receiving the packet
+ *	@rq: the response queue that received the packet
+ *	@skb: the packet
+ *	@rx_gather: a gather list of packets if we are building a bundle
+ *	@gather_idx: index of the next available slot in the bundle
+ *
+ *	Process an ingress offload packet and add it to the offload ingress
+ *	queue.  Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+			     struct sk_buff *skb, struct sk_buff *rx_gather[],
+			     unsigned int gather_idx)
+{
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
+	if (rq->polling) {
+		rx_gather[gather_idx++] = skb;
+		if (gather_idx == RX_BUNDLE_SIZE) {
+			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+			gather_idx = 0;
+			rq->offload_bundles++;
+		}
+	} else
+		offload_enqueue(rq, skb);
+
+	return gather_idx;
+}
+
+/**
+ *	restart_tx - check whether to restart suspended Tx queues
+ *	@qs: the queue set to resume
+ *
+ *	Restarts suspended Tx queues of an SGE queue set if they have enough
+ *	free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
+	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+		qs->txq[TXQ_ETH].restarts++;
+		if (netif_running(qs->netdev))
+			netif_tx_wake_queue(qs->tx_q);
+	}
+
+	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+		qs->txq[TXQ_OFLD].restarts++;
+		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+	}
+	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+		qs->txq[TXQ_CTRL].restarts++;
+		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+	}
+}
+
+/**
+ *	cxgb3_arp_process - process an ARP request probing a private IP address
+ *	@pi: the port information for the interface that received the request
+ *	@skb: the skbuff containing the ARP request
+ *
+ *	Check if the ARP request is probing the private IP address
+ *	dedicated to iSCSI, generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	struct arphdr *arp;
+	unsigned char *arp_ptr;
+	unsigned char *sha;
+	__be32 sip, tip;
+
+	if (!dev)
+		return;
+
+	skb_reset_network_header(skb);
+	arp = arp_hdr(skb);
+
+	if (arp->ar_op != htons(ARPOP_REQUEST))
+		return;
+
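+	/*
+	 * The ARP payload follows the header as: sender hardware address,
+	 * sender IP, target hardware address, target IP.  Pick out the
+	 * sender's MAC and IP and the probed target IP.
+	 */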
+	arp_ptr = (unsigned char *)(arp + 1);
+	sha = arp_ptr;
+	arp_ptr += dev->addr_len;
+	memcpy(&sip, arp_ptr, sizeof(sip));
+	arp_ptr += sizeof(sip);
+	arp_ptr += dev->addr_len;
+	memcpy(&tip, arp_ptr, sizeof(tip));
+
+	if (tip != pi->iscsi_ipv4addr)
+		return;
+
+	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+		 pi->iscsic.mac_addr, sha);
+
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_ARP);
+}
+
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+					struct sk_buff *skb)
+{
+	if (is_arp(skb)) {
+		cxgb3_arp_process(pi, skb);
+		return;
+	}
+
+	if (pi->iscsic.recv)
+		pi->iscsic.recv(pi, skb);
+
+}
+
+/**
+ *	rx_eth - process an ingress ethernet packet
+ *	@adap: the adapter
+ *	@rq: the response queue that received the packet
+ *	@skb: the packet
+ *	@pad: amount of padding at the start of the buffer
+ *	@lro: whether to deliver the packet via GRO
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ *	The padding is 2 if the packet was delivered in an Rx buffer and 0
+ *	if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+		   struct sk_buff *skb, int pad, int lro)
+{
+	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+	struct sge_qset *qs = rspq_to_qset(rq);
+	struct port_info *pi;
+
+	skb_pull(skb, sizeof(*p) + pad);
+	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+	pi = netdev_priv(skb->dev);
+	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
+	    p->csum == htons(0xffff) && !p->fragment) {
+		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb_checksum_none_assert(skb);
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+	if (p->vlan_valid) {
+		qs->port_stats[SGE_PSTAT_VLANEX]++;
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
+	}
+	if (rq->polling) {
+		if (lro)
+			napi_gro_receive(&qs->napi, skb);
+		else {
+			if (unlikely(pi->iscsic.flags))
+				cxgb3_process_iscsi_prov_pack(pi, skb);
+			netif_receive_skb(skb);
+		}
+	} else
+		netif_rx(skb);
+}
+
+static inline int is_eth_tcp(u32 rss)
+{
+	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ *	lro_add_page - add a page chunk to an LRO session
+ *	@adap: the adapter
+ *	@qs: the associated queue set
+ *	@fl: the free list containing the page chunk to add
+ *	@len: packet length
+ *	@complete: Indicates the last fragment of a frame
+ *
+ *	Add a received packet contained in a page chunk to an existing LRO
+ *	session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+			 struct sge_fl *fl, int len, int complete)
+{
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct sk_buff *skb = NULL;
+	struct cpl_rx_pkt *cpl;
+	struct skb_frag_struct *rx_frag;
+	int nr_frags;
+	int offset = 0;
+
+	if (!qs->nomem) {
+		skb = napi_get_frags(&qs->napi);
+		qs->nomem = !skb;
+	}
+
+	fl->credits--;
+
+	pci_dma_sync_single_for_cpu(adap->pdev,
+				    dma_unmap_addr(sd, dma_addr),
+				    fl->buf_size - SGE_PG_RSVD,
+				    PCI_DMA_FROMDEVICE);
+
+	(*sd->pg_chunk.p_cnt)--;
+	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+		pci_unmap_page(adap->pdev,
+			       sd->pg_chunk.mapping,
+			       fl->alloc_size,
+			       PCI_DMA_FROMDEVICE);
+
+	if (!skb) {
+		put_page(sd->pg_chunk.page);
+		if (complete)
+			qs->nomem = 0;
+		return;
+	}
+
+	rx_frag = skb_shinfo(skb)->frags;
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
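+	/*
+	 * The first chunk of a frame starts with 2 bytes of padding followed
+	 * by the CPL_RX_PKT header; skip both so only packet payload is
+	 * attached to the skb's fragments.
+	 */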
+	if (!nr_frags) {
+		offset = 2 + sizeof(struct cpl_rx_pkt);
+		cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
+		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+		} else
+			skb->ip_summed = CHECKSUM_NONE;
+	} else
+		cpl = qs->lro_va;
+
+	len -= offset;
+
+	rx_frag += nr_frags;
+	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
+	rx_frag->page_offset = sd->pg_chunk.offset + offset;
+	skb_frag_size_set(rx_frag, len);
+
+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += len;
+	skb_shinfo(skb)->nr_frags++;
+
+	if (!complete)
+		return;
+
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+	if (cpl->vlan_valid) {
+		qs->port_stats[SGE_PSTAT_VLANEX]++;
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
+	}
+	napi_gro_frags(&qs->napi);
+}
+
+/**
+ *	handle_rsp_cntrl_info - handles control information in a response
+ *	@qs: the queue set corresponding to the response
+ *	@flags: the response control flags
+ *
+ *	Handles the control information of an SGE response, such as GTS
+ *	indications and completion credits for the queue set's Tx queues.
+ *	HW coalesces credits, we don't do any extra SW coalescing.
+ */
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
+{
+	unsigned int credits;
+
+#if USE_GTS
+	if (flags & F_RSPD_TXQ0_GTS)
+		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
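+	/*
+	 * Completion credits are reported per hardware Tx queue: TXQ0 maps
+	 * to the Ethernet queue, TXQ1 to the offload queue and TXQ2 to the
+	 * control queue.
+	 */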
+	credits = G_RSPD_TXQ0_CR(flags);
+	if (credits)
+		qs->txq[TXQ_ETH].processed += credits;
+
+	credits = G_RSPD_TXQ2_CR(flags);
+	if (credits)
+		qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+	if (flags & F_RSPD_TXQ1_GTS)
+		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+	credits = G_RSPD_TXQ1_CR(flags);
+	if (credits)
+		qs->txq[TXQ_OFLD].processed += credits;
+}
+
+/**
+ *	check_ring_db - check if we need to ring any doorbells
+ *	@adap: the adapter
+ *	@qs: the queue set whose Tx queues are to be examined
+ *	@sleeping: indicates which Tx queue sent GTS
+ *
+ *	Checks if some of a queue set's Tx queues need to ring their doorbells
+ *	to resume transmission after idling while they still have unprocessed
+ *	descriptors.
+ */
+static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+			  unsigned int sleeping)
+{
+	if (sleeping & F_RSPD_TXQ0_GTS) {
+		struct sge_txq *txq = &qs->txq[TXQ_ETH];
+
+		if (txq->cleaned + txq->in_use != txq->processed &&
+		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+			set_bit(TXQ_RUNNING, &txq->flags);
+			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+				     V_EGRCNTX(txq->cntxt_id));
+		}
+	}
+
+	if (sleeping & F_RSPD_TXQ1_GTS) {
+		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
+
+		if (txq->cleaned + txq->in_use != txq->processed &&
+		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+			set_bit(TXQ_RUNNING, &txq->flags);
+			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+				     V_EGRCNTX(txq->cntxt_id));
+		}
+	}
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@r: the response descriptor
+ *	@q: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline int is_new_response(const struct rsp_desc *r,
+				  const struct sge_rspq *q)
+{
+	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
+
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+	q->pg_skb = NULL;
+	q->rx_recycle_buf = 0;
+}
+
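+/*
+ * Response flag masks: RSPD_GTS_MASK covers the per-Tx-queue GTS indications
+ * (a Tx queue went idle and may need its doorbell rung) and RSPD_CTRL_MASK
+ * additionally covers the coalesced Tx completion credit fields.
+ */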
+#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
+#define NOMEM_INTR_DELAY 2500
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@adap: the adapter
+ *	@qs: the queue set to which the response queue belongs
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from an SGE response queue up to the supplied budget.
+ *	Responses include received packets as well as credits and other events
+ *	for the queues that belong to the response queue's queue set.
+ *	A negative budget is effectively unlimited.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct adapter *adap, struct sge_qset *qs,
+			     int budget)
+{
+	struct sge_rspq *q = &qs->rspq;
+	struct rsp_desc *r = &q->desc[q->cidx];
+	int budget_left = budget;
+	unsigned int sleeping = 0;
+	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
+	int ngathered = 0;
+
+	q->next_holdoff = q->holdoff_tmr;
+
+	while (likely(budget_left && is_new_response(r, q))) {
+		int packet_complete, eth, ethpad = 2;
+		int lro = !!(qs->netdev->features & NETIF_F_GRO);
+		struct sk_buff *skb = NULL;
+		u32 len, flags;
+		__be32 rss_hi, rss_lo;
+
+		dma_rmb();
+		eth = r->rss_hdr.opcode == CPL_RX_PKT;
+		rss_hi = *(const __be32 *)r;
+		rss_lo = r->rss_hdr.rss_hash_val;
+		flags = ntohl(r->flags);
+
+		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
+			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
+			if (!skb)
+				goto no_mem;
+
+			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+			skb->data[0] = CPL_ASYNC_NOTIF;
+			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
+			q->async_notif++;
+		} else if (flags & F_RSPD_IMM_DATA_VALID) {
+			skb = get_imm_packet(r);
+			if (unlikely(!skb)) {
+no_mem:
+				q->next_holdoff = NOMEM_INTR_DELAY;
+				q->nomem++;
+				/* consume one credit since we tried */
+				budget_left--;
+				break;
+			}
+			q->imm_data++;
+			ethpad = 0;
+		} else if ((len = ntohl(r->len_cq)) != 0) {
+			struct sge_fl *fl;
+
+			lro &= eth && is_eth_tcp(rss_hi);
+
+			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			if (fl->use_pages) {
+				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+
+				prefetch(addr);
+#if L1_CACHE_BYTES < 128
+				prefetch(addr + L1_CACHE_BYTES);
+#endif
+				__refill_fl(adap, fl);
+				if (lro > 0) {
+					lro_add_page(adap, qs, fl,
+						     G_RSPD_LEN(len),
+						     flags & F_RSPD_EOP);
+					 goto next_fl;
+				}
+
+				skb = get_packet_pg(adap, fl, q,
+						    G_RSPD_LEN(len),
+						    eth ?
+						    SGE_RX_DROP_THRES : 0);
+				q->pg_skb = skb;
+			} else
+				skb = get_packet(adap, fl, G_RSPD_LEN(len),
+						 eth ? SGE_RX_DROP_THRES : 0);
+			if (unlikely(!skb)) {
+				if (!eth)
+					goto no_mem;
+				q->rx_drops++;
+			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+				__skb_pull(skb, 2);
+next_fl:
+			if (++fl->cidx == fl->size)
+				fl->cidx = 0;
+		} else
+			q->pure_rsps++;
+
+		if (flags & RSPD_CTRL_MASK) {
+			sleeping |= flags & RSPD_GTS_MASK;
+			handle_rsp_cntrl_info(qs, flags);
+		}
+
+		r++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->gen ^= 1;
+			r = q->desc;
+		}
+		prefetch(r);
+
+		if (++q->credits >= (q->size / 4)) {
+			refill_rspq(adap, q, q->credits);
+			q->credits = 0;
+		}
+
+		packet_complete = flags &
+				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+				   F_RSPD_ASYNC_NOTIF);
+
+		if (skb != NULL && packet_complete) {
+			if (eth)
+				rx_eth(adap, q, skb, ethpad, lro);
+			else {
+				q->offload_pkts++;
+				/* Preserve the RSS info in csum & priority */
+				skb->csum = rss_hi;
+				skb->priority = rss_lo;
+				ngathered = rx_offload(&adap->tdev, q, skb,
+						       offload_skbs,
+						       ngathered);
+			}
+
+			if (flags & F_RSPD_EOP)
+				clear_rspq_bufstate(q);
+		}
+		--budget_left;
+	}
+
+	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
+	if (sleeping)
+		check_ring_db(adap, qs, sleeping);
+
+	smp_mb();		/* commit Tx queue .processed updates */
+	if (unlikely(qs->txq_stopped != 0))
+		restart_tx(qs);
+
+	budget -= budget_left;
+	return budget;
+}
+
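+/*
+ * A response is "pure" when it carries no data at all: no async notification,
+ * no immediate data and no free-list buffer (len_cq is zero).
+ */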
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+	return (n | r->len_cq) == 0;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for Rx processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+	struct adapter *adap = qs->adap;
+	int work_done = process_responses(adap, qs, budget);
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
+
+		/*
+		 * Because we don't atomically flush the following
+		 * write it is possible that in very rare cases it can
+		 * reach the device in a way that races with a new
+		 * response being written plus an error interrupt
+		 * causing the NAPI interrupt handler below to return
+		 * unhandled status to the OS.  To protect against
+		 * this would require flushing the write and doing
+		 * both the write and the flush with interrupts off.
+		 * Way too expensive and unjustifiable given the
+		 * rarity of the race.
+		 *
+		 * The race cannot happen at all with MSI-X.
+		 */
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+			     V_NEWTIMER(qs->rspq.next_holdoff) |
+			     V_NEWINDEX(qs->rspq.cidx));
+	}
+	return work_done;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct napi_struct *napi)
+{
+	return test_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+/**
+ *	process_pure_responses - process pure responses from a response queue
+ *	@adap: the adapter
+ *	@qs: the queue set owning the response queue
+ *	@r: the first pure response to process
+ *
+ *	A simpler version of process_responses() that handles only pure (i.e.,
+ *	non data-carrying) responses.  Such responses are too lightweight to
+ *	justify calling a softirq under NAPI, so we handle them specially in
+ *	the interrupt handler.  The function is called with a pointer to a
+ *	response, which the caller must ensure is a valid pure response.
+ *
+ *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+				  struct rsp_desc *r)
+{
+	struct sge_rspq *q = &qs->rspq;
+	unsigned int sleeping = 0;
+
+	do {
+		u32 flags = ntohl(r->flags);
+
+		r++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->gen ^= 1;
+			r = q->desc;
+		}
+		prefetch(r);
+
+		if (flags & RSPD_CTRL_MASK) {
+			sleeping |= flags & RSPD_GTS_MASK;
+			handle_rsp_cntrl_info(qs, flags);
+		}
+
+		q->pure_rsps++;
+		if (++q->credits >= (q->size / 4)) {
+			refill_rspq(adap, q, q->credits);
+			q->credits = 0;
+		}
+		if (!is_new_response(r, q))
+			break;
+		dma_rmb();
+	} while (is_pure_response(r));
+
+	if (sleeping)
+		check_ring_db(adap, qs, sleeping);
+
+	smp_mb();		/* commit Tx queue .processed updates */
+	if (unlikely(qs->txq_stopped != 0))
+		restart_tx(qs);
+
+	return is_new_response(r, q);
+}
+
+/**
+ *	handle_responses - decide what to do with new responses in NAPI mode
+ *	@adap: the adapter
+ *	@q: the response queue
+ *
+ *	This is used by the NAPI interrupt handlers to decide what to do with
+ *	new SGE responses.  If there are no new responses it returns -1.  If
+ *	there are new responses and they are pure (i.e., non-data carrying)
+ *	it handles them straight in hard interrupt context as they are very
+ *	cheap and don't deliver any packets.  Finally, if there are any data
+ *	signaling responses it schedules the NAPI handler.  Returns 1 if it
+ *	schedules NAPI, 0 if all new responses were pure.
+ *
+ *	The caller must ascertain NAPI is not already running.
+ */
+static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+{
+	struct sge_qset *qs = rspq_to_qset(q);
+	struct rsp_desc *r = &q->desc[q->cidx];
+
+	if (!is_new_response(r, q))
+		return -1;
+	dma_rmb();
+	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+		return 0;
+	}
+	napi_schedule(&qs->napi);
+	return 1;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
+ * (i.e., response queue serviced in hard interrupt).
+ */
+static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_qset *qs = cookie;
+	struct adapter *adap = qs->adap;
+	struct sge_rspq *q = &qs->rspq;
+
+	spin_lock(&q->lock);
+	if (process_responses(adap, qs, -1) == 0)
+		q->unhandled_irqs++;
+	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+{
+	struct sge_qset *qs = cookie;
+	struct sge_rspq *q = &qs->rspq;
+
+	spin_lock(&q->lock);
+
+	if (handle_responses(qs->adap, q) < 0)
+		q->unhandled_irqs++;
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * The non-NAPI MSI interrupt handler.  This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same MSI vector.  We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi(int irq, void *cookie)
+{
+	int new_packets = 0;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+	spin_lock(&q->lock);
+
+	if (process_responses(adap, &adap->sge.qs[0], -1)) {
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+		new_packets = 1;
+	}
+
+	if (adap->params.nports == 2 &&
+	    process_responses(adap, &adap->sge.qs[1], -1)) {
+		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+			     V_NEWTIMER(q1->next_holdoff) |
+			     V_NEWINDEX(q1->cidx));
+		new_packets = 1;
+	}
+
+	if (!new_packets && t3_slow_intr_handler(adap) == 0)
+		q->unhandled_irqs++;
+
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+static int rspq_check_napi(struct sge_qset *qs)
+{
+	struct sge_rspq *q = &qs->rspq;
+
+	if (!napi_is_scheduled(&qs->napi) &&
+	    is_new_response(&q->desc[q->cidx], q)) {
+		napi_schedule(&qs->napi);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
+ * by NAPI polling).  Handles data events from SGE response queues as well as
+ * error and other async events as they all use the same MSI vector.  We use
+ * one SGE response queue per port in this mode and protect all response
+ * queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+{
+	int new_packets;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+	spin_lock(&q->lock);
+
+	new_packets = rspq_check_napi(&adap->sge.qs[0]);
+	if (adap->params.nports == 2)
+		new_packets += rspq_check_napi(&adap->sge.qs[1]);
+	if (!new_packets && t3_slow_intr_handler(adap) == 0)
+		q->unhandled_irqs++;
+
+	spin_unlock(&q->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static inline int process_responses_gts(struct adapter *adap,
+					struct sge_rspq *rq)
+{
+	int work;
+
+	work = process_responses(adap, rspq_to_qset(rq), -1);
+	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+	return work;
+}
+
+/*
+ * The legacy INTx interrupt handler.  This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same interrupt pin.  We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr(int irq, void *cookie)
+{
+	int work_done, w0, w1;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+	spin_lock(&q0->lock);
+
+	w0 = is_new_response(&q0->desc[q0->cidx], q0);
+	w1 = adap->params.nports == 2 &&
+	    is_new_response(&q1->desc[q1->cidx], q1);
+
+	if (likely(w0 | w1)) {
+		t3_write_reg(adap, A_PL_CLI, 0);
+		t3_read_reg(adap, A_PL_CLI);	/* flush */
+
+		if (likely(w0))
+			process_responses_gts(adap, q0);
+
+		if (w1)
+			process_responses_gts(adap, q1);
+
+		work_done = w0 | w1;
+	} else
+		work_done = t3_slow_intr_handler(adap);
+
+	spin_unlock(&q0->lock);
+	return IRQ_RETVAL(work_done != 0);
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin.  We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr(int irq, void *cookie)
+{
+	u32 map;
+	struct adapter *adap = cookie;
+	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+	t3_write_reg(adap, A_PL_CLI, 0);
+	map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+	if (unlikely(!map))	/* shared interrupt, most likely */
+		return IRQ_NONE;
+
+	spin_lock(&q0->lock);
+
+	if (unlikely(map & F_ERRINTR))
+		t3_slow_intr_handler(adap);
+
+	if (likely(map & 1))
+		process_responses_gts(adap, q0);
+
+	if (map & 2)
+		process_responses_gts(adap, &adap->sge.qs[1].rspq);
+
+	spin_unlock(&q0->lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin.  We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+{
+	u32 map;
+	struct adapter *adap = cookie;
+	struct sge_qset *qs0 = &adap->sge.qs[0];
+	struct sge_rspq *q0 = &qs0->rspq;
+
+	t3_write_reg(adap, A_PL_CLI, 0);
+	map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+	if (unlikely(!map))	/* shared interrupt, most likely */
+		return IRQ_NONE;
+
+	spin_lock(&q0->lock);
+
+	if (unlikely(map & F_ERRINTR))
+		t3_slow_intr_handler(adap);
+
+	if (likely(map & 1))
+		napi_schedule(&qs0->napi);
+
+	if (map & 2)
+		napi_schedule(&adap->sge.qs[1].napi);
+
+	spin_unlock(&q0->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	t3_intr_handler - select the top-level interrupt handler
+ *	@adap: the adapter
+ *	@polling: whether using NAPI to service response queues
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
+ *	response queues.
+ */
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
+{
+	if (adap->flags & USING_MSIX)
+		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
+	if (adap->flags & USING_MSI)
+		return polling ? t3_intr_msi_napi : t3_intr_msi;
+	if (adap->params.rev > 0)
+		return polling ? t3b_intr_napi : t3b_intr;
+	return t3_intr;
+}
+
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+		    F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+		      F_RSPQDISABLED)
+
+/**
+ *	t3_sge_err_intr_handler - SGE async event interrupt handler
+ *	@adapter: the adapter
+ *
+ *	Interrupt handler for SGE asynchronous (non-data) events.
+ */
+void t3_sge_err_intr_handler(struct adapter *adapter)
+{
+	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+				 ~F_FLEMPTY;
+
+	if (status & SGE_PARERR)
+		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+			 status & SGE_PARERR);
+	if (status & SGE_FRAMINGERR)
+		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+			 status & SGE_FRAMINGERR);
+
+	if (status & F_RSPQCREDITOVERFOW)
+		CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+	if (status & F_RSPQDISABLED) {
+		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+		CH_ALERT(adapter,
+			 "packet delivered to disabled response queue "
+			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+	}
+
+	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+		queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+		queue_work(cxgb3_wq, &adapter->db_full_task);
+
+	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+		queue_work(cxgb3_wq, &adapter->db_empty_task);
+
+	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+	if (status &  SGE_FATALERR)
+		t3_fatal_err(adapter);
+}
+
+/**
+ *	sge_timer_tx - perform periodic maintenance of an SGE qset
+ *	@data: the SGE queue set to maintain
+ *
+ *	Runs periodically from a timer to perform maintenance of an SGE queue
+ *	set.  It performs the following task:
+ *
+ *	Cleans up any completed Tx descriptors that may still be pending.
+ *	Normal descriptor cleanup happens when new packets are added to a Tx
+ *	queue so this timer is relatively infrequent and does any cleanup only
+ *	if the Tx queue has not seen any new packets in a while.  We make a
+ *	best effort attempt to reclaim descriptors, in that we don't wait
+ *	around if we cannot get a queue's lock (which most likely is because
+ *	someone else is queueing new packets and so will also handle the clean
+ *	up).  Since control queues use immediate data exclusively we don't
+ *	bother cleaning them up here.
+ *
+ */
+static void sge_timer_tx(unsigned long data)
+{
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+	unsigned long next_period;
+
+	if (__netif_tx_trylock(qs->tx_q)) {
+		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+						    TX_RECLAIM_TIMER_CHUNK);
+		__netif_tx_unlock(qs->tx_q);
+	}
+
+	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+						     TX_RECLAIM_TIMER_CHUNK);
+		spin_unlock(&qs->txq[TXQ_OFLD].lock);
+	}
+
+	next_period = TX_RECLAIM_PERIOD >>
+		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+		      TX_RECLAIM_TIMER_CHUNK);
+	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/**
+ *	sge_timer_rx - perform periodic maintenance of an SGE qset
+ *	@data: the SGE queue set to maintain
+ *
+ *	a) Replenishes Rx queues that have run out due to memory shortage.
+ *	Normally new Rx buffers are added when existing ones are consumed but
+ *	when out of memory a queue can become empty.  We try to add only a few
+ *	buffers here, the queue will be replenished fully as these new buffers
+ *	are used up if memory shortage has subsided.
+ *
+ *	b) Return coalesced response queue credits in case a response queue is
+ *	starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+	spinlock_t *lock;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	u32 status;
+
+	lock = adap->params.rev > 0 ?
+	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+	if (!spin_trylock_irq(lock))
+		goto out;
+
+	if (napi_is_scheduled(&qs->napi))
+		goto unlock;
+
+	if (adap->params.rev < 4) {
+		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+		if (status & (1 << qs->rspq.cntxt_id)) {
+			qs->rspq.starved++;
+			if (qs->rspq.credits) {
+				qs->rspq.credits--;
+				refill_rspq(adap, &qs->rspq, 1);
+				qs->rspq.restarted++;
+				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+					     1 << qs->rspq.cntxt_id);
+			}
+		}
+	}
+
+	if (qs->fl[0].credits < qs->fl[0].size)
+		__refill_fl(adap, &qs->fl[0]);
+	if (qs->fl[1].credits < qs->fl[1].size)
+		__refill_fl(adap, &qs->fl[1]);
+
+unlock:
+	spin_unlock_irq(lock);
+out:
+	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+}
+
+/**
+ *	t3_update_qset_coalesce - update coalescing settings for a queue set
+ *	@qs: the SGE queue set
+ *	@p: new queue set parameters
+ *
+ *	Update the coalescing settings for an SGE queue set.  Nothing is done
+ *	if the queue set is not initialized yet.
+ */
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
+	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
+	qs->rspq.polling = p->polling;
+	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
+}
+
+/**
+ *	t3_sge_alloc_qset - initialize an SGE queue set
+ *	@adapter: the adapter
+ *	@id: the queue set id
+ *	@nports: how many Ethernet ports will be using this queue set
+ *	@irq_vec_idx: the IRQ vector index for response queue interrupts
+ *	@p: configuration parameters for this queue set
+ *	@ntxq: number of Tx queues for the queue set
+ *	@dev: net device associated with this queue set
+ *	@netdevq: net device TX queue associated with this queue set
+ *
+ *	Allocate resources and initialize an SGE queue set.  A queue set
+ *	comprises a response queue, two Rx free-buffer queues, and up to 3
+ *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
+ *	queue, offload queue, and control queue.
+ */
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+		      int irq_vec_idx, const struct qset_params *p,
+		      int ntxq, struct net_device *dev,
+		      struct netdev_queue *netdevq)
+{
+	int i, avail, ret = -ENOMEM;
+	struct sge_qset *q = &adapter->sge.qs[id];
+
+	init_qset_cntxt(q, id);
+	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
+
+	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
+				   sizeof(struct rx_desc),
+				   sizeof(struct rx_sw_desc),
+				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
+	if (!q->fl[0].desc)
+		goto err;
+
+	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
+				   sizeof(struct rx_desc),
+				   sizeof(struct rx_sw_desc),
+				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
+	if (!q->fl[1].desc)
+		goto err;
+
+	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
+				  sizeof(struct rsp_desc), 0,
+				  &q->rspq.phys_addr, NULL);
+	if (!q->rspq.desc)
+		goto err;
+
+	for (i = 0; i < ntxq; ++i) {
+		/*
+		 * The control queue always uses immediate data so does not
+		 * need to keep track of any sk_buffs.
+		 */
+		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
+					    sizeof(struct tx_desc), sz,
+					    &q->txq[i].phys_addr,
+					    &q->txq[i].sdesc);
+		if (!q->txq[i].desc)
+			goto err;
+
+		q->txq[i].gen = 1;
+		q->txq[i].size = p->txq_size[i];
+		spin_lock_init(&q->txq[i].lock);
+		skb_queue_head_init(&q->txq[i].sendq);
+	}
+
+	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
+		     (unsigned long)q);
+	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
+		     (unsigned long)q);
+
+	q->fl[0].gen = q->fl[1].gen = 1;
+	q->fl[0].size = p->fl_size;
+	q->fl[1].size = p->jumbo_size;
+
+	q->rspq.gen = 1;
+	q->rspq.size = p->rspq_size;
+	spin_lock_init(&q->rspq.lock);
+	skb_queue_head_init(&q->rspq.rx_queue);
+
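+	/*
+	 * Stop the Ethernet Tx queue once it can no longer hold one
+	 * maximally fragmented packet for each port sharing this queue set.
+	 */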
+	q->txq[TXQ_ETH].stop_thres = nports *
+	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+
+#if FL0_PG_CHUNK_SIZE > 0
+	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
+#else
+	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
+#endif
+#if FL1_PG_CHUNK_SIZE > 0
+	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
+	q->fl[1].buf_size = is_offload(adapter) ?
+		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+	q->fl[0].order = FL0_PG_ORDER;
+	q->fl[1].order = FL1_PG_ORDER;
+	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
+
+	spin_lock_irq(&adapter->sge.reg_lock);
+
+	/* FL threshold comparison uses < */
+	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
+				   q->rspq.phys_addr, q->rspq.size,
+				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
+	if (ret)
+		goto err_unlock;
+
+	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
+					  q->fl[i].phys_addr, q->fl[i].size,
+					  q->fl[i].buf_size - SGE_PG_RSVD,
+					  p->cong_thres, 1, 0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+				 1, 0);
+	if (ret)
+		goto err_unlock;
+
+	if (ntxq > 1) {
+		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
+					 USE_GTS, SGE_CNTXT_OFLD, id,
+					 q->txq[TXQ_OFLD].phys_addr,
+					 q->txq[TXQ_OFLD].size, 0, 1, 0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	if (ntxq > 2) {
+		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
+					 SGE_CNTXT_CTRL, id,
+					 q->txq[TXQ_CTRL].phys_addr,
+					 q->txq[TXQ_CTRL].size,
+					 q->txq[TXQ_CTRL].token, 1, 0);
+		if (ret)
+			goto err_unlock;
+	}
+
+	spin_unlock_irq(&adapter->sge.reg_lock);
+
+	q->adap = adapter;
+	q->netdev = dev;
+	q->tx_q = netdevq;
+	t3_update_qset_coalesce(q, p);
+
+	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+			  GFP_KERNEL | __GFP_COMP);
+	if (!avail) {
+		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+		goto err;
+	}
+	if (avail < q->fl[0].size)
+		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+			avail);
+
+	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+			  GFP_KERNEL | __GFP_COMP);
+	if (avail < q->fl[1].size)
+		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+			avail);
+	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
+
+	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+		     V_NEWTIMER(q->rspq.holdoff_tmr));
+
+	return 0;
+
+err_unlock:
+	spin_unlock_irq(&adapter->sge.reg_lock);
+err:
+	t3_free_qset(adapter, q);
+	return ret;
+}
+
+/**
+ *	t3_start_sge_timers - start SGE timer callbacks
+ *	@adap: the adapter
+ *
+ *	Starts each SGE queue set's timer callback.
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct sge_qset *q = &adap->sge.qs[i];
+
+		if (q->tx_reclaim_timer.function)
+			mod_timer(&q->tx_reclaim_timer,
+				  jiffies + TX_RECLAIM_PERIOD);
+
+		if (q->rx_reclaim_timer.function)
+			mod_timer(&q->rx_reclaim_timer,
+				  jiffies + RX_RECLAIM_PERIOD);
+	}
+}
+
+/**
+ *	t3_stop_sge_timers - stop SGE timer callbacks
+ *	@adap: the adapter
+ *
+ *	Stops each SGE queue set's timer callback
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct sge_qset *q = &adap->sge.qs[i];
+
+		if (q->tx_reclaim_timer.function)
+			del_timer_sync(&q->tx_reclaim_timer);
+		if (q->rx_reclaim_timer.function)
+			del_timer_sync(&q->rx_reclaim_timer);
+	}
+}
+
+/**
+ *	t3_free_sge_resources - free SGE resources
+ *	@adap: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < SGE_QSETS; ++i)
+		t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ *	t3_sge_start - enable SGE
+ *	@adap: the adapter
+ *
+ *	Enables the SGE for DMAs.  This is the last step in starting packet
+ *	transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ *	t3_sge_stop - disable SGE operation
+ *	@adap: the adapter
+ *
+ *	Disables the DMA engine.  This can be called in emergencies (e.g.,
+ *	from error interrupts) or from normal process context.  In the latter
+ *	case it also disables any pending queue restart tasklets.  Note that
+ *	if it is called in interrupt context it cannot disable the restart
+ *	tasklets as it cannot wait; however, the tasklets will have no effect
+ *	since the doorbells are disabled and the driver will call this again
+ *	later from process context, at which time the tasklets will be stopped
+ *	if they are still running.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+	if (!in_interrupt()) {
+		int i;
+
+		for (i = 0; i < SGE_QSETS; ++i) {
+			struct sge_qset *qs = &adap->sge.qs[i];
+
+			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+		}
+	}
+}
+
+/**
+ *	t3_sge_init - initialize SGE
+ *	@adap: the adapter
+ *	@p: the SGE parameters
+ *
+ *	Performs SGE initialization needed every time after a chip reset.
+ *	We do not initialize any of the queue sets here, instead the driver
+ *	top-level must request those individually.  We also do not enable DMA
+ *	here, that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+	ctrl |= F_EGRGENCTRL;
+#endif
+	if (adap->params.rev > 0) {
+		if (!(adap->flags & (USING_MSIX | USING_MSI)))
+			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+	}
+	t3_write_reg(adap, A_SG_CONTROL, ctrl);
+	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+		     V_LORCQDRBTHRSH(512));
+	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+		     adap->params.rev < T3_REV_C ? 1000 : 500);
+	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+/**
+ *	t3_sge_prep - one-time SGE initialization
+ *	@adap: the associated adapter
+ *	@p: SGE parameters
+ *
+ *	Performs one-time initialization of SGE SW state.  Includes determining
+ *	defaults for the assorted SGE parameters, which admins can change until
+ *	they are used to initialize the SGE.
+ */
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
+{
+	int i;
+
+	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	for (i = 0; i < SGE_QSETS; ++i) {
+		struct qset_params *q = p->qset + i;
+
+		q->polling = adap->params.rev > 0;
+		q->coalesce_usecs = 5;
+		q->rspq_size = 1024;
+		q->fl_size = 1024;
+		q->jumbo_size = 512;
+		q->txq_size[TXQ_ETH] = 1024;
+		q->txq_size[TXQ_OFLD] = 1024;
+		q->txq_size[TXQ_CTRL] = 256;
+		q->cong_thres = 0;
+	}
+
+	spin_lock_init(&adap->sge.reg_lock);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
new file mode 100644
index 0000000..29b6c80
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
@@ -0,0 +1,255 @@
+/*
+ * This file is automatically generated --- any changes will be lost.
+ */
+
+#ifndef _SGE_DEFS_H
+#define _SGE_DEFS_H
+
+#define S_EC_CREDITS    0
+#define M_EC_CREDITS    0x7FFF
+#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
+#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
+
+#define S_EC_GTS    15
+#define V_EC_GTS(x) ((x) << S_EC_GTS)
+#define F_EC_GTS    V_EC_GTS(1U)
+
+#define S_EC_INDEX    16
+#define M_EC_INDEX    0xFFFF
+#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
+#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
+
+#define S_EC_SIZE    0
+#define M_EC_SIZE    0xFFFF
+#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
+#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
+
+#define S_EC_BASE_LO    16
+#define M_EC_BASE_LO    0xFFFF
+#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
+#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
+
+#define S_EC_BASE_HI    0
+#define M_EC_BASE_HI    0xF
+#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
+#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
+
+#define S_EC_RESPQ    4
+#define M_EC_RESPQ    0x7
+#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
+#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
+
+#define S_EC_TYPE    7
+#define M_EC_TYPE    0x7
+#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
+#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
+
+#define S_EC_GEN    10
+#define V_EC_GEN(x) ((x) << S_EC_GEN)
+#define F_EC_GEN    V_EC_GEN(1U)
+
+#define S_EC_UP_TOKEN    11
+#define M_EC_UP_TOKEN    0xFFFFF
+#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
+#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
+
+#define S_EC_VALID    31
+#define V_EC_VALID(x) ((x) << S_EC_VALID)
+#define F_EC_VALID    V_EC_VALID(1U)
+
+#define S_RQ_MSI_VEC    20
+#define M_RQ_MSI_VEC    0x3F
+#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
+#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
+
+#define S_RQ_INTR_EN    26
+#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
+#define F_RQ_INTR_EN    V_RQ_INTR_EN(1U)
+
+#define S_RQ_GEN    28
+#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
+#define F_RQ_GEN    V_RQ_GEN(1U)
+
+#define S_CQ_INDEX    0
+#define M_CQ_INDEX    0xFFFF
+#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
+#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
+
+#define S_CQ_SIZE    16
+#define M_CQ_SIZE    0xFFFF
+#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
+#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
+
+#define S_CQ_BASE_HI    0
+#define M_CQ_BASE_HI    0xFFFFF
+#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
+#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
+
+#define S_CQ_RSPQ    20
+#define M_CQ_RSPQ    0x3F
+#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
+#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
+
+#define S_CQ_ASYNC_NOTIF    26
+#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
+#define F_CQ_ASYNC_NOTIF    V_CQ_ASYNC_NOTIF(1U)
+
+#define S_CQ_ARMED    27
+#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
+#define F_CQ_ARMED    V_CQ_ARMED(1U)
+
+#define S_CQ_ASYNC_NOTIF_SOL    28
+#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
+#define F_CQ_ASYNC_NOTIF_SOL    V_CQ_ASYNC_NOTIF_SOL(1U)
+
+#define S_CQ_GEN    29
+#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
+#define F_CQ_GEN    V_CQ_GEN(1U)
+
+#define S_CQ_ERR    30
+#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
+#define F_CQ_ERR    V_CQ_ERR(1U)
+
+#define S_CQ_OVERFLOW_MODE    31
+#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
+#define F_CQ_OVERFLOW_MODE    V_CQ_OVERFLOW_MODE(1U)
+
+#define S_CQ_CREDITS    0
+#define M_CQ_CREDITS    0xFFFF
+#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
+#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
+
+#define S_CQ_CREDIT_THRES    16
+#define M_CQ_CREDIT_THRES    0x1FFF
+#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
+#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
+
+#define S_FL_BASE_HI    0
+#define M_FL_BASE_HI    0xFFFFF
+#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
+#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
+
+#define S_FL_INDEX_LO    20
+#define M_FL_INDEX_LO    0xFFF
+#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
+#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
+
+#define S_FL_INDEX_HI    0
+#define M_FL_INDEX_HI    0xF
+#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
+#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
+
+#define S_FL_SIZE    4
+#define M_FL_SIZE    0xFFFF
+#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
+#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
+
+#define S_FL_GEN    20
+#define V_FL_GEN(x) ((x) << S_FL_GEN)
+#define F_FL_GEN    V_FL_GEN(1U)
+
+#define S_FL_ENTRY_SIZE_LO    21
+#define M_FL_ENTRY_SIZE_LO    0x7FF
+#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
+#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
+
+#define S_FL_ENTRY_SIZE_HI    0
+#define M_FL_ENTRY_SIZE_HI    0x1FFFFF
+#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
+#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
+
+#define S_FL_CONG_THRES    21
+#define M_FL_CONG_THRES    0x3FF
+#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
+#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
+
+#define S_FL_GTS    31
+#define V_FL_GTS(x) ((x) << S_FL_GTS)
+#define F_FL_GTS    V_FL_GTS(1U)
+
+#define S_FLD_GEN1    31
+#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
+#define F_FLD_GEN1    V_FLD_GEN1(1U)
+
+#define S_FLD_GEN2    0
+#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
+#define F_FLD_GEN2    V_FLD_GEN2(1U)
+
+#define S_RSPD_TXQ1_CR    0
+#define M_RSPD_TXQ1_CR    0x7F
+#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
+#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
+
+#define S_RSPD_TXQ1_GTS    7
+#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
+#define F_RSPD_TXQ1_GTS    V_RSPD_TXQ1_GTS(1U)
+
+#define S_RSPD_TXQ2_CR    8
+#define M_RSPD_TXQ2_CR    0x7F
+#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
+#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
+
+#define S_RSPD_TXQ2_GTS    15
+#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
+#define F_RSPD_TXQ2_GTS    V_RSPD_TXQ2_GTS(1U)
+
+#define S_RSPD_TXQ0_CR    16
+#define M_RSPD_TXQ0_CR    0x7F
+#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
+#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
+
+#define S_RSPD_TXQ0_GTS    23
+#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
+#define F_RSPD_TXQ0_GTS    V_RSPD_TXQ0_GTS(1U)
+
+#define S_RSPD_EOP    24
+#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
+#define F_RSPD_EOP    V_RSPD_EOP(1U)
+
+#define S_RSPD_SOP    25
+#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
+#define F_RSPD_SOP    V_RSPD_SOP(1U)
+
+#define S_RSPD_ASYNC_NOTIF    26
+#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
+#define F_RSPD_ASYNC_NOTIF    V_RSPD_ASYNC_NOTIF(1U)
+
+#define S_RSPD_FL0_GTS    27
+#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
+#define F_RSPD_FL0_GTS    V_RSPD_FL0_GTS(1U)
+
+#define S_RSPD_FL1_GTS    28
+#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
+#define F_RSPD_FL1_GTS    V_RSPD_FL1_GTS(1U)
+
+#define S_RSPD_IMM_DATA_VALID    29
+#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
+#define F_RSPD_IMM_DATA_VALID    V_RSPD_IMM_DATA_VALID(1U)
+
+#define S_RSPD_OFFLOAD    30
+#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
+#define F_RSPD_OFFLOAD    V_RSPD_OFFLOAD(1U)
+
+#define S_RSPD_GEN1    31
+#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
+#define F_RSPD_GEN1    V_RSPD_GEN1(1U)
+
+#define S_RSPD_LEN    0
+#define M_RSPD_LEN    0x7FFFFFFF
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_FLQ    31
+#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
+#define F_RSPD_FLQ    V_RSPD_FLQ(1U)
+
+#define S_RSPD_GEN2    0
+#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
+#define F_RSPD_GEN2    V_RSPD_GEN2(1U)
+
+#define S_RSPD_INR_VEC    1
+#define M_RSPD_INR_VEC    0x7F
+#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
+#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
+
+#endif				/* _SGE_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
new file mode 100644
index 0000000..852c399
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
@@ -0,0 +1,1495 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef T3_CPL_H
+#define T3_CPL_H
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+# include <asm/byteorder.h>
+#endif
+
+enum CPL_opcode {
+	CPL_PASS_OPEN_REQ = 0x1,
+	CPL_PASS_ACCEPT_RPL = 0x2,
+	CPL_ACT_OPEN_REQ = 0x3,
+	CPL_SET_TCB = 0x4,
+	CPL_SET_TCB_FIELD = 0x5,
+	CPL_GET_TCB = 0x6,
+	CPL_PCMD = 0x7,
+	CPL_CLOSE_CON_REQ = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_ABORT_REQ = 0xA,
+	CPL_ABORT_RPL = 0xB,
+	CPL_TX_DATA = 0xC,
+	CPL_RX_DATA_ACK = 0xD,
+	CPL_TX_PKT = 0xE,
+	CPL_RTE_DELETE_REQ = 0xF,
+	CPL_RTE_WRITE_REQ = 0x10,
+	CPL_RTE_READ_REQ = 0x11,
+	CPL_L2T_WRITE_REQ = 0x12,
+	CPL_L2T_READ_REQ = 0x13,
+	CPL_SMT_WRITE_REQ = 0x14,
+	CPL_SMT_READ_REQ = 0x15,
+	CPL_TX_PKT_LSO = 0x16,
+	CPL_PCMD_READ = 0x17,
+	CPL_BARRIER = 0x18,
+	CPL_TID_RELEASE = 0x1A,
+
+	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_ERROR = 0x21,
+	CPL_GET_TCB_RPL = 0x22,
+	CPL_L2T_WRITE_RPL = 0x23,
+	CPL_PCMD_READ_RPL = 0x24,
+	CPL_PCMD_RPL = 0x25,
+	CPL_PEER_CLOSE = 0x26,
+	CPL_RTE_DELETE_RPL = 0x27,
+	CPL_RTE_WRITE_RPL = 0x28,
+	CPL_RX_DDP_COMPLETE = 0x29,
+	CPL_RX_PHYS_ADDR = 0x2A,
+	CPL_RX_PKT = 0x2B,
+	CPL_RX_URG_NOTIFY = 0x2C,
+	CPL_SET_TCB_RPL = 0x2D,
+	CPL_SMT_WRITE_RPL = 0x2E,
+	CPL_TX_DATA_ACK = 0x2F,
+
+	CPL_ABORT_REQ_RSS = 0x30,
+	CPL_ABORT_RPL_RSS = 0x31,
+	CPL_CLOSE_CON_RPL = 0x32,
+	CPL_ISCSI_HDR = 0x33,
+	CPL_L2T_READ_RPL = 0x34,
+	CPL_RDMA_CQE = 0x35,
+	CPL_RDMA_CQE_READ_RSP = 0x36,
+	CPL_RDMA_CQE_ERR = 0x37,
+	CPL_RTE_READ_RPL = 0x38,
+	CPL_RX_DATA = 0x39,
+
+	CPL_ACT_OPEN_RPL = 0x40,
+	CPL_PASS_OPEN_RPL = 0x41,
+	CPL_RX_DATA_DDP = 0x42,
+	CPL_SMT_READ_RPL = 0x43,
+
+	CPL_ACT_ESTABLISH = 0x50,
+	CPL_PASS_ESTABLISH = 0x51,
+
+	CPL_PASS_ACCEPT_REQ = 0x70,
+
+	CPL_ASYNC_NOTIF = 0x80,	/* fake opcode for async notifications */
+
+	CPL_TX_DMA_ACK = 0xA0,
+	CPL_RDMA_READ_REQ = 0xA1,
+	CPL_RDMA_TERMINATE = 0xA2,
+	CPL_TRACE_PKT = 0xA3,
+	CPL_RDMA_EC_STATUS = 0xA5,
+
+	NUM_CPL_CMDS		/* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+	CPL_ERR_NONE = 0,
+	CPL_ERR_TCAM_PARITY = 1,
+	CPL_ERR_TCAM_FULL = 3,
+	CPL_ERR_CONN_RESET = 20,
+	CPL_ERR_CONN_EXIST = 22,
+	CPL_ERR_ARP_MISS = 23,
+	CPL_ERR_BAD_SYN = 24,
+	CPL_ERR_CONN_TIMEDOUT = 30,
+	CPL_ERR_XMIT_TIMEDOUT = 31,
+	CPL_ERR_PERSIST_TIMEDOUT = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_ABORT_FAILED = 42,
+	CPL_ERR_GENERAL = 99
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK = 1,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE = 0,
+	ULP_MODE_ISCSI = 2,
+	ULP_MODE_RDMA = 4,
+	ULP_MODE_TCPDDP = 5
+};
+
+enum {
+	ULP_CRC_HEADER = 1 << 0,
+	ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+	CPL_PASS_OPEN_ACCEPT,
+	CPL_PASS_OPEN_REJECT
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+	CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum {				/* TX_PKT_LSO ethernet types */
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+enum {				/* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+enum {			/* RSS hash type */
+	RSS_HASH_NONE = 0,
+	RSS_HASH_2_TUPLE = 1,
+	RSS_HASH_4_TUPLE = 2,
+	RSS_HASH_TCPV6 = 3
+};
+
+union opcode_tid {
+	__be32 opcode_tid;
+	__u8 opcode;
+};
+
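+/*
+ * Field accessor naming convention used below (and in sge_defs.h):
+ *   S_<FIELD>   bit offset of the field within its word
+ *   M_<FIELD>   field mask, applied after shifting right by S_<FIELD>
+ *   V_<FIELD>() place a value at the field's bit position
+ *   G_<FIELD>() extract the field from a word
+ *   F_<FIELD>   value with a single-bit field set to 1
+ */
+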
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x)    ((x) & 0xFFFFFF)
+
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+
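+/*
+ * Structures below that contain bit fields are declared once per bit field
+ * ordering so that the in-memory layout matches the hardware-defined format
+ * on both little- and big-endian hosts.
+ */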
+struct tcp_options {
+	__be16 mss;
+	__u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:5;
+	__u8 ecn:1;
+	__u8 sack:1;
+	__u8 tstamp:1;
+#else
+	__u8 tstamp:1;
+	__u8 sack:1;
+	__u8 ecn:1;
+	 __u8:5;
+#endif
+};
+
+struct rss_header {
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 cpu_idx:6;
+	__u8 hash_type:2;
+#else
+	__u8 hash_type:2;
+	__u8 cpu_idx:6;
+#endif
+	__be16 cq_idx;
+	__be32 rss_hash_val;
+};
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+};
+
+/* wr_hi fields */
+#define S_WR_SGE_CREDITS    0
+#define M_WR_SGE_CREDITS    0xFF
+#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
+#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
+
+#define S_WR_SGLSFLT    8
+#define M_WR_SGLSFLT    0xFF
+#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
+#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
+
+#define S_WR_BCNTLFLT    16
+#define M_WR_BCNTLFLT    0xF
+#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
+#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+
+#define S_WR_DATATYPE    20
+#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
+#define F_WR_DATATYPE    V_WR_DATATYPE(1U)
+
+#define S_WR_COMPL    21
+#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
+#define F_WR_COMPL    V_WR_COMPL(1U)
+
+#define S_WR_EOP    22
+#define V_WR_EOP(x) ((x) << S_WR_EOP)
+#define F_WR_EOP    V_WR_EOP(1U)
+
+#define S_WR_SOP    23
+#define V_WR_SOP(x) ((x) << S_WR_SOP)
+#define F_WR_SOP    V_WR_SOP(1U)
+
+#define S_WR_OP    24
+#define M_WR_OP    0xFF
+#define V_WR_OP(x) ((x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+/* wr_lo fields */
+#define S_WR_LEN    0
+#define M_WR_LEN    0xFF
+#define V_WR_LEN(x) ((x) << S_WR_LEN)
+#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
+
+#define S_WR_TID    8
+#define M_WR_TID    0xFFFFF
+#define V_WR_TID(x) ((x) << S_WR_TID)
+#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
+
+#define S_WR_CR_FLUSH    30
+#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
+#define F_WR_CR_FLUSH    V_WR_CR_FLUSH(1U)
+
+#define S_WR_GEN    31
+#define V_WR_GEN(x) ((x) << S_WR_GEN)
+#define F_WR_GEN    V_WR_GEN(1U)
+
+# define WR_HDR struct work_request_hdr wr
+# define RSS_HDR
+#else
+# define WR_HDR
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
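+
+/*
+ * WR_HDR and RSS_HDR select which header is compiled into the CPL message
+ * structures below: with CHELSIO_FW undefined, request messages start with a
+ * work_request_hdr and reply messages carry no prefix; with CHELSIO_FW
+ * defined, requests carry no prefix and replies start with an rss_header.
+ */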
+
+/* option 0 lower-half fields */
+#define S_CPL_STATUS    0
+#define M_CPL_STATUS    0xFF
+#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
+#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
+
+#define S_INJECT_TIMER    6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER    V_INJECT_TIMER(1U)
+
+#define S_NO_OFFLOAD    7
+#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
+#define F_NO_OFFLOAD    V_NO_OFFLOAD(1U)
+
+#define S_ULP_MODE    8
+#define M_ULP_MODE    0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ    12
+#define M_RCV_BUFSIZ    0x3FFF
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_TOS    26
+#define M_TOS    0x3F
+#define V_TOS(x) ((x) << S_TOS)
+#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
+
+/* option 0 upper-half fields */
+#define S_DELACK    0
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK    V_DELACK(1U)
+
+#define S_NO_CONG    1
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG    V_NO_CONG(1U)
+
+#define S_SRC_MAC_SEL    2
+#define M_SRC_MAC_SEL    0x3
+#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
+#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
+
+#define S_L2T_IDX    4
+#define M_L2T_IDX    0x7FF
+#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TX_CHANNEL    15
+#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
+#define F_TX_CHANNEL    V_TX_CHANNEL(1U)
+
+#define S_TCAM_BYPASS    16
+#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS    V_TCAM_BYPASS(1U)
+
+#define S_NAGLE    17
+#define V_NAGLE(x) ((x) << S_NAGLE)
+#define F_NAGLE    V_NAGLE(1U)
+
+#define S_WND_SCALE    18
+#define M_WND_SCALE    0xF
+#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE    22
+#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE    V_KEEP_ALIVE(1U)
+
+#define S_MAX_RETRANS    23
+#define M_MAX_RETRANS    0xF
+#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
+#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
+
+#define S_MAX_RETRANS_OVERRIDE    27
+#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
+#define F_MAX_RETRANS_OVERRIDE    V_MAX_RETRANS_OVERRIDE(1U)
+
+#define S_MSS_IDX    28
+#define M_MSS_IDX    0xF
+#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 1 fields */
+#define S_RSS_ENABLE    0
+#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
+#define F_RSS_ENABLE    V_RSS_ENABLE(1U)
+
+#define S_RSS_MASK_LEN    1
+#define M_RSS_MASK_LEN    0x7
+#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
+#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
+
+#define S_CPU_IDX    4
+#define M_CPU_IDX    0x3F
+#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
+#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+
+#define S_MAC_MATCH_VALID    18
+#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
+#define F_MAC_MATCH_VALID    V_MAC_MATCH_VALID(1U)
+
+#define S_CONN_POLICY    19
+#define M_CONN_POLICY    0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+#define S_SYN_DEFENSE    21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE    V_SYN_DEFENSE(1U)
+
+#define S_VLAN_PRI    22
+#define M_VLAN_PRI    0x3
+#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
+#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
+
+#define S_VLAN_PRI_VALID    24
+#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
+#define F_VLAN_PRI_VALID    V_VLAN_PRI_VALID(1U)
+
+#define S_PKT_TYPE    25
+#define M_PKT_TYPE    0x3
+#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
+#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
+
+#define S_MAC_MATCH    27
+#define M_MAC_MATCH    0x1F
+#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
+#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
+
+/* option 2 fields */
+#define S_CPU_INDEX    0
+#define M_CPU_INDEX    0x7F
+#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
+#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
+
+#define S_CPU_INDEX_VALID    7
+#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
+#define F_CPU_INDEX_VALID    V_CPU_INDEX_VALID(1U)
+
+#define S_RX_COALESCE    8
+#define M_RX_COALESCE    0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_RX_COALESCE_VALID    10
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID    V_RX_COALESCE_VALID(1U)
+
+#define S_CONG_CONTROL_FLAVOR    11
+#define M_CONG_CONTROL_FLAVOR    0x3
+#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
+#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
+
+#define S_PACING_FLAVOR    13
+#define M_PACING_FLAVOR    0x3
+#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
+#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
+
+#define S_FLAVORS_VALID    15
+#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
+#define F_FLAVORS_VALID    V_FLAVORS_VALID(1U)
+
+#define S_RX_FC_DISABLE    16
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE    V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_VALID    17
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID    V_RX_FC_VALID(1U)
+
+struct cpl_pass_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l;
+	__be32 peer_netmask;
+	__be32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__u8 resvd[7];
+	__u8 status;
+};
+
+struct cpl_pass_establish {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	__be16 l2t_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_tid fields */
+#define S_PASS_OPEN_TID    0
+#define M_PASS_OPEN_TID    0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS    24
+#define M_PASS_OPEN_TOS    0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.l2t_idx fields */
+#define S_L2T_IDX16    5
+#define M_L2T_IDX16    0x7FF
+#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
+#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define G_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x)       (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
+
+struct cpl_pass_accept_req {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	struct tcp_options tcp_options;
+	__u8 dst_mac[6];
+	__be16 vlan_tag;
+	__u8 src_mac[6];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:3;
+	__u8 addr_idx:3;
+	__u8 port_idx:1;
+	__u8 exact_match:1;
+#else
+	__u8 exact_match:1;
+	__u8 port_idx:1;
+	__u8 addr_idx:3;
+	 __u8:3;
+#endif
+	__u8 rsvd;
+	__be32 rcv_isn;
+	__be32 rsvd2;
+};
+
+struct cpl_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be32 rsvd;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l_status;
+};
+
+struct cpl_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 opt0h;
+	__be32 opt0l;
+	__be32 params;
+	__be32 opt2;
+};
+
+/* cpl_act_open_req.params fields */
+#define S_AOPEN_VLAN_PRI    9
+#define M_AOPEN_VLAN_PRI    0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID    11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID    V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE    12
+#define M_AOPEN_PKT_TYPE    0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH    14
+#define M_AOPEN_MAC_MATCH    0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID    19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID    V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN    20
+#define M_AOPEN_IFF_VLAN    0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 atid;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_act_establish {
+	RSS_HDR union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be32 tos_tid;
+	__be16 l2t_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 cpuno;
+	__be16 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd;
+	__u8 status;
+	__be16 len;
+};
+
+struct cpl_set_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 reply;
+	__u8 cpu_idx;
+	__be16 len;
+};
+
+/* cpl_set_tcb.reply fields */
+#define S_NO_REPLY    7
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY    V_NO_REPLY(1U)
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 reply;
+	__u8 cpu_idx;
+	__be16 word;
+	__be64 mask;
+	__be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_pcmd {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd[3];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 src:1;
+	__u8 bundle:1;
+	__u8 channel:1;
+	 __u8:5;
+#else
+	 __u8:5;
+	__u8 channel:1;
+	__u8 bundle:1;
+	__u8 src:1;
+#endif
+	__be32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_reply {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd;
+	__be16 len;
+};
+
+struct cpl_close_con_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+	__be32 snd_nxt;
+	__be32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+	__u8 cpu_idx;
+	__be16 rsvd1;
+};
+
+struct cpl_close_listserv_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct cpl_abort_req_rss {
+	RSS_HDR union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 status;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 cmd;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+	RSS_HDR union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 status;
+	__u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	__u8 rsvd1;
+	__u8 cmd;
+	__u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+	RSS_HDR union opcode_tid ot;
+	__be32 rcv_nxt;
+};
+
+struct tx_data_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__be32 len;
+	__be32 flags;
+	__be32 sndseq;
+	__be32 param;
+};
+
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES	21
+#define M_TX_ACK_PAGES	0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT    0
+#define M_TX_PORT    0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS    4
+#define M_TX_MSS    0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS    8
+#define M_TX_QOS    0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+	union opcode_tid ot;
+	__be32 len;
+	__be32 rsvd;
+	__be16 urg;
+	__be16 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_ULP_SUBMODE    6
+#define M_TX_ULP_SUBMODE    0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE    10
+#define M_TX_ULP_MODE    0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE    14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE    V_TX_SHOVE(1U)
+
+#define S_TX_MORE    15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE    V_TX_MORE(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX    0
+#define M_TX_CPU_IDX    0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_URG    16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG    V_TX_URG(1U)
+
+#define S_TX_CLOSE    17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE    V_TX_CLOSE(1U)
+
+#define S_TX_INIT    18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT    V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK    19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK    V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA    20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA    V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+	RSS_HDR union opcode_tid ot;
+	__be32 ack_seq;
+};
+
+struct cpl_wr_ack {
+	RSS_HDR union opcode_tid ot;
+	__be16 credits;
+	__be16 rsvd;
+	__be32 snd_nxt;
+	__be32 snd_una;
+};
+
+struct cpl_rdma_ec_status {
+	RSS_HDR union opcode_tid ot;
+	__u8 rsvd[3];
+	__u8 status;
+};
+
+struct mngt_pktsched_wr {
+	__be32 wr_hi;
+	__be32 wr_lo;
+	__u8 mngt_opcode;
+	__u8 rsvd[7];
+	__u8 sched;
+	__u8 idx;
+	__u8 min;
+	__u8 max;
+	__u8 binding;
+	__u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr {
+	RSS_HDR union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN    0
+#define M_ISCSI_PDU_LEN    0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP    15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP    V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+	RSS_HDR union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 dack_mode:2;
+	__u8 psh:1;
+	__u8 heartbeat:1;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 heartbeat:1;
+	__u8 psh:1;
+	__u8 dack_mode:2;
+#endif
+	__u8 status;
+};
+
+struct cpl_rx_data_ack {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.ack_seq fields */
+#define S_RX_CREDITS    0
+#define M_RX_CREDITS    0x7FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE    27
+#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
+#define F_RX_MODULATE    V_RX_MODULATE(1U)
+
+#define S_RX_FORCE_ACK    28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK    V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE    29
+#define M_RX_DACK_MODE    0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE    31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
+struct cpl_rx_urg_notify {
+	RSS_HDR union opcode_tid ot;
+	__be32 seq;
+};
+
+struct cpl_rx_ddp_complete {
+	RSS_HDR union opcode_tid ot;
+	__be32 ddp_report;
+};
+
+struct cpl_rx_data_ddp {
+	RSS_HDR union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	};
+	__be32 ulp_crc;
+	__be32 ddpvld_status;
+};
+
+/* cpl_rx_data_ddp.ddpvld_status fields */
+#define S_DDP_STATUS    0
+#define M_DDP_STATUS    0xFF
+#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
+#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
+
+#define S_DDP_VALID    15
+#define M_DDP_VALID    0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH    15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH    V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU    16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU    V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR    17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR    V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR    18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR    V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR    19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR    V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR    20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR    V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR    21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR    V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG    22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG    V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR    23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR    V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR    24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR    V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR    25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR    V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH    26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH    V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD    27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD    V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE    28
+#define M_DDP_ULP_MODE    0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_data_ddp.ddp_report fields */
+#define S_DDP_OFFSET    0
+#define M_DDP_OFFSET    0x3FFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_URG    24
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG    V_DDP_URG(1U)
+
+#define S_DDP_PSH    25
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH    V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE    26
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE    V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT    27
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT    V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_BUF_IDX    28
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX    V_DDP_BUF_IDX(1U)
+
+struct cpl_tx_pkt {
+	WR_HDR;
+	__be32 cntrl;
+	__be32 len;
+};
+
+struct cpl_tx_pkt_lso {
+	WR_HDR;
+	__be32 cntrl;
+	__be32 len;
+
+	__be32 rsvd;
+	__be32 lso_info;
+};
+
+/* cpl_tx_pkt*.cntrl fields */
+#define S_TXPKT_VLAN    0
+#define M_TXPKT_VLAN    0xFFFF
+#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_INTF    16
+#define M_TXPKT_INTF    0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_IPCSUM_DIS    20
+#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS    V_TXPKT_IPCSUM_DIS(1U)
+
+#define S_TXPKT_L4CSUM_DIS    21
+#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS    V_TXPKT_L4CSUM_DIS(1U)
+
+#define S_TXPKT_VLAN_VLD    22
+#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD    V_TXPKT_VLAN_VLD(1U)
+
+#define S_TXPKT_LOOPBACK    23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK    V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE    24
+#define M_TXPKT_OPCODE    0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_lso.lso_info fields */
+#define S_LSO_MSS    0
+#define M_LSO_MSS    0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_ETH_TYPE    14
+#define M_LSO_ETH_TYPE    0x3
+#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
+#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
+
+#define S_LSO_TCPHDR_WORDS    16
+#define M_LSO_TCPHDR_WORDS    0xF
+#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
+#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
+
+#define S_LSO_IPHDR_WORDS    20
+#define M_LSO_IPHDR_WORDS    0xF
+#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
+#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
+
+#define S_LSO_IPV6    24
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6    V_LSO_IPV6(1U)
+
+struct cpl_trace_pkt {
+#ifdef CHELSIO_FW
+	__u8 rss_opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 err:1;
+	 __u8:7;
+#else
+	 __u8:7;
+	__u8 err:1;
+#endif
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 qid:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 qid:4;
+#endif
+	__be32 tstamp;
+#endif				/* CHELSIO_FW */
+
+	__u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 iff:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 iff:4;
+#endif
+	__u8 rsvd[4];
+	__be16 len;
+};
+
+struct cpl_rx_pkt {
+	RSS_HDR __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 iff:4;
+	__u8 csum_valid:1;
+	__u8 ipmi_pkt:1;
+	__u8 vlan_valid:1;
+	__u8 fragment:1;
+#else
+	__u8 fragment:1;
+	__u8 vlan_valid:1;
+	__u8 ipmi_pkt:1;
+	__u8 csum_valid:1;
+	__u8 iff:4;
+#endif
+	__be16 csum;
+	__be16 vlan;
+	__be16 len;
+};
+
+struct cpl_l2t_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+	__u8 rsvd[2];
+	__u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_IDX    0
+#define M_L2T_W_IDX    0x7FF
+#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
+#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
+
+#define S_L2T_W_VLAN    11
+#define M_L2T_W_VLAN    0xFFF
+#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
+#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
+
+#define S_L2T_W_IFF    23
+#define M_L2T_W_IFF    0xF
+#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
+#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
+
+#define S_L2T_W_PRIO    27
+#define M_L2T_W_PRIO    0x7
+#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
+#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
+
+struct cpl_l2t_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__be32 params;
+	__u8 rsvd[2];
+	__u8 dst_mac[6];
+};
+
+/* cpl_l2t_read_rpl.params fields */
+#define S_L2T_R_PRIO    0
+#define M_L2T_R_PRIO    0x7
+#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
+#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
+
+#define S_L2T_R_VLAN    8
+#define M_L2T_R_VLAN    0xFFF
+#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
+#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
+
+#define S_L2T_R_IFF    20
+#define M_L2T_R_IFF    0xF
+#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
+#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
+
+#define S_L2T_STATUS    24
+#define M_L2T_STATUS    0xFF
+#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
+#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
+
+struct cpl_smt_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 mtu_idx:4;
+	__u8 iff:4;
+#else
+	__u8 iff:4;
+	__u8 mtu_idx:4;
+#endif
+	__be16 rsvd2;
+	__be16 rsvd3;
+	__u8 src_mac1[6];
+	__be16 rsvd4;
+	__u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:4;
+	__u8 iff:4;
+#else
+	__u8 iff:4;
+	 __u8:4;
+#endif
+	__be16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 mtu_idx:4;
+	 __u8:4;
+#else
+	 __u8:4;
+	__u8 mtu_idx:4;
+#endif
+	__be16 rsvd2;
+	__be16 rsvd3;
+	__u8 src_mac1[6];
+	__be16 rsvd4;
+	__u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
+#define S_RTE_REQ_LUT_IX    8
+#define M_RTE_REQ_LUT_IX    0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE    19
+#define M_RTE_REQ_LUT_BASE    0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT    31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT    V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:6;
+	__u8 write_tcam:1;
+	__u8 write_l2t_lut:1;
+#else
+	__u8 write_l2t_lut:1;
+	__u8 write_tcam:1;
+	 __u8:6;
+#endif
+	__u8 rsvd[3];
+	__be32 lut_params;
+	__be16 rsvd2;
+	__be16 l2t_idx;
+	__be32 netmask;
+	__be32 faddr;
+};
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WRITE_REQ_LUT_IX    10
+#define M_RTE_WRITE_REQ_LUT_IX    0x7FF
+#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
+#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
+
+#define S_RTE_WRITE_REQ_LUT_BASE    21
+#define M_RTE_WRITE_REQ_LUT_BASE    0x7FF
+#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
+#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+};
+
+struct cpl_rte_read_rpl {
+	RSS_HDR union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd0;
+	__be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	 __u8:7;
+	__u8 select:1;
+#else
+	__u8 select:1;
+	 __u8:7;
+#endif
+	__u8 rsvd2[3];
+	__be32 addr;
+};
+
+struct cpl_tid_release {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_barrier {
+	WR_HDR;
+	__u8 opcode;
+	__u8 rsvd[7];
+};
+
+struct cpl_rdma_read_req {
+	__u8 opcode;
+	__u8 rsvd[15];
+};
+
+struct cpl_rdma_terminate {
+#ifdef CHELSIO_FW
+	__u8 opcode;
+	__u8 rsvd[2];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 rspq:3;
+	 __u8:5;
+#else
+	 __u8:5;
+	__u8 rspq:3;
+#endif
+	__be32 tid_len;
+#endif
+	__be32 msn;
+	__be32 mo;
+	__u8 data[0];
+};
+
+/* cpl_rdma_terminate.tid_len fields */
+#define S_FLIT_CNT    0
+#define M_FLIT_CNT    0xFF
+#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
+#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
+
+#define S_TERM_TID    8
+#define M_TERM_TID    0xFFFFF
+#define V_TERM_TID(x) ((x) << S_TERM_TID)
+#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+
+/* ULP_TX opcodes */
+enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
+
+#define S_ULPTX_CMD	28
+#define M_ULPTX_CMD	0xF
+#define V_ULPTX_CMD(x)	((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_NFLITS	0
+#define M_ULPTX_NFLITS	0xFF
+#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
+
+struct ulp_mem_io {
+	WR_HDR;
+	__be32 cmd_lock_addr;
+	__be32 len;
+};
+
+/* ulp_mem_io.cmd_lock_addr fields */
+#define S_ULP_MEMIO_ADDR	0
+#define M_ULP_MEMIO_ADDR	0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x)	((x) << S_ULP_MEMIO_ADDR)
+#define S_ULP_MEMIO_LOCK	27
+#define V_ULP_MEMIO_LOCK(x)	((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK	V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.len fields */
+#define S_ULP_MEMIO_DATA_LEN	28
+#define M_ULP_MEMIO_DATA_LEN	0xF
+#define V_ULP_MEMIO_DATA_LEN(x)	((x) << S_ULP_MEMIO_DATA_LEN)
+
+#endif				/* T3_CPL_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
new file mode 100644
index 0000000..a22768c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -0,0 +1,3777 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "firmware_exports.h"
+
+static void t3_port_intr_clear(struct adapter *adapter, int idx);
+
+/**
+ *	t3_wait_op_done_val - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *	@valp: where to store the value of the register at completion time
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  If @valp is not NULL the value of the register
+ *	at the time it indicated completion is stored there.  Returns 0 if the
+ *	operation completes and -EAGAIN otherwise.
+ */
+
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			int polarity, int attempts, int delay, u32 *valp)
+{
+	while (1) {
+		u32 val = t3_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp)
+				*valp = val;
+			return 0;
+		}
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+/**
+ *	t3_write_regs - write a bunch of registers
+ *	@adapter: the adapter to program
+ *	@p: an array of register address/register value pairs
+ *	@n: the number of address/value pairs
+ *	@offset: register address offset
+ *
+ *	Takes an array of register address/register value pairs and writes each
+ *	value to the corresponding register.  Register addresses are adjusted
+ *	by the supplied offset.
+ */
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+		   int n, unsigned int offset)
+{
+	while (n--) {
+		t3_write_reg(adapter, p->reg_addr + offset, p->val);
+		p++;
+	}
+}
+
+/**
+ *	t3_set_reg_field - set a register field to a value
+ *	@adapter: the adapter to program
+ *	@addr: the register address
+ *	@mask: specifies the portion of the register to modify
+ *	@val: the new value for the register field
+ *
+ *	Sets a register field specified by the supplied mask to the
+ *	given value.
+ */
+void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+		      u32 val)
+{
+	u32 v = t3_read_reg(adapter, addr) & ~mask;
+
+	t3_write_reg(adapter, addr, v | val);
+	t3_read_reg(adapter, addr);	/* flush */
+}
+
+/**
+ *	t3_read_indirect - read indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect address
+ *	@data_reg: register holding the value of the indirect register
+ *	@vals: where the read register values are stored
+ *	@start_idx: index of first indirect register to read
+ *	@nregs: how many indirect registers to read
+ *
+ *	Reads registers that are accessed indirectly through an address/data
+ *	register pair.
+ */
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+			     unsigned int data_reg, u32 *vals,
+			     unsigned int nregs, unsigned int start_idx)
+{
+	while (nregs--) {
+		t3_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t3_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/**
+ *	t3_mc7_bd_read - read from MC7 through backdoor accesses
+ *	@mc7: identifies MC7 to read from
+ *	@start: index of first 64-bit word to read
+ *	@n: number of 64-bit words to read
+ *	@buf: where to store the read result
+ *
+ *	Read n 64-bit words from MC7 starting at word start, using backdoor
+ *	accesses.
+ */
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+		   u64 *buf)
+{
+	static const int shift[] = { 0, 0, 16, 24 };
+	static const int step[] = { 0, 32, 16, 8 };
+
+	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
+	struct adapter *adap = mc7->adapter;
+
+	if (start >= size64 || start + n > size64)
+		return -EINVAL;
+
+	start *= (8 << mc7->width);
+	while (n--) {
+		int i;
+		u64 val64 = 0;
+
+		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
+			int attempts = 10;
+			u32 val;
+
+			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
+			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
+			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
+			while ((val & F_BUSY) && attempts--)
+				val = t3_read_reg(adap,
+						  mc7->offset + A_MC7_BD_OP);
+			if (val & F_BUSY)
+				return -EIO;
+
+			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
+			if (mc7->width == 0) {
+				val64 = t3_read_reg(adap,
+						    mc7->offset +
+						    A_MC7_BD_DATA0);
+				val64 |= (u64) val << 32;
+			} else {
+				if (mc7->width > 1)
+					val >>= shift[mc7->width];
+				val64 |= (u64) val << (step[mc7->width] * i);
+			}
+			start += 8;
+		}
+		*buf++ = val64;
+	}
+	return 0;
+}
+
+/*
+ * Initialize MI1.
+ */
+static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+{
+	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+	u32 val = F_PREEN | V_CLKDIV(clkdiv);
+
+	t3_write_reg(adap, A_MI1_CFG, val);
+}
+
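+/* Maximum number of times to poll for an MDIO operation to complete. */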
+#define MDIO_ATTEMPTS 20
+
+/*
+ * MI1 read/write operations for clause 22 PHYs.
+ */
+static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
+		       u16 reg_addr)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+	if (!ret)
+		ret = t3_read_reg(adapter, A_MI1_DATA);
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
+			u16 reg_addr, u16 val)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+	mutex_lock(&adapter->mdio_lock);
+	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_DATA, val);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+	.read = t3_mi1_read,
+	.write = t3_mi1_write,
+	.mode_support = MDIO_SUPPORTS_C22
+};
+
+/*
+ * Performs the address cycle for clause 45 PHYs.
+ * Must be called with the MDIO lock (adapter->mdio_lock) held.
+ */
+static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
+		       int reg_addr)
+{
+	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
+	t3_write_reg(adapter, A_MI1_ADDR, addr);
+	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+			       MDIO_ATTEMPTS, 10);
+}
+
+/*
+ * MI1 read/write operations for indirect-addressed PHYs.
+ */
+static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+			u16 reg_addr)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+
+	mutex_lock(&adapter->mdio_lock);
+	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+	if (!ret) {
+		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+				      MDIO_ATTEMPTS, 10);
+		if (!ret)
+			ret = t3_read_reg(adapter, A_MI1_DATA);
+	}
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
+			 u16 reg_addr, u16 val)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	int ret;
+
+	mutex_lock(&adapter->mdio_lock);
+	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+	if (!ret) {
+		t3_write_reg(adapter, A_MI1_DATA, val);
+		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+				      MDIO_ATTEMPTS, 10);
+	}
+	mutex_unlock(&adapter->mdio_lock);
+	return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+	.read = mi1_ext_read,
+	.write = mi1_ext_write,
+	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
+};
+
+/**
+ *	t3_mdio_change_bits - modify the value of a PHY register
+ *	@phy: the PHY to operate on
+ *	@mmd: the device address
+ *	@reg: the register address
+ *	@clear: what part of the register value to mask off
+ *	@set: what part of the register value to set
+ *
+ *	Changes the value of a PHY register by applying a mask to its current
+ *	value and ORing the result with a new value.
+ */
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+			unsigned int set)
+{
+	int ret;
+	unsigned int val;
+
+	ret = t3_mdio_read(phy, mmd, reg, &val);
+	if (!ret) {
+		val &= ~clear;
+		ret = t3_mdio_write(phy, mmd, reg, val | set);
+	}
+	return ret;
+}
+
+/**
+ *	t3_phy_reset - reset a PHY block
+ *	@phy: the PHY to operate on
+ *	@mmd: the device address of the PHY block to reset
+ *	@wait: how long to wait for the reset to complete in 1ms increments
+ *
+ *	Resets a PHY block and optionally waits for the reset to complete.
+ *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
+ *	for 10G PHYs.
+ */
+int t3_phy_reset(struct cphy *phy, int mmd, int wait)
+{
+	int err;
+	unsigned int ctl;
+
+	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
+				  MDIO_CTRL1_RESET);
+	if (err || !wait)
+		return err;
+
+	do {
+		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
+		if (err)
+			return err;
+		ctl &= MDIO_CTRL1_RESET;
+		if (ctl)
+			msleep(1);
+	} while (ctl && --wait);
+
+	return ctl ? -1 : 0;
+}
+
+/**
+ *	t3_phy_advertise - set the PHY advertisement registers for autoneg
+ *	@phy: the PHY to operate on
+ *	@advert: bitmap of capabilities the PHY should advertise
+ *
+ *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
+ *	requested capabilities.
+ */
+int t3_phy_advertise(struct cphy *phy, unsigned int advert)
+{
+	int err;
+	unsigned int val = 0;
+
+	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
+	if (err)
+		return err;
+
+	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+	if (advert & ADVERTISED_1000baseT_Half)
+		val |= ADVERTISE_1000HALF;
+	if (advert & ADVERTISED_1000baseT_Full)
+		val |= ADVERTISE_1000FULL;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
+	if (err)
+		return err;
+
+	val = 1;
+	if (advert & ADVERTISED_10baseT_Half)
+		val |= ADVERTISE_10HALF;
+	if (advert & ADVERTISED_10baseT_Full)
+		val |= ADVERTISE_10FULL;
+	if (advert & ADVERTISED_100baseT_Half)
+		val |= ADVERTISE_100HALF;
+	if (advert & ADVERTISED_100baseT_Full)
+		val |= ADVERTISE_100FULL;
+	if (advert & ADVERTISED_Pause)
+		val |= ADVERTISE_PAUSE_CAP;
+	if (advert & ADVERTISED_Asym_Pause)
+		val |= ADVERTISE_PAUSE_ASYM;
+	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ *	t3_phy_advertise_fiber - set fiber PHY advertisement register
+ *	@phy: the PHY to operate on
+ *	@advert: bitmap of capabilities the PHY should advertise
+ *
+ *	Sets a fiber PHY's advertisement register to advertise the
+ *	requested capabilities.
+ */
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
+{
+	unsigned int val = 0;
+
+	if (advert & ADVERTISED_1000baseT_Half)
+		val |= ADVERTISE_1000XHALF;
+	if (advert & ADVERTISED_1000baseT_Full)
+		val |= ADVERTISE_1000XFULL;
+	if (advert & ADVERTISED_Pause)
+		val |= ADVERTISE_1000XPAUSE;
+	if (advert & ADVERTISED_Asym_Pause)
+		val |= ADVERTISE_1000XPSE_ASYM;
+	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ *	t3_set_phy_speed_duplex - force PHY speed and duplex
+ *	@phy: the PHY to operate on
+ *	@speed: requested PHY speed
+ *	@duplex: requested PHY duplex
+ *
+ *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
+ *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
+ */
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	int err;
+	unsigned int ctl;
+
+	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
+	if (err)
+		return err;
+
+	if (speed >= 0) {
+		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+		if (speed == SPEED_100)
+			ctl |= BMCR_SPEED100;
+		else if (speed == SPEED_1000)
+			ctl |= BMCR_SPEED1000;
+	}
+	if (duplex >= 0) {
+		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+		if (duplex == DUPLEX_FULL)
+			ctl |= BMCR_FULLDPLX;
+	}
+	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+		ctl |= BMCR_ANENABLE;
+	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
+}
+
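+/*
+ * Helpers for the PMA/PMD LASI (Link Alarm Status Interrupt) registers of
+ * 10G PHYs: enable, disable, clear and service the link-alarm interrupt.
+ */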
+int t3_phy_lasi_intr_enable(struct cphy *phy)
+{
+	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+			     MDIO_PMA_LASI_LSALARM);
+}
+
+int t3_phy_lasi_intr_disable(struct cphy *phy)
+{
+	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+int t3_phy_lasi_intr_clear(struct cphy *phy)
+{
+	u32 val;
+
+	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+}
+
+int t3_phy_lasi_intr_handler(struct cphy *phy)
+{
+	unsigned int status;
+	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
+			       &status);
+
+	if (err)
+		return err;
+	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
+}
+
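+/*
+ * Per-board configuration, indexed by board id: port counts, GPIO and MDIO
+ * setup, PHY interrupt pins, supported link modes and product name.
+ */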
+static const struct adapter_info t3_adap_info[] = {
+	{1, 1, 0,
+	 F_GPIO2_OEN | F_GPIO4_OEN |
+	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+	 &mi1_mdio_ops, "Chelsio PE9000"},
+	{1, 1, 0,
+	 F_GPIO2_OEN | F_GPIO4_OEN |
+	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+	 &mi1_mdio_ops, "Chelsio T302"},
+	{1, 0, 0,
+	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio T310"},
+	{1, 1, 0,
+	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio T320"},
+	{},
+	{},
+	{1, 0, 0,
+	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio T310" },
+	{1, 0, 0,
+	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
+	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
+};
+
+/*
+ * Return the adapter_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct adapter_info *t3_get_adapter_info(unsigned int id)
+{
+	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+}
+
+struct port_type_info {
+	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *ops);
+};
+
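+/* PHY initialization hooks, indexed by the port/PHY type read from the VPD. */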
+static const struct port_type_info port_types[] = {
+	{ NULL },
+	{ t3_ael1002_phy_prep },
+	{ t3_vsc8211_phy_prep },
+	{ NULL },
+	{ t3_xaui_direct_phy_prep },
+	{ t3_ael2005_phy_prep },
+	{ t3_qt2045_phy_prep },
+	{ t3_ael1006_phy_prep },
+	{ NULL },
+	{ t3_aq100x_phy_prep },
+	{ t3_ael2020_phy_prep },
+};
+
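+/* One VPD keyword entry: 2-byte keyword, 1-byte length, followed by the data. */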
+#define VPD_ENTRY(name, len) \
+	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure.  Includes only the ID and
+ * VPD-R sections.
+ */
+struct t3_vpd {
+	u8 id_tag;
+	u8 id_len[2];
+	u8 id_data[16];
+	u8 vpdr_tag;
+	u8 vpdr_len[2];
+	VPD_ENTRY(pn, 16);	/* part number */
+	VPD_ENTRY(ec, 16);	/* EC level */
+	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
+	VPD_ENTRY(na, 12);	/* MAC address base */
+	VPD_ENTRY(cclk, 6);	/* core clock */
+	VPD_ENTRY(mclk, 6);	/* mem clock */
+	VPD_ENTRY(uclk, 6);	/* uP clk */
+	VPD_ENTRY(mdc, 6);	/* MDIO clk */
+	VPD_ENTRY(mt, 2);	/* mem timing */
+	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
+	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
+	VPD_ENTRY(port0, 2);	/* PHY0 complex */
+	VPD_ENTRY(port1, 2);	/* PHY1 complex */
+	VPD_ENTRY(port2, 2);	/* PHY2 complex */
+	VPD_ENTRY(port3, 2);	/* PHY3 complex */
+	VPD_ENTRY(rv, 1);	/* csum */
+	u32 pad;		/* for multiple-of-4 sizing and alignment */
+};
+
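+/*
+ * Serial EEPROM access parameters: maximum number of polls per access, the
+ * address of the write-protection status word, and the start of the VPD data.
+ */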
+#define EEPROM_MAX_POLL   40
+#define EEPROM_STAT_ADDR  0x4000
+#define VPD_BASE          0xc00
+
+/**
+ *	t3_seeprom_read - read a VPD EEPROM location
+ *	@adapter: adapter to read
+ *	@addr: EEPROM address
+ *	@data: where to store the read data
+ *
+ *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
+ *	VPD ROM capability.  A zero is written to the flag bit when the
+ *	address is written to the control register.  The hardware device will
+ *	set the flag to 1 when 4 bytes have been read into the data register.
+ */
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_POLL;
+	u32 v;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
+	do {
+		udelay(10);
+		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (!(val & PCI_VPD_ADDR_F)) {
+		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+		return -EIO;
+	}
+	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
+	*data = cpu_to_le32(v);
+	return 0;
+}
+
+/**
+ *	t3_seeprom_write - write a VPD EEPROM location
+ *	@adapter: adapter to write
+ *	@addr: EEPROM address
+ *	@data: value to write
+ *
+ *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
+ *	VPD ROM capability.
+ */
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
+{
+	u16 val;
+	int attempts = EEPROM_MAX_POLL;
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
+			       le32_to_cpu(data));
+	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
+			      addr | PCI_VPD_ADDR_F);
+	do {
+		msleep(1);
+		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+	} while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+	if (val & PCI_VPD_ADDR_F) {
+		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ *	t3_seeprom_wp - enable/disable EEPROM write protection
+ *	@adapter: the adapter
+ *	@enable: 1 to enable write protection, 0 to disable it
+ *
+ *	Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(struct adapter *adapter, int enable)
+{
+	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/**
+ *	get_vpd_params - read VPD parameters from VPD EEPROM
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	int i, addr, ret;
+	struct t3_vpd vpd;
+
+	/*
+	 * Card information is normally at VPD_BASE but some early cards had
+	 * it at 0.
+	 */
+	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
+	if (ret)
+		return ret;
+	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+
+	for (i = 0; i < sizeof(vpd); i += 4) {
+		ret = t3_seeprom_read(adapter, addr + i,
+				      (__le32 *)((u8 *)&vpd + i));
+		if (ret)
+			return ret;
+	}
+
+	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
+	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
+	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
+	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
+
+	/* Old EEPROMs didn't have port information */
+	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
+	} else {
+		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
+		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
+		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
+		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
+	}
+
+	ret = hex2bin(p->eth_base, vpd.na_data, 6);
+	if (ret < 0)
+		return -EINVAL;
+	return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
+	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
+	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE = 2,	/* program page */
+	SF_WR_DISABLE = 4,	/* disable writes */
+	SF_RD_STATUS = 5,	/* read status register */
+	SF_WR_ENABLE = 6,	/* enable writes */
+	SF_RD_DATA_FAST = 0xb,	/* read flash */
+	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
+
+	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
+	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
+	FW_MIN_SIZE = 8            /* at least version and csum */
+};
+
+/**
+ *	sf1_read - read data from the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to read
+ *	@cont: whether another operation will be chained
+ *	@valp: where to store the read data
+ *
+ *	Reads up to 4 bytes of data from the serial flash.  The location of
+ *	the read needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		    u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return -EBUSY;
+	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+	if (!ret)
+		*valp = t3_read_reg(adapter, A_SF_DATA);
+	return ret;
+}
+
+/**
+ *	sf1_write - write data to the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to write
+ *	@cont: whether another operation will be chained
+ *	@val: value to write
+ *
+ *	Writes up to 4 bytes of data to the serial flash.  The location of
+ *	the write needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		     u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+		return -EBUSY;
+	t3_write_reg(adapter, A_SF_DATA, val);
+	t3_write_reg(adapter, A_SF_OP,
+		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+}
+
+/**
+ *	flash_wait_op - wait for a flash operation to complete
+ *	@adapter: the adapter
+ *	@attempts: max number of polls of the status register
+ *	@delay: delay between polls in ms
+ *
+ *	Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+			return ret;
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ *	t3_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+static int t3_read_flash(struct adapter *adapter, unsigned int addr,
+			 unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
+		return ret;
+
+	for (; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, data);
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = htonl(*data);
+	}
+	return 0;
+}
+
+/**
+ *	t3_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.
+ */
+static int t3_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr + n > SF_SIZE || offset + n > 256)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
+		return ret;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, val);
+		if (ret)
+			return ret;
+	}
+	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+		return ret;
+
+	/* Read the page to verify the write succeeded */
+	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *) buf + offset, n))
+		return -EIO;
+	return 0;
+}
+
+/**
+ *	t3_get_tp_version - read the tp sram version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the protocol sram version from sram.
+ */
+int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	int ret;
+
+	/* Get version loaded in SRAM */
+	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+			      1, 1, 5, 1);
+	if (ret)
+		return ret;
+
+	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+	return 0;
+}
+
+/**
+ *	t3_check_tpsram_version - check the TP SRAM version
+ *	@adapter: the adapter
+ *
+ *	Reads the protocol SRAM version from the adapter and checks it against
+ *	the version the driver was compiled for.  Returns 0 if they match, a
+ *	negative error otherwise.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+	int ret;
+	u32 vers;
+	unsigned int major, minor;
+
+	if (adapter->params.rev == T3_REV_A)
+		return 0;
+
+	ret = t3_get_tp_version(adapter, &vers);
+	if (ret)
+		return ret;
+
+	major = G_TP_VERSION_MAJOR(vers);
+	minor = G_TP_VERSION_MINOR(vers);
+
+	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+		return 0;
+	else {
+		CH_ERR(adapter, "found wrong TP version (%u.%u), "
+		       "driver compiled for version %d.%d\n", major, minor,
+		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
+	}
+	return -EINVAL;
+}
+
+/**
+ *	t3_check_tpsram - validate a protocol SRAM image
+ *	@adapter: the adapter
+ *	@tp_sram: the protocol SRAM image to check
+ *	@size: image size
+ *
+ *	Verifies the checksum of the supplied protocol SRAM image.
+ *	Returns 0 if the image is valid, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
+		    unsigned int size)
+{
+	u32 csum;
+	unsigned int i;
+	const __be32 *p = (const __be32 *)tp_sram;
+
+	/* Verify checksum */
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+	if (csum != 0xffffffff) {
+		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+		       csum);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+enum fw_version_type {
+	FW_VERSION_N3,
+	FW_VERSION_T3
+};
+
+/**
+ *	t3_get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ *	t3_check_fw_version - check if the FW is compatible with this driver
+ *	@adapter: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(struct adapter *adapter)
+{
+	int ret;
+	u32 vers;
+	unsigned int type, major, minor;
+
+	ret = t3_get_fw_version(adapter, &vers);
+	if (ret)
+		return ret;
+
+	type = G_FW_VERSION_TYPE(vers);
+	major = G_FW_VERSION_MAJOR(vers);
+	minor = G_FW_VERSION_MINOR(vers);
+
+	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+	    minor == FW_VERSION_MINOR)
+		return 0;
+	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
+		CH_WARN(adapter, "found old FW minor version(%u.%u), "
+		        "driver compiled for version %u.%u\n", major, minor,
+			FW_VERSION_MAJOR, FW_VERSION_MINOR);
+	else {
+		CH_WARN(adapter, "found newer FW version(%u.%u), "
+		        "driver compiled for version %u.%u\n", major, minor,
+			FW_VERSION_MAJOR, FW_VERSION_MINOR);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/**
+ *	t3_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	while (start <= end) {
+		int ret;
+
+		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0,
+				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
+			return ret;
+		start++;
+	}
+	return 0;
+}
+
+/**
+ *	t3_load_fw - download firmware
+ *	@adapter: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
+ *	The FW image has the following sections: @size - 8 bytes of code and
+ *	data, followed by 4 bytes of FW version, followed by the 32-bit
+ *	1's complement checksum of the whole image.
+ */
+int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	unsigned int i;
+	const __be32 *p = (const __be32 *)fw_data;
+	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
+
+	if ((size & 3) || size < FW_MIN_SIZE)
+		return -EINVAL;
+	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+		return -EFBIG;
+
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+	if (csum != 0xffffffff) {
+		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+		       csum);
+		return -EINVAL;
+	}
+
+	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
+	if (ret)
+		goto out;
+
+	size -= 8;		/* trim off version and checksum */
+	for (addr = FW_FLASH_BOOT_ADDR; size;) {
+		unsigned int chunk_size = min(size, 256U);
+
+		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+		if (ret)
+			goto out;
+
+		addr += chunk_size;
+		fw_data += chunk_size;
+		size -= chunk_size;
+	}
+
+	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+out:
+	if (ret)
+		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
+	return ret;
+}
+
+#define CIM_CTL_BASE 0x2000
+
+/**
+ *	t3_cim_ctl_blk_read - read a block from CIM control region
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM control region
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM control region.
+ */
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+			unsigned int n, unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
+		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+				      0, 5, 2);
+		if (!ret)
+			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
+	}
+	return ret;
+}
+
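+/*
+ * Temporarily block all ingress traffic on a MAC, saving the Rx configuration
+ * and hash registers so that t3_open_rx_traffic() can restore them later.
+ */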
+static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
+			       u32 *rx_hash_high, u32 *rx_hash_low)
+{
+	/* stop Rx unicast traffic */
+	t3_mac_disable_exact_filters(mac);
+
+	/* stop broadcast, multicast, promiscuous mode traffic */
+	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
+	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+			 F_DISBCAST);
+
+	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
+
+	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
+
+	/* Leave time to drain max RX fifo */
+	msleep(1);
+}
+
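+/*
+ * Restore the Rx configuration saved by t3_gate_rx_traffic() and re-enable
+ * the exact-match filters.
+ */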
+static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
+			       u32 rx_hash_high, u32 rx_hash_low)
+{
+	t3_mac_enable_exact_filters(mac);
+	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+			 rx_cfg);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
+	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
+}
+
+/**
+ *	t3_link_changed - handle interface link changes
+ *	@adapter: the adapter
+ *	@port_id: the port index that changed link state
+ *
+ *	Called when a port's link settings change to propagate the new values
+ *	to the associated PHY and MAC.  After performing the common tasks it
+ *	invokes an OS-specific handler.
+ */
+void t3_link_changed(struct adapter *adapter, int port_id)
+{
+	int link_ok, speed, duplex, fc;
+	struct port_info *pi = adap2pinfo(adapter, port_id);
+	struct cphy *phy = &pi->phy;
+	struct cmac *mac = &pi->mac;
+	struct link_config *lc = &pi->link_config;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	if (!lc->link_ok && link_ok) {
+		u32 rx_cfg, rx_hash_high, rx_hash_low;
+		u32 status;
+
+		t3_xgm_intr_enable(adapter, port_id);
+		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+		t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
+		if (status & F_LINKFAULTCHANGE) {
+			mac->stats.link_faults++;
+			pi->link_fault = 1;
+		}
+		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+	}
+
+	if (lc->requested_fc & PAUSE_AUTONEG)
+		fc &= lc->requested_fc;
+	else
+		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (link_ok == lc->link_ok && speed == lc->speed &&
+	    duplex == lc->duplex && fc == lc->fc)
+		return;                            /* nothing changed */
+
+	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+	    uses_xaui(adapter)) {
+		if (link_ok)
+			t3b_pcs_reset(mac);
+		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
+	}
+	lc->link_ok = link_ok;
+	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+
+	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+		/* Set MAC speed, duplex, and flow control to match PHY. */
+		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
+		lc->fc = fc;
+	}
+
+	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+			   speed, duplex, fc);
+}
+
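+/*
+ * Handle a MAC link-fault event: quiesce the Rx path, re-check the fault
+ * status and the PHY's view of the link, update the cached link state and
+ * notify the OS-specific layer.
+ */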
+void t3_link_fault(struct adapter *adapter, int port_id)
+{
+	struct port_info *pi = adap2pinfo(adapter, port_id);
+	struct cmac *mac = &pi->mac;
+	struct cphy *phy = &pi->phy;
+	struct link_config *lc = &pi->link_config;
+	int link_ok, speed, duplex, fc, link_fault;
+	u32 rx_cfg, rx_hash_high, rx_hash_low;
+
+	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+
+	if (adapter->params.rev > 0 && uses_xaui(adapter))
+		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
+
+	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+	t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+
+	link_fault = t3_read_reg(adapter,
+				 A_XGM_INT_STATUS + mac->offset);
+	link_fault &= F_LINKFAULTCHANGE;
+
+	link_ok = lc->link_ok;
+	speed = lc->speed;
+	duplex = lc->duplex;
+	fc = lc->fc;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	if (link_fault) {
+		lc->link_ok = 0;
+		lc->speed = SPEED_INVALID;
+		lc->duplex = DUPLEX_INVALID;
+
+		t3_os_link_fault(adapter, port_id, 0);
+
+		/* Account link faults only when the phy reports a link up */
+		if (link_ok)
+			mac->stats.link_faults++;
+	} else {
+		if (link_ok)
+			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+				     F_TXACTENABLE | F_RXEN);
+
+		pi->link_fault = 0;
+		lc->link_ok = (unsigned char)link_ok;
+		lc->speed = speed < 0 ? SPEED_INVALID : speed;
+		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+		t3_os_link_fault(adapter, port_id, link_ok);
+	}
+}
+
+/**
+ *	t3_link_start - apply link configuration to MAC/PHY
+ *	@phy: the PHY to setup
+ *	@mac: the MAC to setup
+ *	@lc: the requested link configuration
+ *
+ *	Set up a port's MAC and PHY according to a desired link configuration.
+ *	- If the PHY can auto-negotiate first decide what to advertise, then
+ *	  enable/disable auto-negotiation as desired, and reset.
+ *	- If the PHY does not auto-negotiate just reset it.
+ *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *	  otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	lc->link_ok = 0;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
+		if (fc) {
+			lc->advertising |= ADVERTISED_Asym_Pause;
+			if (fc & PAUSE_RX)
+				lc->advertising |= ADVERTISED_Pause;
+		}
+		phy->ops->advertise(phy, lc->advertising);
+
+		if (lc->autoneg == AUTONEG_DISABLE) {
+			lc->speed = lc->requested_speed;
+			lc->duplex = lc->requested_duplex;
+			lc->fc = (unsigned char)fc;
+			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
+						   fc);
+			/* Also disables autoneg */
+			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+		} else
+			phy->ops->autoneg_enable(phy);
+	} else {
+		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
+		lc->fc = (unsigned char)fc;
+		phy->ops->reset(phy, 0);
+	}
+	return 0;
+}
+
+/**
+ *	t3_set_vlan_accel - control HW VLAN extraction
+ *	@adapter: the adapter
+ *	@ports: bitmap of adapter ports to operate on
+ *	@on: enable (1) or disable (0) HW VLAN extraction
+ *
+ *	Enables or disables HW extraction of VLAN tags for the given ports.
+ */
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
+{
+	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
+			 ports << S_VLANEXTRACTIONENABLE,
+			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
+}
+
+struct intr_info {
+	unsigned int mask;	/* bits to check in interrupt status */
+	const char *msg;	/* message to print or NULL */
+	short stat_idx;		/* stat counter to increment or -1 */
+	unsigned short fatal;	/* whether the condition reported is fatal */
+};
+
+/**
+ *	t3_handle_intr_status - table driven interrupt handler
+ *	@adapter: the adapter that generated the interrupt
+ *	@reg: the interrupt status register to process
+ *	@mask: a mask to apply to the interrupt status
+ *	@acts: table of interrupt actions
+ *	@stats: statistics counters tracking interrupt occurrences
+ *
+ *	A table driven interrupt handler that applies a set of masks to an
+ *	interrupt status word and performs the corresponding actions if the
+ *	interrupts described by the mask have occurred.  The actions include
+ *	optionally printing a warning or alert message, and optionally
+ *	incrementing a stat counter.  The table is terminated by an entry
+ *	specifying mask 0.  Returns the number of fatal interrupt conditions.
+ */
+static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
+				 unsigned int mask,
+				 const struct intr_info *acts,
+				 unsigned long *stats)
+{
+	int fatal = 0;
+	unsigned int status = t3_read_reg(adapter, reg) & mask;
+
+	for (; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+		if (acts->fatal) {
+			fatal++;
+			CH_ALERT(adapter, "%s (0x%x)\n",
+				 acts->msg, status & acts->mask);
+			status &= ~acts->mask;
+		} else if (acts->msg)
+			CH_WARN(adapter, "%s (0x%x)\n",
+				acts->msg, status & acts->mask);
+		if (acts->stat_idx >= 0)
+			stats[acts->stat_idx]++;
+	}
+	if (status)		/* clear processed interrupts */
+		t3_write_reg(adapter, reg, status);
+	return fatal;
+}
+
+#define SGE_INTR_MASK (F_RSPQDISABLED | \
+		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
+		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+		       F_LOPIODRBDROPERR)
+#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
+		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
+		       F_NFASRCHFAIL)
+#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
+#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
+		       F_TXFIFO_UNDERRUN)
+#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
+			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
+			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
+			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
+			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
+			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
+#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
+			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
+			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
+			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
+			F_TXPARERR | V_BISTERR(M_BISTERR))
+#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
+			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
+			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
+#define ULPTX_INTR_MASK 0xfc
+#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
+			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
+			 F_ZERO_SWITCH_ERROR)
+#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
+		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
+		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
+		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
+		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
+		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
+		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
+		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
+#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
+			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
+			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
+#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
+			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
+			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
+#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
+		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
+		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
+		       V_MCAPARERRENB(M_MCAPARERRENB))
+#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
+#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
+		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
+		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
+		      F_MPS0 | F_CPL_SWITCH)
+/*
+ * Interrupt handler for the PCIX1 module.
+ */
+static void pci_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pcix1_intr_info[] = {
+		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
+		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
+		{F_RCVTARABT, "PCI received target abort", -1, 1},
+		{F_RCVMSTABT, "PCI received master abort", -1, 1},
+		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
+		{F_DETPARERR, "PCI detected parity error", -1, 1},
+		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
+		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
+		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
+		 1},
+		{F_DETCORECCERR, "PCI correctable ECC error",
+		 STAT_PCI_CORR_ECC, 0},
+		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
+		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+		 1},
+		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+		 1},
+		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+		 1},
+		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+		 "error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
+				  pcix1_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pcie_intr_info[] = {
+		{F_PEXERR, "PCI PEX error", -1, 1},
+		{F_UNXSPLCPLERRR,
+		 "PCI unexpected split completion DMA read error", -1, 1},
+		{F_UNXSPLCPLERRC,
+		 "PCI unexpected split completion DMA command error", -1, 1},
+		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
+		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
+		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
+		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+		 "PCI MSI-X table/PBA parity error", -1, 1},
+		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
+		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
+		{F_RXPARERR, "PCI Rx parity error", -1, 1},
+		{F_TXPARERR, "PCI Tx parity error", -1, 1},
+		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
+		{0}
+	};
+
+	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
+		CH_ALERT(adapter, "PEX error code 0x%x\n",
+			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
+				  pcie_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info tp_intr_info[] = {
+		{0xffffff, "TP parity error", -1, 1},
+		{0x1000000, "TP out of Rx pages", -1, 1},
+		{0x2000000, "TP out of Tx pages", -1, 1},
+		{0}
+	};
+
+	static const struct intr_info tp_intr_info_t3c[] = {
+		{0x1fffffff, "TP parity error", -1, 1},
+		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
+		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
+				  adapter->params.rev < T3_REV_C ?
+				  tp_intr_info : tp_intr_info_t3c, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cim_intr_info[] = {
+		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
+		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
+		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
+		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
+		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
+		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
+		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
+		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
+		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
+		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
+		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
+		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
+		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
+		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
+		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
+		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
+		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
+		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
+		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
+		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
+		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
+		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
+		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
+		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+				  cim_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulprx_intr_info[] = {
+		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
+		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
+		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
+		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
+		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
+		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
+		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
+		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
+				  ulprx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulptx_intr_info[] = {
+		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+		 STAT_ULP_CH0_PBL_OOB, 0},
+		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+		 STAT_ULP_CH1_PBL_OOB, 0},
+		{0xfc, "ULP TX parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
+				  ulptx_intr_info, adapter->irq_stats))
+		t3_fatal_err(adapter);
+}
+
+#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
+	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
+	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
+	F_ICSPI1_TX_FRAMING_ERROR)
+#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
+	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
+	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
+	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmtx_intr_info[] = {
+		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
+		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
+		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
+		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+		 "PMTX ispi parity error", -1, 1},
+		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+		 "PMTX ospi parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
+				  pmtx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
+	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
+	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
+	F_IESPI1_TX_FRAMING_ERROR)
+#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
+	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
+	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
+	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmrx_intr_info[] = {
+		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
+		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
+		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
+		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+		 "PMRX ispi parity error", -1, 1},
+		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+		 "PMRX ospi parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
+				  pmrx_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cplsw_intr_info[] = {
+		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
+		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
+		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
+		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
+		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
+		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
+				  cplsw_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info mps_intr_info[] = {
+		{0x1ff, "MPS parity error", -1, 1},
+		{0}
+	};
+
+	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
+				  mps_intr_info, NULL))
+		t3_fatal_err(adapter);
+}
+
+#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
+
+/*
+ * MC7 interrupt handler.
+ */
+static void mc7_intr_handler(struct mc7 *mc7)
+{
+	struct adapter *adapter = mc7->adapter;
+	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
+
+	if (cause & F_CE) {
+		mc7->stats.corr_err++;
+		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
+			"data 0x%x 0x%x 0x%x\n", mc7->name,
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
+			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
+	}
+
+	if (cause & F_UE) {
+		mc7->stats.uncorr_err++;
+		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
+			 "data 0x%x 0x%x 0x%x\n", mc7->name,
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
+			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
+	}
+
+	if (G_PE(cause)) {
+		mc7->stats.parity_err++;
+		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
+			 mc7->name, G_PE(cause));
+	}
+
+	if (cause & F_AE) {
+		u32 addr = 0;
+
+		if (adapter->params.rev > 0)
+			addr = t3_read_reg(adapter,
+					   mc7->offset + A_MC7_ERR_ADDR);
+		mc7->stats.addr_err++;
+		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
+			 mc7->name, addr);
+	}
+
+	if (cause & MC7_INTR_FATAL)
+		t3_fatal_err(adapter);
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
+}
+
+#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
+/*
+ * XGMAC interrupt handler.
+ */
+static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+{
+	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
+	/*
+	 * We mask out interrupt causes for which we're not taking interrupts.
+	 * This allows us to use polling logic to monitor some of the other
+	 * conditions when taking interrupts would impose too much load on the
+	 * system.
+	 */
+	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
+		    ~F_RXFIFO_OVERFLOW;
+
+	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
+		mac->stats.tx_fifo_parity_err++;
+		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
+	}
+	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
+		mac->stats.rx_fifo_parity_err++;
+		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
+	}
+	if (cause & F_TXFIFO_UNDERRUN)
+		mac->stats.tx_fifo_urun++;
+	if (cause & F_RXFIFO_OVERFLOW)
+		mac->stats.rx_fifo_ovfl++;
+	if (cause & V_SERDES_LOS(M_SERDES_LOS))
+		mac->stats.serdes_signal_loss++;
+	if (cause & F_XAUIPCSCTCERR)
+		mac->stats.xaui_pcs_ctc_err++;
+	if (cause & F_XAUIPCSALIGNCHANGE)
+		mac->stats.xaui_pcs_align_change++;
+	if (cause & F_XGM_INT) {
+		t3_set_reg_field(adap,
+				 A_XGM_INT_ENABLE + mac->offset,
+				 F_XGM_INT, 0);
+		mac->stats.link_faults++;
+
+		t3_os_link_fault_handler(adap, idx);
+	}
+
+	if (cause & XGM_INTR_FATAL)
+		t3_fatal_err(adap);
+
+	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+	return cause != 0;
+}
+
+/*
+ * Interrupt handler for PHY events.
+ */
+int t3_phy_intr_handler(struct adapter *adapter)
+{
+	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+
+	for_each_port(adapter, i) {
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		if (!(p->phy.caps & SUPPORTED_IRQ))
+			continue;
+
+		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
+			int phy_cause = p->phy.ops->intr_handler(&p->phy);
+
+			if (phy_cause & cphy_cause_link_change)
+				t3_link_changed(adapter, i);
+			if (phy_cause & cphy_cause_fifo_error)
+				p->phy.fifo_errors++;
+			if (phy_cause & cphy_cause_module_change)
+				t3_os_phymod_changed(adapter, i);
+		}
+	}
+
+	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
+	return 0;
+}
+
+/*
+ * T3 slow path (non-data) interrupt handler.
+ */
+int t3_slow_intr_handler(struct adapter *adapter)
+{
+	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
+
+	cause &= adapter->slow_intr_mask;
+	if (!cause)
+		return 0;
+	if (cause & F_PCIM0) {
+		if (is_pcie(adapter))
+			pcie_intr_handler(adapter);
+		else
+			pci_intr_handler(adapter);
+	}
+	if (cause & F_SGE3)
+		t3_sge_err_intr_handler(adapter);
+	if (cause & F_MC7_PMRX)
+		mc7_intr_handler(&adapter->pmrx);
+	if (cause & F_MC7_PMTX)
+		mc7_intr_handler(&adapter->pmtx);
+	if (cause & F_MC7_CM)
+		mc7_intr_handler(&adapter->cm);
+	if (cause & F_CIM)
+		cim_intr_handler(adapter);
+	if (cause & F_TP1)
+		tp_intr_handler(adapter);
+	if (cause & F_ULP2_RX)
+		ulprx_intr_handler(adapter);
+	if (cause & F_ULP2_TX)
+		ulptx_intr_handler(adapter);
+	if (cause & F_PM1_RX)
+		pmrx_intr_handler(adapter);
+	if (cause & F_PM1_TX)
+		pmtx_intr_handler(adapter);
+	if (cause & F_CPL_SWITCH)
+		cplsw_intr_handler(adapter);
+	if (cause & F_MPS0)
+		mps_intr_handler(adapter);
+	if (cause & F_MC5A)
+		t3_mc5_intr_handler(&adapter->mc5);
+	if (cause & F_XGMAC0_0)
+		mac_intr_handler(adapter, 0);
+	if (cause & F_XGMAC0_1)
+		mac_intr_handler(adapter, 1);
+	if (cause & F_T3DBG)
+		t3_os_ext_intr_handler(adapter);
+
+	/* Clear the interrupts just processed. */
+	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
+	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
+	return 1;
+}
+
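+/* Build the mask of GPIO pins used for PHY interrupts on this adapter. */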
+static unsigned int calc_gpio_intr(struct adapter *adap)
+{
+	unsigned int i, gpi_intr = 0;
+
+	for_each_port(adap, i)
+		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
+		    adapter_info(adap)->gpio_intr[i])
+			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
+	return gpi_intr;
+}
+
+/**
+ *	t3_intr_enable - enable interrupts
+ *	@adapter: the adapter whose interrupts should be enabled
+ *
+ *	Enable interrupts by setting the interrupt enable registers of the
+ *	various HW modules and then enabling the top-level interrupt
+ *	concentrator.
+ */
+void t3_intr_enable(struct adapter *adapter)
+{
+	static const struct addr_val_pair intr_en_avp[] = {
+		{A_SG_INT_ENABLE, SGE_INTR_MASK},
+		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
+		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+		 MC7_INTR_MASK},
+		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+		 MC7_INTR_MASK},
+		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
+		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
+		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
+		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
+		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
+		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
+	};
+
+	adapter->slow_intr_mask = PL_INTR_MASK;
+
+	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+	t3_write_reg(adapter, A_TP_INT_ENABLE,
+		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
+
+	if (adapter->params.rev > 0) {
+		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
+			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
+		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
+			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
+			     F_PBL_BOUND_ERR_CH1);
+	} else {
+		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
+		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+	}
+
+	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
+
+	if (is_pcie(adapter))
+		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+	else
+		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
+	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
+	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
+}
+
+/**
+ *	t3_intr_disable - disable a card's interrupts
+ *	@adapter: the adapter whose interrupts should be disabled
+ *
+ *	Disable interrupts.  We only disable the top-level interrupt
+ *	concentrator and the SGE data interrupts.
+ */
+void t3_intr_disable(struct adapter *adapter)
+{
+	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
+	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
+	adapter->slow_intr_mask = 0;
+}
+
+/**
+ *	t3_intr_clear - clear all interrupts
+ *	@adapter: the adapter whose interrupts should be cleared
+ *
+ *	Clears all interrupts.
+ */
+void t3_intr_clear(struct adapter *adapter)
+{
+	static const unsigned int cause_reg_addr[] = {
+		A_SG_INT_CAUSE,
+		A_SG_RSPQ_FL_STATUS,
+		A_PCIX_INT_CAUSE,
+		A_MC7_INT_CAUSE,
+		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+		A_CIM_HOST_INT_CAUSE,
+		A_TP_INT_CAUSE,
+		A_MC5_DB_INT_CAUSE,
+		A_ULPRX_INT_CAUSE,
+		A_ULPTX_INT_CAUSE,
+		A_CPL_INTR_CAUSE,
+		A_PM1_TX_INT_CAUSE,
+		A_PM1_RX_INT_CAUSE,
+		A_MPS_INT_CAUSE,
+		A_T3DBG_INT_CAUSE,
+	};
+	unsigned int i;
+
+	/* Clear PHY and MAC interrupts for each port. */
+	for_each_port(adapter, i)
+	    t3_port_intr_clear(adapter, i);
+
+	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
+		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+
+	if (is_pcie(adapter))
+		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
+	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
+	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
+}
+
+void t3_xgm_intr_enable(struct adapter *adapter, int idx)
+{
+	struct port_info *pi = adap2pinfo(adapter, idx);
+
+	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
+		     XGM_EXTRA_INTR_MASK);
+}
+
+void t3_xgm_intr_disable(struct adapter *adapter, int idx)
+{
+	struct port_info *pi = adap2pinfo(adapter, idx);
+
+	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
+		     0x7ff);
+}
+
+/**
+ *	t3_port_intr_enable - enable port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts should be enabled
+ *
+ *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+void t3_port_intr_enable(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+	phy->ops->intr_enable(phy);
+}
+
+/**
+ *	t3_port_intr_disable - disable port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts should be disabled
+ *
+ *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+void t3_port_intr_disable(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+	phy->ops->intr_disable(phy);
+}
+
+/**
+ *	t3_port_intr_clear - clear port-specific interrupts
+ *	@adapter: associated adapter
+ *	@idx: index of port whose interrupts to clear
+ *
+ *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
+ *	adapter port.
+ */
+static void t3_port_intr_clear(struct adapter *adapter, int idx)
+{
+	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
+	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
+	phy->ops->intr_clear(phy);
+}
+
+#define SG_CONTEXT_CMD_ATTEMPTS 100
+
+/**
+ *	t3_sge_write_context - write an SGE context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@type: the context type
+ *
+ *	Program an SGE context with the values already loaded in the
+ *	CONTEXT_DATA? registers.
+ */
+static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
+				unsigned int type)
+{
+	if (type == F_RESPONSEQ) {
+		/*
+		 * Can't write the Response Queue Context bits for
+		 * Interrupt Armed or the Reserve bits after the chip
+		 * has been initialized out of reset.  Writing to these
+		 * bits can confuse the hardware.
+		 */
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	} else {
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+	}
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	clear_sge_ctxt - completely clear an SGE context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@type: the context type
+ *
+ *	Completely clear an SGE context.  Used predominantly at post-reset
+ *	initialization.  Note in particular that we don't skip writing to any
+ *	"sensitive bits" in the contexts the way that t3_sge_write_context()
+ *	does ...
+ */
+static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
+			  unsigned int type)
+{
+	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
+	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
+	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
+	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+	t3_write_reg(adap, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	t3_sge_init_ecntxt - initialize an SGE egress context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@gts_enable: whether to enable GTS for the context
+ *	@type: the egress context type
+ *	@respq: associated response queue
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@token: uP token
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE egress context and make it ready for use.  If the
+ *	platform allows concurrent context operations, the caller is
+ *	responsible for appropriate locking.
+ */
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+		       enum sge_context_type type, int respq, u64 base_addr,
+		       unsigned int size, unsigned int token, int gen,
+		       unsigned int cidx)
+{
+	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
+
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
+		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
+		     V_EC_BASE_LO(base_addr & 0xffff));
+	base_addr >>= 16;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
+		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
+		     F_EC_VALID);
+	return t3_sge_write_context(adapter, id, F_EGRESS);
+}
+
+/**
+ *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@gts_enable: whether to enable GTS for the context
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@bsize: size of each buffer for this queue
+ *	@cong_thres: threshold to signal congestion to upstream producers
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE free list context and make it ready for use.  The
+ *	caller is responsible for ensuring only one context operation occurs
+ *	at a time.
+ */
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+			int gts_enable, u64 base_addr, unsigned int size,
+			unsigned int bsize, unsigned int cong_thres, int gen,
+			unsigned int cidx)
+{
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
+		     V_FL_BASE_HI((u32) base_addr) |
+		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
+		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
+		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
+		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
+	return t3_sge_write_context(adapter, id, F_FREELIST);
+}
+
+/**
+ *	t3_sge_init_rspcntxt - initialize an SGE response queue context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@fl_thres: threshold for selecting the normal or jumbo free list
+ *	@gen: initial generation value for the context
+ *	@cidx: consumer pointer
+ *
+ *	Initialize an SGE response queue context and make it ready for use.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+			 int irq_vec_idx, u64 base_addr, unsigned int size,
+			 unsigned int fl_thres, int gen, unsigned int cidx)
+{
+	unsigned int intr = 0;
+
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
+		     V_CQ_INDEX(cidx));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+	base_addr >>= 32;
+	if (irq_vec_idx >= 0)
+		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
+	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
+}
+
+/**
+ *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
+ *	@adapter: the adapter to configure
+ *	@id: the context id
+ *	@base_addr: base address of queue
+ *	@size: number of queue entries
+ *	@rspq: response queue for async notifications
+ *	@ovfl_mode: CQ overflow mode
+ *	@credits: completion queue credits
+ *	@credit_thres: the credit threshold
+ *
+ *	Initialize an SGE completion queue context and make it ready for use.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+			unsigned int size, int rspq, int ovfl_mode,
+			unsigned int credits, unsigned int credit_thres)
+{
+	if (base_addr & 0xfff)	/* must be 4K aligned */
+		return -EINVAL;
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	base_addr >>= 12;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+	base_addr >>= 32;
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
+		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
+		     V_CQ_ERR(ovfl_mode));
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
+		     V_CQ_CREDIT_THRES(credit_thres));
+	return t3_sge_write_context(adapter, id, F_CQ);
+}
+
+/**
+ *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
+ *	@adapter: the adapter
+ *	@id: the egress context id
+ *	@enable: enable (1) or disable (0) the context
+ *
+ *	Enable or disable an SGE egress context.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	t3_sge_disable_fl - disable an SGE free-buffer list
+ *	@adapter: the adapter
+ *	@id: the free list context id
+ *
+ *	Disable an SGE free-buffer list.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	t3_sge_disable_rspcntxt - disable an SGE response queue
+ *	@adapter: the adapter
+ *	@id: the response queue context id
+ *
+ *	Disable an SGE response queue.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	t3_sge_disable_cqcntxt - disable an SGE completion queue
+ *	@adapter: the adapter
+ *	@id: the completion queue context id
+ *
+ *	Disable an SGE completion queue.  The caller is responsible for
+ *	ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
+{
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
+	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
+ *	@adapter: the adapter
+ *	@id: the context id
+ *	@op: the operation to perform
+ *
+ *	Perform the selected operation on an SGE completion queue context.
+ *	The caller is responsible for ensuring only one context operation
+ *	occurs at a time.
+ */
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+		      unsigned int credits)
+{
+	u32 val;
+
+	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
+	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
+	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
+		     V_CONTEXT(id) | F_CQ);
+	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
+		return -EIO;
+
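+	/*
+	 * Opcodes 2-6 return the updated CQ index.  Rev > 0 parts report it in
+	 * the command result; rev 0 parts need an explicit context read-back.
+	 */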
+	if (op >= 2 && op < 7) {
+		if (adapter->params.rev > 0)
+			return G_CQ_INDEX(val);
+
+		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
+		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
+				    F_CONTEXT_CMD_BUSY, 0,
+				    SG_CONTEXT_CMD_ATTEMPTS, 1))
+			return -EIO;
+		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
+	}
+	return 0;
+}
+
+/**
+ *	t3_config_rss - configure Rx packet steering
+ *	@adapter: the adapter
+ *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
+ *	@cpus: values for the CPU lookup table (0xff terminated)
+ *	@rspq: values for the response queue lookup table (0xffff terminated)
+ *
+ *	Programs the receive packet steering logic.  @cpus and @rspq provide
+ *	the values for the CPU and response queue lookup tables.  If they
+ *	provide fewer values than the size of the tables the supplied values
+ *	are used repeatedly until the tables are fully populated.
+ */
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+		   const u8 * cpus, const u16 *rspq)
+{
+	int i, j, cpu_idx = 0, q_idx = 0;
+
+	if (cpus)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			u32 val = i << 16;
+
+			for (j = 0; j < 2; ++j) {
+				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
+				if (cpus[cpu_idx] == 0xff)
+					cpu_idx = 0;
+			}
+			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
+		}
+
+	if (rspq)
+		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+				     (i << 16) | rspq[q_idx++]);
+			if (rspq[q_idx] == 0xffff)
+				q_idx = 0;
+		}
+
+	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
+}
+
+/**
+ *	t3_tp_set_offload_mode - put TP in NIC/offload mode
+ *	@adap: the adapter
+ *	@enable: 1 to select offload mode, 0 for regular NIC
+ *
+ *	Switches TP to NIC/offload mode.
+ */
+void t3_tp_set_offload_mode(struct adapter *adap, int enable)
+{
+	if (is_offload(adap) || !enable)
+		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
+				 V_NICMODE(!enable));
+}
+
+/**
+ *	pm_num_pages - calculate the number of pages of the payload memory
+ *	@mem_size: the size of the payload memory
+ *	@pg_size: the size of each payload memory page
+ *
+ *	Calculate the number of pages, each of the given size, that fit in a
+ *	memory of the specified size, respecting the HW requirement that the
+ *	number of pages must be a multiple of 24.
+ */
+static inline unsigned int pm_num_pages(unsigned int mem_size,
+					unsigned int pg_size)
+{
+	unsigned int n = mem_size / pg_size;
+
+	return n - n % 24;
+}
+
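+/*
+ * Helper for partition_mem(): programs register A_<reg> with the region's
+ * start address and then advances the running offset by the region's size.
+ */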
+#define mem_region(adap, start, size, reg) \
+	t3_write_reg((adap), A_ ## reg, (start)); \
+	start += size
+
+/**
+ *	partition_mem - partition memory and configure TP memory settings
+ *	@adap: the adapter
+ *	@p: the TP parameters
+ *
+ *	Partitions context and payload memory and configures TP's memory
+ *	registers.
+ */
+static void partition_mem(struct adapter *adap, const struct tp_params *p)
+{
+	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
+	unsigned int timers = 0, timers_shift = 22;
+
+	if (adap->params.rev > 0) {
+		if (tids <= 16 * 1024) {
+			timers = 1;
+			timers_shift = 16;
+		} else if (tids <= 64 * 1024) {
+			timers = 2;
+			timers_shift = 18;
+		} else if (tids <= 256 * 1024) {
+			timers = 3;
+			timers_shift = 20;
+		}
+	}
+
+	t3_write_reg(adap, A_TP_PMM_SIZE,
+		     p->chan_rx_size | (p->chan_tx_size >> 16));
+
+	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
+	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
+	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
+	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
+			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
+
+	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
+	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
+	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
+
+	pstructs = p->rx_num_pgs + p->tx_num_pgs;
+	/* Add a bit of headroom and make multiple of 24 */
+	pstructs += 48;
+	pstructs -= pstructs % 24;
+	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
+
+	m = tids * TCB_SIZE;
+	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
+	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
+	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
+	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
+	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
+	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
+	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
+
+	m = (m + 4095) & ~0xfff;
+	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
+	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
+
+	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
+	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
+	if (tids < m)
+		adap->params.mc5.nservers += m - tids;
+}
+
+static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
+				  u32 val)
+{
+	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+	t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+static void tp_config(struct adapter *adap, const struct tp_params *p)
+{
+	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
+		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
+		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
+	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
+		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
+		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
+		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
+		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
+		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
+	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
+			 F_IPV6ENABLE | F_NICMODE);
+	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
+	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
+	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+			 adap->params.rev > 0 ? F_ENABLEESND :
+			 F_T3A_ENABLEESND);
+
+	t3_set_reg_field(adap, A_TP_PC_CONFIG,
+			 F_ENABLEEPCMDAFULL,
+			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
+			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
+	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
+			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
+			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
+	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
+	if (adap->params.rev > 0) {
+		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
+		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
+				 F_TXPACEAUTO);
+		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
+		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+	} else
+		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+
+	if (adap->params.rev == T3_REV_C)
+		t3_set_reg_field(adap, A_TP_PC_CONFIG,
+				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
+				 V_TABLELATENCYDELTA(4));
+
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
+}
+
+/* Desired TP timer resolution in usec */
+#define TP_TMR_RES 50
+
+/* TCP timer values in ms */
+#define TP_DACK_TIMER 50
+#define TP_RTO_MIN    250
+
+/**
+ *	tp_set_timers - set TP timing parameters
+ *	@adap: the adapter to set
+ *	@core_clk: the core clock frequency in Hz
+ *
+ *	Set TP's timing parameters, such as the various timer resolutions and
+ *	the TCP timer values.
+ */
+static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
+{
+	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
+	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
+	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
+	unsigned int tps = core_clk >> tre;
+
+	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
+		     V_DELAYEDACKRESOLUTION(dack_re) |
+		     V_TIMESTAMPRESOLUTION(tstamp_re));
+	t3_write_reg(adap, A_TP_DACK_TIMER,
+		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
+	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
+	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
+		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
+		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+		     V_KEEPALIVEMAX(9));
+
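+/* "n SECONDS" expands to "n * tps", i.e. n seconds expressed in core timer ticks */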
+#define SECONDS * tps
+
+	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
+	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
+	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
+	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
+	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
+	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
+	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
+	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
+	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
+
+#undef SECONDS
+}
+
+/**
+ *	t3_tp_set_coalescing_size - set receive coalescing size
+ *	@adap: the adapter
+ *	@size: the receive coalescing size
+ *	@psh: whether a set PSH bit should deliver coalesced data
+ *
+ *	Set the receive coalescing size and PSH bit handling.
+ */
+static int t3_tp_set_coalescing_size(struct adapter *adap,
+				     unsigned int size, int psh)
+{
+	u32 val;
+
+	if (size > MAX_RX_COALESCING_LEN)
+		return -EINVAL;
+
+	val = t3_read_reg(adap, A_TP_PARA_REG3);
+	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+	if (size) {
+		val |= F_RXCOALESCEENABLE;
+		if (psh)
+			val |= F_RXCOALESCEPSHEN;
+		size = min(MAX_RX_COALESCING_LEN, size);
+		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+	}
+	t3_write_reg(adap, A_TP_PARA_REG3, val);
+	return 0;
+}
+
+/**
+ *	t3_tp_set_max_rxsize - set the max receive size
+ *	@adap: the adapter
+ *	@size: the max receive size
+ *
+ *	Set TP's max receive size.  This is the limit that applies when
+ *	receive coalescing is disabled.
+ */
+static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+{
+	t3_write_reg(adap, A_TP_PARA_REG7,
+		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void init_mtus(unsigned short mtus[])
+{
+	/*
+	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
+	 * it can accommodate max size TCP/IP headers when SACK and timestamps
+	 * are enabled and still have at least 8 bytes of payload.
+	 */
+	mtus[0] = 88;
+	mtus[1] = 88;
+	mtus[2] = 256;
+	mtus[3] = 512;
+	mtus[4] = 576;
+	mtus[5] = 1024;
+	mtus[6] = 1280;
+	mtus[7] = 1492;
+	mtus[8] = 1500;
+	mtus[9] = 2002;
+	mtus[10] = 2048;
+	mtus[11] = 4096;
+	mtus[12] = 4352;
+	mtus[13] = 8192;
+	mtus[14] = 9000;
+	mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ *	t3_load_mtus - write the MTU and congestion control HW tables
+ *	@adap: the adapter
+ *	@mtus: the unrestricted values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ *	@beta: the values for the congestion control beta parameter
+ *	@mtu_cap: the maximum permitted effective MTU
+ *
+ *	Write the MTU table with the supplied MTUs capping each at @mtu_cap.
+ *	Update the high-speed congestion control table with the supplied alpha,
+ *	beta, and MTUs.
+ */
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+		  unsigned short alpha[NCCTRL_WIN],
+		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
+{
+	static const unsigned int avg_pkts[NCCTRL_WIN] = {
+		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+		28672, 40960, 57344, 81920, 114688, 163840, 229376
+	};
+
+	unsigned int i, w;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int mtu = min(mtus[i], mtu_cap);
+		unsigned int log2 = fls(mtu);
+
+		if (!(mtu & ((1 << log2) >> 2)))	/* round */
+			log2--;
+		t3_write_reg(adap, A_TP_MTU_TABLE,
+			     (i << 24) | (log2 << 16) | mtu);
+
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			unsigned int inc;
+
+			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+				  CC_MIN_INCR);
+
+			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+				     (w << 16) | (beta[w] << 13) | inc);
+		}
+	}
+}
+
+/**
+ *	t3_tp_get_mib_stats - read TP's MIB counters
+ *	@adap: the adapter
+ *	@tps: holds the returned counter values
+ *
+ *	Returns the values of TP's MIB counters.
+ */
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
+{
+	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
+			 sizeof(*tps) / sizeof(u32), 0);
+}
+
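+/*
+ * Carve successive ULP regions out of Rx payload memory.  For example,
+ * ulp_region(adap, ISCSI, m, len) programs A_ULPRX_ISCSI_LLIMIT/_ULIMIT with
+ * [m, m + len - 1] and advances m by len; ulptx_region() programs the
+ * corresponding A_ULPTX_* limit registers without advancing the offset.
+ */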
+#define ulp_region(adap, name, start, len) \
+	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
+	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
+		     (start) + (len) - 1); \
+	start += len
+
+#define ulptx_region(adap, name, start, len) \
+	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
+	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
+		     (start) + (len) - 1)
+
+static void ulp_config(struct adapter *adap, const struct tp_params *p)
+{
+	unsigned int m = p->chan_rx_size;
+
+	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
+	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
+	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
+	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
+	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
+	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
+	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
+	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
+}
+
+/**
+ *	t3_set_proto_sram - set the contents of the protocol sram
+ *	@adapter: the adapter
+ *	@data: the protocol image
+ *
+ *	Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, const u8 *data)
+{
+	int i;
+	const __be32 *buf = (const __be32 *)data;
+
+	for (i = 0; i < PROTO_SRAM_LINES; i++) {
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
+
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+			return -EIO;
+	}
+	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+	return 0;
+}
+
+void t3_config_trace_filter(struct adapter *adapter,
+			    const struct trace_params *tp, int filter_index,
+			    int invert, int enable)
+{
+	u32 addr, key[4], mask[4];
+
+	key[0] = tp->sport | (tp->sip << 16);
+	key[1] = (tp->sip >> 16) | (tp->dport << 16);
+	key[2] = tp->dip;
+	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
+
+	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
+	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
+	mask[2] = tp->dip_mask;
+	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
+
+	if (invert)
+		key[3] |= (1 << 29);
+	if (enable)
+		key[3] |= (1 << 28);
+
+	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
+	tp_wr_indirect(adapter, addr++, key[0]);
+	tp_wr_indirect(adapter, addr++, mask[0]);
+	tp_wr_indirect(adapter, addr++, key[1]);
+	tp_wr_indirect(adapter, addr++, mask[1]);
+	tp_wr_indirect(adapter, addr++, key[2]);
+	tp_wr_indirect(adapter, addr++, mask[2]);
+	tp_wr_indirect(adapter, addr++, key[3]);
+	tp_wr_indirect(adapter, addr, mask[3]);
+	t3_read_reg(adapter, A_TP_PIO_DATA);
+}
+
+/**
+ *	t3_config_sched - configure a HW traffic scheduler
+ *	@adap: the adapter
+ *	@kbps: target rate in Kbps
+ *	@sched: the scheduler index
+ *
+ *	Configure a HW scheduler for the target rate
+ */
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
+{
+	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+	unsigned int clk = adap->params.vpd.cclk * 1000;
+	unsigned int selected_cpt = 0, selected_bpt = 0;
+
+	if (kbps > 0) {
+		kbps *= 125;	/* -> bytes */
+		for (cpt = 1; cpt <= 255; cpt++) {
+			tps = clk / cpt;
+			bpt = (kbps + tps / 2) / tps;
+			if (bpt > 0 && bpt <= 255) {
+				v = bpt * tps;
+				delta = v >= kbps ? v - kbps : kbps - v;
+				if (delta <= mindelta) {
+					mindelta = delta;
+					selected_cpt = cpt;
+					selected_bpt = bpt;
+				}
+			} else if (selected_cpt)
+				break;
+		}
+		if (!selected_cpt)
+			return -EINVAL;
+	}
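+	/*
+	 * Each TM PIO rate-limit register holds two schedulers: even-numbered
+	 * schedulers use the low 16 bits, odd-numbered ones the high 16 bits.
+	 */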
+	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
+		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+	if (sched & 1)
+		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+	else
+		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+	return 0;
+}
+
+static int tp_init(struct adapter *adap, const struct tp_params *p)
+{
+	int busy = 0;
+
+	tp_config(adap, p);
+	t3_set_vlan_accel(adap, 3, 0);
+
+	if (is_offload(adap)) {
+		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
+		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
+		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
+				       0, 1000, 5);
+		if (busy)
+			CH_ERR(adap, "TP initialization timed out\n");
+	}
+
+	if (!busy)
+		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
+	return busy;
+}
+
+/*
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
+ */
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
+{
+	int i;
+
+	if (chan_map != 3) {                                 /* one channel */
+		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
+		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
+		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+					      F_TPTXPORT1EN | F_PORT1ACTIVE));
+		t3_write_reg(adap, A_PM1_TX_CFG,
+			     chan_map == 1 ? 0xffffffff : 0);
+	} else {                                             /* two channels */
+		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
+		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
+		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
+			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
+		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
+			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
+			     F_ENFORCEPKT);
+		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
+		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
+		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
+		for (i = 0; i < 16; i++)
+			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
+				     (i << 16) | 0x1010);
+	}
+}
+
+static int calibrate_xgm(struct adapter *adapter)
+{
+	if (uses_xaui(adapter)) {
+		unsigned int v, i;
+
+		for (i = 0; i < 5; ++i) {
+			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
+			t3_read_reg(adapter, A_XGM_XAUI_IMP);
+			msleep(1);
+			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
+			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
+				t3_write_reg(adapter, A_XGM_XAUI_IMP,
+					     V_XAUIIMP(G_CALIMP(v) >> 2));
+				return 0;
+			}
+		}
+		CH_ERR(adapter, "MAC calibration failed\n");
+		return -1;
+	} else {
+		t3_write_reg(adapter, A_XGM_RGMII_IMP,
+			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+				 F_XGM_IMPSETUPDATE);
+	}
+	return 0;
+}
+
+static void calibrate_xgm_t3b(struct adapter *adapter)
+{
+	if (!uses_xaui(adapter)) {
+		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
+			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
+				 F_XGM_IMPSETUPDATE);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+				 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
+		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
+	}
+}
+
+struct mc7_timing_params {
+	unsigned char ActToPreDly;
+	unsigned char ActToRdWrDly;
+	unsigned char PreCyc;
+	unsigned char RefCyc[5];
+	unsigned char BkCyc;
+	unsigned char WrToRdDly;
+	unsigned char RdToWrDly;
+};
+
+/*
+ * Write a value to a register and check that the write completed.  These
+ * writes normally complete in a cycle or two, so one read should suffice.
+ * The very first read exists to flush the posted write to the device.
+ */
+static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
+{
+	t3_write_reg(adapter, addr, val);
+	t3_read_reg(adapter, addr);	/* flush */
+	if (!(t3_read_reg(adapter, addr) & F_BUSY))
+		return 0;
+	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
+	return -EIO;
+}
+
+static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
+{
+	static const unsigned int mc7_mode[] = {
+		0x632, 0x642, 0x652, 0x432, 0x442
+	};
+	static const struct mc7_timing_params mc7_timings[] = {
+		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
+		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
+		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
+		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
+		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
+	};
+
+	u32 val;
+	unsigned int width, density, slow, attempts;
+	struct adapter *adapter = mc7->adapter;
+	const struct mc7_timing_params *p = &mc7_timings[mem_type];
+
+	if (!mc7->size)
+		return 0;
+
+	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+	slow = val & F_SLOW;
+	width = G_WIDTH(val);
+	density = G_DEN(val);
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
+	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
+	msleep(1);
+
+	if (!slow) {
+		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
+		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+		msleep(1);
+		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
+		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
+			CH_ERR(adapter, "%s MC7 calibration timed out\n",
+			       mc7->name);
+			goto out_fail;
+		}
+	}
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
+		     V_ACTTOPREDLY(p->ActToPreDly) |
+		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
+		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
+		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
+		     val | F_CLKEN | F_TERM150);
+	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
+
+	if (!slow)
+		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
+				 F_DLLENB);
+	udelay(1);
+
+	val = slow ? 3 : 6;
+	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+		goto out_fail;
+
+	if (!slow) {
+		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
+		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
+		udelay(5);
+	}
+
+	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
+		       mc7_mode[mem_type]) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
+	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+		goto out_fail;
+
+	/* clock value is in KHz */
+	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
+	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
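+	/* mc7_clock is now the memory-clock cycles per 7812.5 ns refresh period */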
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
+		     F_PERREFEN | V_PREREFDIV(mc7_clock));
+	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
+
+	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
+		     (mc7->size << width) - 1);
+	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
+	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
+
+	attempts = 50;
+	do {
+		msleep(250);
+		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
+	} while ((val & F_BUSY) && --attempts);
+	if (val & F_BUSY) {
+		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
+		goto out_fail;
+	}
+
+	/* Enable normal memory accesses. */
+	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
+	return 0;
+
+out_fail:
+	return -1;
+}
+
+static void config_pcie(struct adapter *adap)
+{
+	static const u16 ack_lat[4][6] = {
+		{237, 416, 559, 1071, 2095, 4143},
+		{128, 217, 289, 545, 1057, 2081},
+		{73, 118, 154, 282, 538, 1050},
+		{67, 107, 86, 150, 278, 534}
+	};
+	static const u16 rpl_tmr[4][6] = {
+		{711, 1248, 1677, 3213, 6285, 12429},
+		{384, 651, 867, 1635, 3171, 6243},
+		{219, 354, 462, 846, 1614, 3150},
+		{201, 321, 258, 450, 834, 1602}
+	};
+
+	u16 val, devid;
+	unsigned int log2_width, pldsize;
+	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
+
+	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
+	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+
+	pci_read_config_word(adap->pdev, 0x2, &devid);
+	if (devid == 0x37) {
+		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
+					   val & ~PCI_EXP_DEVCTL_READRQ &
+					   ~PCI_EXP_DEVCTL_PAYLOAD);
+		pldsize = 0;
+	}
+
+	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
+
+	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
+	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
+	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+	log2_width = fls(adap->params.pci.width) - 1;
+	acklat = ack_lat[log2_width][pldsize];
+	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check L0s enable */
+		acklat += fst_trn_tx * 4;
+	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+	if (adap->params.rev == 0)
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+				 V_T3A_ACKLAT(M_T3A_ACKLAT),
+				 V_T3A_ACKLAT(acklat));
+	else
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+				 V_ACKLAT(acklat));
+
+	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+			 V_REPLAYLMT(rpllmt));
+
+	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
+			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
+}
+
+/*
+ * Initialize and configure T3 HW modules.  This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to FW and their value is platform dependent.  Only the
+ * top 8 bits are available for use, the rest must be 0.
+ */
+int t3_init_hw(struct adapter *adapter, u32 fw_params)
+{
+	int err = -EIO, attempts, i;
+	const struct vpd_params *vpd = &adapter->params.vpd;
+
+	if (adapter->params.rev > 0)
+		calibrate_xgm_t3b(adapter);
+	else if (calibrate_xgm(adapter))
+		goto out_err;
+
+	if (vpd->mclk) {
+		partition_mem(adapter, &adapter->params.tp);
+
+		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+				adapter->params.mc5.nfilters,
+				adapter->params.mc5.nroutes))
+			goto out_err;
+
+		for (i = 0; i < 32; i++)
+			if (clear_sge_ctxt(adapter, i, F_CQ))
+				goto out_err;
+	}
+
+	if (tp_init(adapter, &adapter->params.tp))
+		goto out_err;
+
+	t3_tp_set_coalescing_size(adapter,
+				  min(adapter->params.sge.max_pkt_size,
+				      MAX_RX_COALESCING_LEN), 1);
+	t3_tp_set_max_rxsize(adapter,
+			     min(adapter->params.sge.max_pkt_size, 16384U));
+	ulp_config(adapter, &adapter->params.tp);
+
+	if (is_pcie(adapter))
+		config_pcie(adapter);
+	else
+		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
+				 F_DMASTOPEN | F_CLIDECEN);
+
+	if (adapter->params.rev == T3_REV_C)
+		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
+				 F_CFG_CQE_SOP_MASK);
+
+	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
+	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
+	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
+	chan_init_hw(adapter, adapter->params.chan_map);
+	t3_sge_init(adapter, &adapter->params.sge);
+	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
+
+	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
+
+	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+	t3_write_reg(adapter, A_CIM_BOOT_CFG,
+		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
+
+	attempts = 100;
+	do {			/* wait for uP to initialize */
+		msleep(20);
+	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+	if (!attempts) {
+		CH_ERR(adapter, "uP initialization timed out\n");
+		goto out_err;
+	}
+
+	err = 0;
+out_err:
+	return err;
+}
+
+/**
+ *	get_pci_mode - determine a card's PCI mode
+ *	@adapter: the adapter
+ *	@p: where to store the PCI settings
+ *
+ *	Determines a card's PCI mode and associated parameters, such as speed
+ *	and width.
+ */
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
+{
+	static unsigned short speed_map[] = { 33, 66, 100, 133 };
+	u32 pci_mode;
+
+	if (pci_is_pcie(adapter->pdev)) {
+		u16 val;
+
+		p->variant = PCI_VARIANT_PCIE;
+		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
+		p->width = (val >> 4) & 0x3f;
+		return;
+	}
+
+	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
+	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
+	p->width = (pci_mode & F_64BIT) ? 64 : 32;
+	pci_mode = G_PCIXINITPAT(pci_mode);
+	if (pci_mode == 0)
+		p->variant = PCI_VARIANT_PCI;
+	else if (pci_mode < 4)
+		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
+	else if (pci_mode < 8)
+		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
+	else
+		p->variant = PCI_VARIANT_PCIX_266_MODE2;
+}
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: bitmap of the link's supported capabilities (SUPPORTED_* values)
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/duplex/flow-control/autonegotiation
+ *	settings.
+ */
+static void init_link_config(struct link_config *lc, unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = lc->speed = SPEED_INVALID;
+	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/**
+ *	mc7_calc_size - calculate MC7 memory size
+ *	@cfg: the MC7 configuration
+ *
+ *	Calculates the size of an MC7 memory in bytes from the value of its
+ *	configuration register.
+ */
+static unsigned int mc7_calc_size(u32 cfg)
+{
+	unsigned int width = G_WIDTH(cfg);
+	unsigned int banks = !!(cfg & F_BKS) + 1;
+	unsigned int org = !!(cfg & F_ORG) + 1;
+	unsigned int density = G_DEN(cfg);
+	unsigned int MBs = ((256 << density) * banks) / (org << width);
+
+	return MBs << 20;
+}
+
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+		     unsigned int base_addr, const char *name)
+{
+	u32 cfg;
+
+	mc7->adapter = adapter;
+	mc7->name = name;
+	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
+	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+	mc7->width = G_WIDTH(cfg);
+}
+
+static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
+{
+	u16 devid;
+
+	mac->adapter = adapter;
+	pci_read_config_word(adapter->pdev, 0x2, &devid);
+
+	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
+		index = 0;
+	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
+	mac->nucast = 1;
+
+	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
+		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
+			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
+		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
+				 F_ENRGMII, 0);
+	}
+}
+
+static void early_hw_init(struct adapter *adapter,
+			  const struct adapter_info *ai)
+{
+	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+
+	mi1_init(adapter, ai);
+	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
+		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
+	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
+		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
+	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
+
+	if (adapter->params.rev == 0 || !uses_xaui(adapter))
+		val |= F_ENRGMII;
+
+	/* Enable MAC clocks so we can access the registers */
+	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+
+	val |= F_CLKDIVRESET_;
+	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
+	t3_read_reg(adapter, A_XGM_PORT_CFG);
+}
+
+/*
+ * Reset the adapter.
+ * Older PCIe cards lose their config space during reset, PCI-X
+ * ones don't.
+ */
+int t3_reset_adapter(struct adapter *adapter)
+{
+	int i, save_and_restore_pcie =
+	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
+	uint16_t devid = 0;
+
+	if (save_and_restore_pcie)
+		pci_save_state(adapter->pdev);
+	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
+
+	/*
+	 * Delay to give the device some time to reset fully.
+	 * XXX The delay time should be modified.
+	 */
+	for (i = 0; i < 10; i++) {
+		msleep(50);
+		pci_read_config_word(adapter->pdev, 0x00, &devid);
+		if (devid == 0x1425)
+			break;
+	}
+
+	if (devid != 0x1425)
+		return -1;
+
+	if (save_and_restore_pcie)
+		pci_restore_state(adapter->pdev);
+	return 0;
+}
+
+static int init_parity(struct adapter *adap)
+{
+	int i, err, addr;
+
+	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+		return -EBUSY;
+
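+	/*
+	 * Zero the SGE egress/response-queue contexts and the CIM IBQ memory,
+	 * presumably so their parity is consistent before fatal parity checking
+	 * (F_FATALPERREN) is enabled later in t3_init_hw().
+	 */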
+	for (err = i = 0; !err && i < 16; i++)
+		err = clear_sge_ctxt(adap, i, F_EGRESS);
+	for (i = 0xfff0; !err && i <= 0xffff; i++)
+		err = clear_sge_ctxt(adap, i, F_EGRESS);
+	for (i = 0; !err && i < SGE_QSETS; i++)
+		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
+	if (err)
+		return err;
+
+	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
+	for (i = 0; i < 4; i++)
+		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
+			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
+				     F_IBQDBGWR | V_IBQDBGQID(i) |
+				     V_IBQDBGADDR(addr));
+			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
+					      F_IBQDBGBUSY, 0, 2, 1);
+			if (err)
+				return err;
+		}
+	return 0;
+}
+
+/*
+ * Initialize adapter SW state for the various HW modules, set initial values
+ * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
+ * interface.
+ */
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+		    int reset)
+{
+	int ret;
+	unsigned int i, j = -1;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+
+	adapter->params.info = ai;
+	adapter->params.nports = ai->nports0 + ai->nports1;
+	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
+	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
+	/*
+	 * We used to only run the "adapter check task" once a second if
+	 * we had PHYs which didn't support interrupts (we would check
+	 * their link status once a second).  Now we check other conditions
+	 * in that routine which could potentially impose a very high
+	 * interrupt load on the system.  As such, we now always scan the
+	 * adapter state once a second ...
+	 */
+	adapter->params.linkpoll_period = 10;
+	adapter->params.stats_update_period = is_10G(adapter) ?
+	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+	adapter->params.pci.vpd_cap_addr =
+	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return ret;
+
+	if (reset && t3_reset_adapter(adapter))
+		return -1;
+
+	t3_sge_prep(adapter, &adapter->params.sge);
+
+	if (adapter->params.vpd.mclk) {
+		struct tp_params *p = &adapter->params.tp;
+
+		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
+		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
+		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
+
+		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
+		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
+		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
+		p->cm_size = t3_mc7_size(&adapter->cm);
+		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
+		p->chan_tx_size = p->pmtx_size / p->nchan;
+		p->rx_pg_size = 64 * 1024;
+		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
+		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
+		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
+		p->ntimer_qs = p->cm_size >= (128 << 20) ||
+		    adapter->params.rev > 0 ? 12 : 6;
+	}
+
+	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
+				  t3_mc7_size(&adapter->pmtx) &&
+				  t3_mc7_size(&adapter->cm);
+
+	if (is_offload(adapter)) {
+		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
+		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
+		    DEFAULT_NFILTERS : 0;
+		adapter->params.mc5.nroutes = 0;
+		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+
+		init_mtus(adapter->params.mtus);
+		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+	}
+
+	early_hw_init(adapter, ai);
+	ret = init_parity(adapter);
+	if (ret)
+		return ret;
+
+	for_each_port(adapter, i) {
+		u8 hw_addr[6];
+		const struct port_type_info *pti;
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		while (!adapter->params.vpd.port_type[++j])
+			;
+
+		pti = &port_types[adapter->params.vpd.port_type[j]];
+		if (!pti->phy_prep) {
+			CH_ALERT(adapter, "Invalid port type index %d\n",
+				 adapter->params.vpd.port_type[j]);
+			return -EINVAL;
+		}
+
+		p->phy.mdio.dev = adapter->port[i];
+		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+				    ai->mdio_ops);
+		if (ret)
+			return ret;
+		mac_prep(&p->mac, adapter, j);
+
+		/*
+		 * The VPD EEPROM stores the base Ethernet address for the
+		 * card.  A port's address is derived from the base by adding
+		 * the port's index to the base's low octet.
+		 */
+		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
+		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
+
+		memcpy(adapter->port[i]->dev_addr, hw_addr,
+		       ETH_ALEN);
+		init_link_config(&p->link_config, p->phy.caps);
+		p->phy.ops->power_down(&p->phy, 1);
+
+		/*
+		 * If the PHY doesn't support interrupts for link status
+		 * changes, schedule a scan of the adapter links at least
+		 * once a second.
+		 */
+		if (!(p->phy.caps & SUPPORTED_IRQ) &&
+		    adapter->params.linkpoll_period > 10)
+			adapter->params.linkpoll_period = 10;
+	}
+
+	return 0;
+}
+
+void t3_led_ready(struct adapter *adapter)
+{
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+			 F_GPIO0_OUT_VAL);
+}
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+	const struct adapter_info *ai = adapter->params.info;
+	unsigned int i, j = -1;
+	int ret;
+
+	early_hw_init(adapter, ai);
+	ret = init_parity(adapter);
+	if (ret)
+		return ret;
+
+	for_each_port(adapter, i) {
+		const struct port_type_info *pti;
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		while (!adapter->params.vpd.port_type[++j])
+			;
+
+		pti = &port_types[adapter->params.vpd.port_type[j]];
+		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
+		if (ret)
+			return ret;
+		p->phy.ops->power_down(&p->phy, 1);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
new file mode 100644
index 0000000..705713b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2006-2008 Chelsio Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T3CDEV_H_
+#define _T3CDEV_H_
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <net/neighbour.h>
+
+#define T3CNAMSIZ 16
+
+struct cxgb3_client;
+
+enum t3ctype {
+	T3A = 0,
+	T3B,
+	T3C,
+};
+
+struct t3cdev {
+	char name[T3CNAMSIZ];	/* T3C device name */
+	enum t3ctype type;
+	struct list_head ofld_dev_list;	/* for list linking */
+	struct net_device *lldev;	/* LL dev associated with T3C messages */
+	struct proc_dir_entry *proc_dir;	/* root of proc dir for this T3C */
+	int (*send)(struct t3cdev *dev, struct sk_buff *skb);
+	int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
+	int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
+	void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
+	void *priv;		/* driver private data */
+	void *l2opt;		/* optional layer 2 data */
+	void *l3opt;		/* optional layer 3 data */
+	void *l4opt;		/* optional layer 4 data */
+	void *ulp;		/* ulp stuff */
+	void *ulp_iscsi;	/* ulp iscsi */
+};
+
+#endif				/* _T3CDEV_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
new file mode 100644
index 0000000..165bfb9
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb3"
+/* Driver version */
+#define DRV_VERSION "1.1.5-ko"
+
+/* Firmware version */
+#define FW_VERSION_MAJOR 7
+#define FW_VERSION_MINOR 12
+#define FW_VERSION_MICRO 0
+#endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
new file mode 100644
index 0000000..4f9a1c2
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+
+/* VSC8211 PHY specific registers. */
+enum {
+	VSC8211_SIGDET_CTRL = 19,
+	VSC8211_EXT_CTRL = 23,
+	VSC8211_INTR_ENABLE = 25,
+	VSC8211_INTR_STATUS = 26,
+	VSC8211_LED_CTRL = 27,
+	VSC8211_AUX_CTRL_STAT = 28,
+	VSC8211_EXT_PAGE_AXS = 31,
+};
+
+enum {
+	VSC_INTR_RX_ERR = 1 << 0,
+	VSC_INTR_MS_ERR = 1 << 1,  /* master/slave resolution error */
+	VSC_INTR_CABLE = 1 << 2,  /* cable impairment */
+	VSC_INTR_FALSE_CARR = 1 << 3,  /* false carrier */
+	VSC_INTR_MEDIA_CHG = 1 << 4,  /* AMS media change */
+	VSC_INTR_RX_FIFO = 1 << 5,  /* Rx FIFO over/underflow */
+	VSC_INTR_TX_FIFO = 1 << 6,  /* Tx FIFO over/underflow */
+	VSC_INTR_DESCRAMBL = 1 << 7,  /* descrambler lock-lost */
+	VSC_INTR_SYMBOL_ERR = 1 << 8,  /* symbol error */
+	VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+	VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+	VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
+	VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+	VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
+	VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+enum {
+	VSC_CTRL_CLAUSE37_VIEW = 1 << 4,   /* Switch to Clause 37 view */
+	VSC_CTRL_MEDIA_MODE_HI = 0xf000    /* High part of media mode select */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+			   VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
+			   VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+		   VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR    0
+#define M_ACSR_ACTIPHY_TMR    0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED    3
+#define M_ACSR_SPEED    0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY.  This PHY completes reset immediately so we never wait.
+ */
+static int vsc8211_reset(struct cphy *cphy, int wait)
+{
+	return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
+}
+
+static int vsc8211_intr_enable(struct cphy *cphy)
+{
+	return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
+			     INTR_MASK);
+}
+
+static int vsc8211_intr_disable(struct cphy *cphy)
+{
+	return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
+}
+
+static int vsc8211_intr_clear(struct cphy *cphy)
+{
+	u32 val;
+
+	/* Clear PHY interrupts by reading the register. */
+	return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
+}
+
+static int vsc8211_autoneg_enable(struct cphy *cphy)
+{
+	return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+				   BMCR_PDOWN | BMCR_ISOLATE,
+				   BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8211_autoneg_restart(struct cphy *cphy)
+{
+	return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+				   BMCR_PDOWN | BMCR_ISOLATE,
+				   BMCR_ANRESTART);
+}
+
+static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
+				   int *speed, int *duplex, int *fc)
+{
+	unsigned int bmcr, status, lpa, adv;
+	int err, sp = -1, dplx = -1, pause = 0;
+
+	err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+	if (!err)
+		err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+	if (err)
+		return err;
+
+	if (link_ok) {
+		/*
+		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+		 * once more to get the current link state.
+		 */
+		if (!(status & BMSR_LSTATUS))
+			err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+					   &status);
+		if (err)
+			return err;
+		*link_ok = (status & BMSR_LSTATUS) != 0;
+	}
+	if (!(bmcr & BMCR_ANENABLE)) {
+		dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+		if (bmcr & BMCR_SPEED1000)
+			sp = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			sp = SPEED_100;
+		else
+			sp = SPEED_10;
+	} else if (status & BMSR_ANEGCOMPLETE) {
+		err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
+				   &status);
+		if (err)
+			return err;
+
+		dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+		sp = G_ACSR_SPEED(status);
+		if (sp == 0)
+			sp = SPEED_10;
+		else if (sp == 1)
+			sp = SPEED_100;
+		else
+			sp = SPEED_1000;
+
+		if (fc && dplx == DUPLEX_FULL) {
+			err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
+					   &lpa);
+			if (!err)
+				err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
+						   MII_ADVERTISE, &adv);
+			if (err)
+				return err;
+
+			if (lpa & adv & ADVERTISE_PAUSE_CAP)
+				pause = PAUSE_RX | PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+				 (lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_ASYM))
+				pause = PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_CAP))
+				pause = PAUSE_RX;
+		}
+	}
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
+					 int *speed, int *duplex, int *fc)
+{
+	unsigned int bmcr, status, lpa, adv;
+	int err, sp = -1, dplx = -1, pause = 0;
+
+	err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+	if (!err)
+		err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+	if (err)
+		return err;
+
+	if (link_ok) {
+		/*
+		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+		 * once more to get the current link state.
+		 */
+		if (!(status & BMSR_LSTATUS))
+			err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+					   &status);
+		if (err)
+			return err;
+		*link_ok = (status & BMSR_LSTATUS) != 0;
+	}
+	if (!(bmcr & BMCR_ANENABLE)) {
+		dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+		if (bmcr & BMCR_SPEED1000)
+			sp = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			sp = SPEED_100;
+		else
+			sp = SPEED_10;
+	} else if (status & BMSR_ANEGCOMPLETE) {
+		err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
+		if (!err)
+			err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
+					   &adv);
+		if (err)
+			return err;
+
+		if (adv & lpa & ADVERTISE_1000XFULL) {
+			dplx = DUPLEX_FULL;
+			sp = SPEED_1000;
+		} else if (adv & lpa & ADVERTISE_1000XHALF) {
+			dplx = DUPLEX_HALF;
+			sp = SPEED_1000;
+		}
+
+		if (fc && dplx == DUPLEX_FULL) {
+			if (lpa & adv & ADVERTISE_1000XPAUSE)
+				pause = PAUSE_RX | PAUSE_TX;
+			else if ((lpa & ADVERTISE_1000XPAUSE) &&
+				 (adv & lpa & ADVERTISE_1000XPSE_ASYM))
+				pause = PAUSE_TX;
+			else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
+				 (adv & ADVERTISE_1000XPAUSE))
+				pause = PAUSE_RX;
+		}
+	}
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+#ifdef UNUSED
+/*
+ * Enable/disable auto MDI/MDI-X in forced link speed mode.
+ */
+static int vsc8211_set_automdi(struct cphy *phy, int enable)
+{
+	int err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	int err;
+
+	err = t3_set_phy_speed_duplex(phy, speed, duplex);
+	if (!err)
+		err = vsc8211_set_automdi(phy, 1);
+	return err;
+}
+#endif /* UNUSED */
+
+static int vsc8211_power_down(struct cphy *cphy, int enable)
+{
+	return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+				   enable ? BMCR_PDOWN : 0);
+}
+
+static int vsc8211_intr_handler(struct cphy *cphy)
+{
+	unsigned int cause;
+	int err, cphy_cause = 0;
+
+	err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
+	if (err)
+		return err;
+
+	cause &= INTR_MASK;
+	if (cause & CFG_CHG_INTR_MASK)
+		cphy_cause |= cphy_cause_link_change;
+	if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+		cphy_cause |= cphy_cause_fifo_error;
+	return cphy_cause;
+}
+
+static struct cphy_ops vsc8211_ops = {
+	.reset = vsc8211_reset,
+	.intr_enable = vsc8211_intr_enable,
+	.intr_disable = vsc8211_intr_disable,
+	.intr_clear = vsc8211_intr_clear,
+	.intr_handler = vsc8211_intr_handler,
+	.autoneg_enable = vsc8211_autoneg_enable,
+	.autoneg_restart = vsc8211_autoneg_restart,
+	.advertise = t3_phy_advertise,
+	.set_speed_duplex = t3_set_phy_speed_duplex,
+	.get_link_status = vsc8211_get_link_status,
+	.power_down = vsc8211_power_down,
+};
+
+static struct cphy_ops vsc8211_fiber_ops = {
+	.reset = vsc8211_reset,
+	.intr_enable = vsc8211_intr_enable,
+	.intr_disable = vsc8211_intr_disable,
+	.intr_clear = vsc8211_intr_clear,
+	.intr_handler = vsc8211_intr_handler,
+	.autoneg_enable = vsc8211_autoneg_enable,
+	.autoneg_restart = vsc8211_autoneg_restart,
+	.advertise = t3_phy_advertise_fiber,
+	.set_speed_duplex = t3_set_phy_speed_duplex,
+	.get_link_status = vsc8211_get_link_status_fiber,
+	.power_down = vsc8211_power_down,
+};
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	int err;
+	unsigned int val;
+
+	cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
+		  SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+		  SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
+		  SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
+	msleep(20);       /* PHY needs ~10ms to start responding to MDIO */
+
+	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
+	if (err)
+		return err;
+	if (val & VSC_CTRL_MEDIA_MODE_HI) {
+		/* copper interface, just need to configure the LEDs */
+		return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
+				     0x100);
+	}
+
+	phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+		    SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
+	phy->desc = "1000BASE-X";
+	phy->ops = &vsc8211_fiber_ops;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+	if (err)
+		return err;
+
+	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
+			    val | VSC_CTRL_CLAUSE37_VIEW);
+	if (err)
+		return err;
+
+	err = vsc8211_reset(phy, 0);
+	if (err)
+		return err;
+
+	udelay(5); /* delay after reset before next SMI */
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
new file mode 100644
index 0000000..3af19a5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+/*
+ * # of exact address filters.  The first one is used for the station address,
+ * the rest are available for multicast addresses.
+ */
+#define EXACT_ADDR_FILTERS 8
+
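+/*
+ * Map a MAC's register block offset back to its port index (0 or 1).
+ */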
+static inline int macidx(const struct cmac *mac)
+{
+	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
+}
+
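+/*
+ * Reset the XAUI SERDES: assert all lane resets, power-downs and PLL resets,
+ * then release them in stages with a short delay between each step.
+ */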
+static void xaui_serdes_reset(struct cmac *mac)
+{
+	static const unsigned int clear[] = {
+		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+	};
+
+	int i;
+	struct adapter *adap = mac->adapter;
+	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
+
+	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
+		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
+		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
+		     F_RESETPLL23 | F_RESETPLL01);
+	t3_read_reg(adap, ctrl);
+	udelay(15);
+
+	for (i = 0; i < ARRAY_SIZE(clear); i++) {
+		t3_set_reg_field(adap, ctrl, clear[i], 0);
+		udelay(15);
+	}
+}
+
+void t3b_pcs_reset(struct cmac *mac)
+{
+	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+			 F_PCS_RESET_, 0);
+	udelay(20);
+	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
+			 F_PCS_RESET_);
+}
+
+int t3_mac_reset(struct cmac *mac)
+{
+	static const struct addr_val_pair mac_reset_avp[] = {
+		{A_XGM_TX_CTRL, 0},
+		{A_XGM_RX_CTRL, 0},
+		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
+		{A_XGM_RX_HASH_LOW, 0},
+		{A_XGM_RX_HASH_HIGH, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
+		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
+		{A_XGM_STAT_CTRL, F_CLRSTATS}
+	};
+	u32 val;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+
+	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
+	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
+			 F_RXSTRFRWRD | F_DISERRFRAMES,
+			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
+
+	if (uses_xaui(adap)) {
+		if (adap->params.rev == 0) {
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_RXENABLE | F_TXENABLE);
+			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+					    F_CMULOCK, 1, 5, 2)) {
+				CH_ERR(adap,
+				       "MAC %d XAUI SERDES CMU lock failed\n",
+				       macidx(mac));
+				return -1;
+			}
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_SERDESRESET_);
+		} else
+			xaui_serdes_reset(mac);
+	}
+
+	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
+			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
+	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+
+	memset(&mac->stats, 0, sizeof(mac->stats));
+	return 0;
+}
+
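+/*
+ * Heavier-weight MAC reset used on T3 rev B2 parts: deactivate the port,
+ * stop NIC traffic, drain the Rx FIFO, reset the MAC, then restore the TP
+ * drop configuration and reactivate the port.
+ */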
+static int t3b2_mac_reset(struct cmac *mac)
+{
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset, store;
+	int idx = macidx(mac);
+	u32 val;
+
+	if (!macidx(mac))
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+	/* Stop NIC traffic to reduce the number of TXTOGGLES */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
+	/* Ensure TX drains */
+	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
+
+	/* Store A_TP_TX_DROP_CFG_CH0 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
+
+	msleep(10);
+
+	/* Change DROP_CFG to 0xc0000011 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
+
+	/*
+	 * Check for XGM Rx FIFO empty.  The loop count was increased from 5
+	 * to 1000 to cover the 1G and 100Mbps cases.
+	 */
+	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+			    0x80000000, 1, 1000, 2)) {
+		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+		       macidx(mac));
+		return -1;
+	}
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
+
+	val = F_MAC_RESET_;
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);  /* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+	t3_write_reg(adap, A_XGM_RX_CFG + oft,
+		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+	/* Restore the DROP_CFG */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, store);
+
+	if (!idx)
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
+	/* Re-enable NIC traffic */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
+	return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
+{
+	u32 addr_lo, addr_hi;
+	unsigned int oft = mac->offset + idx * 8;
+
+	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	addr_hi = (addr[5] << 8) | addr[4];
+
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+{
+	if (idx >= mac->nucast)
+		return -EINVAL;
+	set_addr_filter(mac, idx, addr);
+	return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses.  Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+	if (n > EXACT_ADDR_FILTERS)
+		return -EINVAL;
+	mac->nucast = n;
+	return 0;
+}
+
+void t3_mac_disable_exact_filters(struct cmac *mac)
+{
+	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
+
+	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+		u32 v = t3_read_reg(mac->adapter, reg);
+		t3_write_reg(mac->adapter, reg, v);
+	}
+	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
+}
+
+void t3_mac_enable_exact_filters(struct cmac *mac)
+{
+	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
+
+	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+		u32 v = t3_read_reg(mac->adapter, reg);
+		t3_write_reg(mac->adapter, reg, v);
+	}
+	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
+}
+
+/* Calculate the RX hash filter index of an Ethernet address */
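+/* All 48 address bits are XOR-folded into a 6-bit value, selecting one of
+ * the 64 hash-filter bits split across the LOW and HIGH hash registers.
+ */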
+static int hash_hw_addr(const u8 * addr)
+{
+	int hash = 0, octet, bit, i = 0, c;
+
+	for (octet = 0; octet < 6; ++octet)
+		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+			hash ^= (c & 1) << i;
+			if (++i == 6)
+				i = 0;
+		}
+	return hash;
+}
+
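+/*
+ * Program the MAC's Rx mode: the promiscuous and all-multicast flags, any
+ * spare exact-match filters for multicast addresses, and the 64-bit hash
+ * filter for the rest.
+ */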
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
+{
+	u32 val, hash_lo, hash_hi;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+	if (dev->flags & IFF_PROMISC)
+		val |= F_COPYALLFRAMES;
+	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+	if (dev->flags & IFF_ALLMULTI)
+		hash_lo = hash_hi = 0xffffffff;
+	else {
+		struct netdev_hw_addr *ha;
+		int exact_addr_idx = mac->nucast;
+
+		hash_lo = hash_hi = 0;
+		netdev_for_each_mc_addr(ha, dev)
+			if (exact_addr_idx < EXACT_ADDR_FILTERS)
+				set_addr_filter(mac, exact_addr_idx++,
+						ha->addr);
+			else {
+				int hash = hash_hw_addr(ha->addr);
+
+				if (hash < 32)
+					hash_lo |= (1 << hash);
+				else
+					hash_hi |= (1 << (hash - 32));
+			}
+	}
+
+	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+	return 0;
+}
+
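+/*
+ * Rx FIFO high watermark for PAUSE generation: leave room for 3 MTU-sized
+ * frames but never go below 38% of the FIFO, and always keep at least 8KB
+ * of headroom.
+ */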
+static int rx_fifo_hwm(int mtu)
+{
+	int hwm;
+
+	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
+	return min(hwm, MAC_RXFIFO_SIZE - 8192);
+}
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+	int hwm, lwm, divisor;
+	int ipg;
+	unsigned int thres, v, reg;
+	struct adapter *adap = mac->adapter;
+
+	/*
+	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
+	 * packet size register includes header, but not FCS.
+	 */
+	mtu += 14;
+	if (mtu > 1536)
+		mtu += 4;
+
+	if (mtu > MAX_FRAME_SIZE - 4)
+		return -EINVAL;
+	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+	if (adap->params.rev >= T3_REV_B2 &&
+	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
+		t3_mac_disable_exact_filters(mac);
+		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
+		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
+				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
+
+		reg = adap->params.rev == T3_REV_B2 ?
+			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
+
+		/* drain RX FIFO */
+		if (t3_wait_op_done(adap, reg + mac->offset,
+				    F_RXFIFO_EMPTY, 1, 20, 5)) {
+			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+			t3_mac_enable_exact_filters(mac);
+			return -EIO;
+		}
+		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+				 V_RXMAXPKTSIZE(mtu));
+		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+		t3_mac_enable_exact_filters(mac);
+	} else
+		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+				 V_RXMAXPKTSIZE(mtu));
+
+	/*
+	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
+	 * HWM only if flow-control is enabled.
+	 */
+	hwm = rx_fifo_hwm(mtu);
+	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
+	v |= V_RXFIFOPAUSELWM(lwm / 8);
+	if (G_RXFIFOPAUSEHWM(v))
+		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
+		    V_RXFIFOPAUSEHWM(hwm / 8);
+
+	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+
+	/* Adjust the TX FIFO threshold based on the MTU */
+	thres = (adap->params.vpd.cclk * 1000) / 15625;
+	thres = (thres * mtu) / 1000;
+	if (is_10G(adap))
+		thres /= 10;
+	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
+	thres = max(thres, 8U);	/* need at least 8 */
+	ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
+	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
+			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
+			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
+
+	if (adap->params.rev > 0) {
+		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
+		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
+			     (hwm - lwm) * 4 / divisor);
+	}
+	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
+		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
+	return 0;
+}
+
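+/*
+ * Set the port speed and flow control.  PAUSE_TX programs the Rx FIFO high
+ * watermark so the MAC emits pause frames; PAUSE_RX makes the transmitter
+ * honor pause frames received from the link partner.
+ */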
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
+{
+	u32 val;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -EINVAL;
+	if (speed >= 0) {
+		if (speed == SPEED_10)
+			val = V_PORTSPEED(0);
+		else if (speed == SPEED_100)
+			val = V_PORTSPEED(1);
+		else if (speed == SPEED_1000)
+			val = V_PORTSPEED(2);
+		else if (speed == SPEED_10000)
+			val = V_PORTSPEED(3);
+		else
+			return -EINVAL;
+
+		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
+				 V_PORTSPEED(M_PORTSPEED), val);
+	}
+
+	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+	if (fc & PAUSE_TX) {
+		u32 rx_max_pkt_size =
+		    G_RXMAXPKTSIZE(t3_read_reg(adap,
+					       A_XGM_RX_MAX_PKT_SIZE + oft));
+		val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
+	}
+	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+	return 0;
+}
+
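+/*
+ * Enable the MAC in the requested direction(s).  Enabling Tx also programs
+ * the TP drop configuration and snapshots the Tx/Rx progress counters used
+ * by the B2 watchdog below.
+ */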
+int t3_mac_enable(struct cmac *mac, int which)
+{
+	int idx = macidx(mac);
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+	struct mac_stats *s = &mac->stats;
+
+	if (which & MAC_DIRECTION_TX) {
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+		t3_write_reg(adap, A_TP_PIO_DATA,
+			     adap->params.rev == T3_REV_C ?
+			     0xc4ffff01 : 0xc0ede401);
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
+				 adap->params.rev == T3_REV_C ? 0 : 1 << idx);
+
+		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
+		mac->tx_mcnt = s->tx_frames;
+		mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+							A_TP_PIO_DATA)));
+		mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+						A_XGM_TX_SPI4_SOP_EOP_CNT +
+						oft)));
+		mac->rx_mcnt = s->rx_frames;
+		mac->rx_pause = s->rx_pause;
+		mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+						A_XGM_RX_SPI4_SOP_EOP_CNT +
+						oft)));
+		mac->rx_ocnt = s->rx_fifo_ovfl;
+		mac->txen = F_TXEN;
+		mac->toggle_cnt = 0;
+	}
+	if (which & MAC_DIRECTION_RX)
+		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+	return 0;
+}
+
+int t3_mac_disable(struct cmac *mac, int which)
+{
+	struct adapter *adap = mac->adapter;
+
+	if (which & MAC_DIRECTION_TX) {
+		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+		mac->txen = 0;
+	}
+	if (which & MAC_DIRECTION_RX) {
+		int val = F_MAC_RESET_;
+
+		t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+				 F_PCS_RESET_, 0);
+		msleep(100);
+		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+		if (is_10G(adap))
+			val |= F_PCS_RESET_;
+		else if (uses_xaui(adap))
+			val |= F_PCS_RESET_ | F_XG2G_RESET_;
+		else
+			val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+		t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
+	}
+	return 0;
+}
+
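+/*
+ * Periodic Tx hang check for T3 rev B2 MACs.  Returns 0 if the MAC is making
+ * progress, 1 if the transmitter had to be toggled to unstick it, or 2 if a
+ * full MAC reset was required.
+ */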
+int t3b2_mac_watchdog_task(struct cmac *mac)
+{
+	struct adapter *adap = mac->adapter;
+	struct mac_stats *s = &mac->stats;
+	unsigned int tx_tcnt, tx_xcnt;
+	u64 tx_mcnt = s->tx_frames;
+	int status;
+
+	status = 0;
+	tx_xcnt = 1;		/* By default tx_xcnt is making progress */
+	tx_tcnt = mac->tx_tcnt;	/* If tx_mcnt is progressing ignore tx_tcnt */
+	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
+		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+						A_XGM_TX_SPI4_SOP_EOP_CNT +
+						mac->offset)));
+		if (tx_xcnt == 0) {
+			t3_write_reg(adap, A_TP_PIO_ADDR,
+				     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+						      A_TP_PIO_DATA)));
+		} else {
+			goto out;
+		}
+	} else {
+		mac->toggle_cnt = 0;
+		goto out;
+	}
+
+	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
+		if (mac->toggle_cnt > 4) {
+			status = 2;
+			goto out;
+		} else {
+			status = 1;
+			goto out;
+		}
+	} else {
+		mac->toggle_cnt = 0;
+		goto out;
+	}
+
+out:
+	mac->tx_tcnt = tx_tcnt;
+	mac->tx_xcnt = tx_xcnt;
+	mac->tx_mcnt = s->tx_frames;
+	mac->rx_pause = s->rx_pause;
+	if (status == 1) {
+		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
+		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
+		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
+		mac->toggle_cnt++;
+	} else if (status == 2) {
+		t3b2_mac_reset(mac);
+		mac->toggle_cnt = 0;
+	}
+	return status;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the packet counters are only
+ * 32 bits they can overflow in ~286 secs at 10G, so the function should be
+ * called more frequently than that.  The byte counters are 45-bit wide, they
+ * would overflow in ~7.8 hours.
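+ * (At 10G a port can receive roughly 15 million minimum-size frames per
+ * second, so a 32-bit frame counter wraps after about 2^32 / 15e6 ~ 286 s;
+ * a 45-bit byte counter at 1.25 GB/s wraps after 2^45 / 1.25e9 ~ 7.8 hours.)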
+ */
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
+{
+#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
+#define RMON_UPDATE(mac, name, reg) \
+	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
+#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
+	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
+			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
+
+	u32 v, lo;
+
+	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
+	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
+	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
+	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
+	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
+	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
+	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
+	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
+	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
+
+	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
+
+	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+	if (mac->adapter->params.rev == T3_REV_B2)
+		v &= 0x7fffffff;
+	mac->stats.rx_too_long += v;
+
+	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
+	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+
+	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
+	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
+	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
+	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
+	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
+	/* This counts error frames in general (bad FCS, underrun, etc). */
+	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
+
+	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
+	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+
+	/* The next stat isn't clear-on-read. */
+	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
+	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+	lo = (u32) mac->stats.rx_cong_drops;
+	mac->stats.rx_cong_drops += (u64) (v - lo);
+
+	return &mac->stats;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
new file mode 100644
index 0000000..ace0ab9
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -0,0 +1,10 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
+cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
+cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
+cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644
index 0000000..11dd91e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -0,0 +1,322 @@
+/*
+ *  This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Deepak (deepak.s@chelsio.com)
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ *  release for licensing terms and conditions.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include "cxgb4.h"
+#include "clip_tbl.h"
+
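+/*
+ * CLIP table hashing: IPv4 addresses hash into the lower half of the table
+ * and IPv6 addresses into the upper half.
+ */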
+static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
+{
+	unsigned int clipt_size_half = c->clipt_size / 2;
+
+	return jhash_1word(*key, 0) % clipt_size_half;
+}
+
+static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
+{
+	unsigned int clipt_size_half = d->clipt_size / 2;
+	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+	return clipt_size_half +
+		(jhash_1word(xor, 0) % clipt_size_half);
+}
+
+static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
+				   u8 v6)
+{
+	return v6 ? ipv6_clip_hash(ctbl, addr) :
+			ipv4_clip_hash(ctbl, addr);
+}
+
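+/*
+ * Ask the firmware, via a mailbox FW_CLIP_CMD, to allocate an entry for an
+ * IPv6 address in the hardware CLIP (Compressed Local IP) table.
+ */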
+static int clip6_get_mbox(const struct net_device *dev,
+			  const struct in6_addr *lip)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct fw_clip_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
+	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int clip6_release_mbox(const struct net_device *dev,
+			      const struct in6_addr *lip)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct fw_clip_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
+	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
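+/*
+ * Take a reference on a CLIP table entry for the given local IP address,
+ * allocating a new entry (and, for IPv6, installing it in hardware through
+ * the firmware) if the address is not already present.
+ */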
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct clip_tbl *ctbl = adap->clipt;
+	struct clip_entry *ce, *cte;
+	u32 *addr = (u32 *)lip;
+	int hash;
+	int ret = -1;
+
+	if (!ctbl)
+		return 0;
+
+	hash = clip_addr_hash(ctbl, addr, v6);
+
+	read_lock_bh(&ctbl->lock);
+	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
+			ce = cte;
+			read_unlock_bh(&ctbl->lock);
+			goto found;
+		}
+	}
+	read_unlock_bh(&ctbl->lock);
+
+	write_lock_bh(&ctbl->lock);
+	if (!list_empty(&ctbl->ce_free_head)) {
+		ce = list_first_entry(&ctbl->ce_free_head,
+				      struct clip_entry, list);
+		list_del(&ce->list);
+		INIT_LIST_HEAD(&ce->list);
+		spin_lock_init(&ce->lock);
+		atomic_set(&ce->refcnt, 0);
+		atomic_dec(&ctbl->nfree);
+		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
+		if (v6) {
+			ce->addr6.sin6_family = AF_INET6;
+			memcpy(ce->addr6.sin6_addr.s6_addr,
+			       lip, sizeof(struct in6_addr));
+			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
+			if (ret) {
+				write_unlock_bh(&ctbl->lock);
+				return ret;
+			}
+		} else {
+			ce->addr.sin_family = AF_INET;
+			memcpy((char *)(&ce->addr.sin_addr), lip,
+			       sizeof(struct in_addr));
+		}
+	} else {
+		write_unlock_bh(&ctbl->lock);
+		return -ENOMEM;
+	}
+	write_unlock_bh(&ctbl->lock);
+found:
+	atomic_inc(&ce->refcnt);
+
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_clip_get);
+
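+/*
+ * Drop a reference on a CLIP table entry.  When the last reference goes away
+ * the entry is returned to the free list and, for IPv6, the firmware is told
+ * to release the hardware entry.
+ */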
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct clip_tbl *ctbl = adap->clipt;
+	struct clip_entry *ce, *cte;
+	u32 *addr = (u32 *)lip;
+	int hash;
+	int ret = -1;
+
+	hash = clip_addr_hash(ctbl, addr, v6);
+
+	read_lock_bh(&ctbl->lock);
+	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
+			ce = cte;
+			read_unlock_bh(&ctbl->lock);
+			goto found;
+		}
+	}
+	read_unlock_bh(&ctbl->lock);
+
+	return;
+found:
+	write_lock_bh(&ctbl->lock);
+	spin_lock_bh(&ce->lock);
+	if (atomic_dec_and_test(&ce->refcnt)) {
+		list_del(&ce->list);
+		INIT_LIST_HEAD(&ce->list);
+		list_add_tail(&ce->list, &ctbl->ce_free_head);
+		atomic_inc(&ctbl->nfree);
+		if (v6)
+			clip6_release_mbox(dev, (const struct in6_addr *)lip);
+	}
+	spin_unlock_bh(&ce->lock);
+	write_unlock_bh(&ctbl->lock);
+}
+EXPORT_SYMBOL(cxgb4_clip_release);
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int cxgb4_update_dev_clip(struct net_device *root_dev,
+				 struct net_device *dev)
+{
+	struct inet6_dev *idev = NULL;
+	struct inet6_ifaddr *ifa;
+	int ret = 0;
+
+	idev = __in6_dev_get(root_dev);
+	if (!idev)
+		return ret;
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
+		if (ret < 0)
+			break;
+	}
+	read_unlock_bh(&idev->lock);
+
+	return ret;
+}
+
+int cxgb4_update_root_dev_clip(struct net_device *dev)
+{
+	struct net_device *root_dev = NULL;
+	int i, ret = 0;
+
+	/* First populate the real net device's IPv6 addresses */
+	ret = cxgb4_update_dev_clip(dev, dev);
+	if (ret)
+		return ret;
+
+	/* Parse all bond and vlan devices layered on top of the physical dev */
+	root_dev = netdev_master_upper_dev_get_rcu(dev);
+	if (root_dev) {
+		ret = cxgb4_update_dev_clip(root_dev, dev);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < VLAN_N_VID; i++) {
+		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+		if (!root_dev)
+			continue;
+
+		ret = cxgb4_update_dev_clip(root_dev, dev);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
+
+int clip_tbl_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct clip_tbl *ctbl = adapter->clipt;
+	struct clip_entry *ce;
+	char ip[60];
+	int i;
+
+	read_lock_bh(&ctbl->lock);
+
+	seq_puts(seq, "IP Address                  Users\n");
+	for (i = 0; i < ctbl->clipt_size; ++i) {
+		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
+			ip[0] = '\0';
+			sprintf(ip, "%pISc", &ce->addr);
+			seq_printf(seq, "%-25s   %u\n", ip,
+				   atomic_read(&ce->refcnt));
+		}
+	}
+	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
+
+	read_unlock_bh(&ctbl->lock);
+
+	return 0;
+}
+
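+/*
+ * Allocate and initialize a CLIP table covering hardware indices
+ * [clipt_start, clipt_end].  The hash buckets are split evenly between IPv4
+ * (lower half) and IPv6 (upper half) and all entries start on the free list.
+ */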
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+				  unsigned int clipt_end)
+{
+	struct clip_entry *cl_list;
+	struct clip_tbl *ctbl;
+	unsigned int clipt_size;
+	int i;
+
+	if (clipt_start >= clipt_end)
+		return NULL;
+	clipt_size = clipt_end - clipt_start + 1;
+	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
+		return NULL;
+
+	ctbl = t4_alloc_mem(sizeof(*ctbl) +
+			    clipt_size*sizeof(struct list_head));
+	if (!ctbl)
+		return NULL;
+
+	ctbl->clipt_start = clipt_start;
+	ctbl->clipt_size = clipt_size;
+	INIT_LIST_HEAD(&ctbl->ce_free_head);
+
+	atomic_set(&ctbl->nfree, clipt_size);
+	rwlock_init(&ctbl->lock);
+
+	for (i = 0; i < ctbl->clipt_size; ++i)
+		INIT_LIST_HEAD(&ctbl->hash_list[i]);
+
+	cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+	if (!cl_list) {
+		t4_free_mem(ctbl);
+		return NULL;
+	}
+	ctbl->cl_list = (void *)cl_list;
+
+	for (i = 0; i < clipt_size; i++) {
+		INIT_LIST_HEAD(&cl_list[i].list);
+		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
+	}
+
+	return ctbl;
+}
+
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+	struct clip_tbl *ctbl = adap->clipt;
+
+	if (ctbl) {
+		if (ctbl->cl_list)
+			t4_free_mem(ctbl->cl_list);
+		t4_free_mem(ctbl);
+	}
+}
+EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644
index 0000000..35eb43c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -0,0 +1,43 @@
+/*
+ *  This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Deepak (deepak.s@chelsio.com)
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ *  release for licensing terms and conditions.
+ */
+
+struct clip_entry {
+	spinlock_t lock;	/* Hold while modifying clip reference */
+	atomic_t refcnt;
+	struct list_head list;
+	union {
+		struct sockaddr_in addr;
+		struct sockaddr_in6 addr6;
+	};
+};
+
+struct clip_tbl {
+	unsigned int clipt_start;
+	unsigned int clipt_size;
+	rwlock_t lock;
+	atomic_t nfree;
+	struct list_head ce_free_head;
+	void *cl_list;
+	struct list_head hash_list[0];
+};
+
+enum {
+	CLIPT_MIN_HASH_BUCKETS = 2,
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+				  unsigned int clipt_end);
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
+int clip_tbl_show(struct seq_file *seq, void *v);
+int cxgb4_update_root_dev_clip(struct net_device *dev);
+void t4_cleanup_clip_tbl(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
new file mode 100644
index 0000000..55a47de
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -0,0 +1,1427 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_H__
+#define __CXGB4_H__
+
+#include "t4_hw.h"
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <asm/io.h>
+#include "t4_chip_type.h"
+#include "cxgb4_uld.h"
+
+#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
+
+enum {
+	MAX_NPORTS	= 4,     /* max # of ports */
+	SERNUM_LEN	= 24,    /* Serial # length */
+	EC_LEN		= 16,    /* E/C length */
+	ID_LEN		= 16,    /* ID length */
+	PN_LEN		= 16,    /* Part Number length */
+	MACADDR_LEN	= 12,    /* MAC Address length */
+};
+
+enum {
+	T4_REGMAP_SIZE = (160 * 1024),
+	T5_REGMAP_SIZE = (332 * 1024),
+};
+
+enum {
+	MEM_EDC0,
+	MEM_EDC1,
+	MEM_MC,
+	MEM_MC0 = MEM_MC,
+	MEM_MC1
+};
+
+enum {
+	MEMWIN0_APERTURE = 2048,
+	MEMWIN0_BASE     = 0x1b800,
+	MEMWIN1_APERTURE = 32768,
+	MEMWIN1_BASE     = 0x28000,
+	MEMWIN1_BASE_T5  = 0x52000,
+	MEMWIN2_APERTURE = 65536,
+	MEMWIN2_BASE     = 0x30000,
+	MEMWIN2_APERTURE_T5 = 131072,
+	MEMWIN2_BASE_T5  = 0x60000,
+};
+
+enum dev_master {
+	MASTER_CANT,
+	MASTER_MAY,
+	MASTER_MUST
+};
+
+enum dev_state {
+	DEV_STATE_UNINIT,
+	DEV_STATE_INIT,
+	DEV_STATE_ERR
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+struct port_stats {
+	u64 tx_octets;            /* total # of octets in good frames */
+	u64 tx_frames;            /* all good frames */
+	u64 tx_bcast_frames;      /* all broadcast frames */
+	u64 tx_mcast_frames;      /* all multicast frames */
+	u64 tx_ucast_frames;      /* all unicast frames */
+	u64 tx_error_frames;      /* all error frames */
+
+	u64 tx_frames_64;         /* # of Tx frames in a particular range */
+	u64 tx_frames_65_127;
+	u64 tx_frames_128_255;
+	u64 tx_frames_256_511;
+	u64 tx_frames_512_1023;
+	u64 tx_frames_1024_1518;
+	u64 tx_frames_1519_max;
+
+	u64 tx_drop;              /* # of dropped Tx frames */
+	u64 tx_pause;             /* # of transmitted pause frames */
+	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
+	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
+	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
+	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
+	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
+	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
+	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
+	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */
+
+	u64 rx_octets;            /* total # of octets in good frames */
+	u64 rx_frames;            /* all good frames */
+	u64 rx_bcast_frames;      /* all broadcast frames */
+	u64 rx_mcast_frames;      /* all multicast frames */
+	u64 rx_ucast_frames;      /* all unicast frames */
+	u64 rx_too_long;          /* # of frames exceeding MTU */
+	u64 rx_jabber;            /* # of jabber frames */
+	u64 rx_fcs_err;           /* # of received frames with bad FCS */
+	u64 rx_len_err;           /* # of received frames with length error */
+	u64 rx_symbol_err;        /* symbol errors */
+	u64 rx_runt;              /* # of short frames */
+
+	u64 rx_frames_64;         /* # of Rx frames in a particular range */
+	u64 rx_frames_65_127;
+	u64 rx_frames_128_255;
+	u64 rx_frames_256_511;
+	u64 rx_frames_512_1023;
+	u64 rx_frames_1024_1518;
+	u64 rx_frames_1519_max;
+
+	u64 rx_pause;             /* # of received pause frames */
+	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
+	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
+	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
+	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
+	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
+	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
+	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
+	u64 rx_ppp7;              /* # of received PPP prio 7 frames */
+
+	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
+	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
+	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
+	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
+	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
+	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
+	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
+	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+	u64 octets;
+	u64 frames;
+	u64 bcast_frames;
+	u64 mcast_frames;
+	u64 ucast_frames;
+	u64 error_frames;
+
+	u64 frames_64;
+	u64 frames_65_127;
+	u64 frames_128_255;
+	u64 frames_256_511;
+	u64 frames_512_1023;
+	u64 frames_1024_1518;
+	u64 frames_1519_max;
+
+	u64 drop;
+
+	u64 ovflow0;
+	u64 ovflow1;
+	u64 ovflow2;
+	u64 ovflow3;
+	u64 trunc0;
+	u64 trunc1;
+	u64 trunc2;
+	u64 trunc3;
+};
+
+struct tp_tcp_stats {
+	u32 tcp_out_rsts;
+	u64 tcp_in_segs;
+	u64 tcp_out_segs;
+	u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+	u32 frames;
+	u32 drops;
+	u64 octets;
+};
+
+struct tp_fcoe_stats {
+	u32 frames_ddp;
+	u32 frames_drop;
+	u64 octets_ddp;
+};
+
+struct tp_err_stats {
+	u32 mac_in_errs[4];
+	u32 hdr_in_errs[4];
+	u32 tcp_in_errs[4];
+	u32 tnl_cong_drops[4];
+	u32 ofld_chan_drops[4];
+	u32 tnl_tx_drops[4];
+	u32 ofld_vlan_drops[4];
+	u32 tcp6_in_errs[4];
+	u32 ofld_no_neigh;
+	u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+	u32 req[4];
+	u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+	u32 rqe_dfr_pkt;
+	u32 rqe_dfr_mod;
+};
+
+struct sge_params {
+	u32 hps;			/* host page size for our PF/VF */
+	u32 eq_qpp;			/* egress queues/page for our PF/VF */
+	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
+};
+
+struct tp_params {
+	unsigned int tre;            /* log2 of core clocks per TP tick */
+	unsigned int la_mask;        /* what events are recorded by TP LA */
+	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
+				     /* channel map */
+
+	uint32_t dack_re;            /* DACK timer resolution */
+	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
+
+	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
+	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
+
+	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
+	 * subset of the set of fields which may be present in the Compressed
+	 * Filter Tuple portion of filters and TCP TCB connections.  The
+	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+	 * Since a variable number of fields may or may not be present, their
+	 * shifted field positions within the Compressed Filter Tuple may
+	 * vary, or not even be present if the field isn't selected in
+	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
+	 * places we store their offsets here, or a -1 if the field isn't
+	 * present.
+	 */
+	int vlan_shift;
+	int vnic_shift;
+	int port_shift;
+	int protocol_shift;
+};
+
+struct vpd_params {
+	unsigned int cclk;
+	u8 ec[EC_LEN + 1];
+	u8 sn[SERNUM_LEN + 1];
+	u8 id[ID_LEN + 1];
+	u8 pn[PN_LEN + 1];
+	u8 na[MACADDR_LEN + 1];
+};
+
+struct pci_params {
+	unsigned char speed;
+	unsigned char width;
+};
+
+struct devlog_params {
+	u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
+	u32 start;                      /* start of log in firmware memory */
+	u32 size;                       /* size of log */
+};
+
+/* Stores chip specific parameters */
+struct arch_specific_params {
+	u8 nchan;
+	u16 mps_rplc_size;
+	u16 vfcount;
+	u32 sge_fl_db;
+	u16 mps_tcam_size;
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct tp_params  tp;
+	struct vpd_params vpd;
+	struct pci_params pci;
+	struct devlog_params devlog;
+	enum pcie_memwin drv_memwin;
+
+	unsigned int cim_la_size;
+
+	unsigned int sf_size;             /* serial flash size in bytes */
+	unsigned int sf_nsec;             /* # of flash sectors */
+	unsigned int sf_fw_start;         /* start of FW image in flash */
+
+	unsigned int fw_vers;
+	unsigned int tp_vers;
+	u8 api_vers[7];
+
+	unsigned short mtus[NMTUS];
+	unsigned short a_wnd[NCCTRL_WIN];
+	unsigned short b_wnd[NCCTRL_WIN];
+
+	unsigned char nports;             /* # of ethernet ports */
+	unsigned char portvec;
+	enum chip_type chip;               /* chip code */
+	struct arch_specific_params arch;  /* chip specific params */
+	unsigned char offload;
+
+	unsigned char bypass;
+
+	unsigned int ofldq_wr_cred;
+	bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */
+
+	unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
+	unsigned int max_ird_adapter;     /* Max read depth per adapter */
+};
+
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
+	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
+	unsigned int idma_state[2];	/* IDMA Hang detect state */
+	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
+	unsigned int idma_warn[2];	/* time to warning in HZ */
+};
+
+#include "t4fw_api.h"
+
+#define FW_VERSION(chip) ( \
+		FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+		FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+		FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+		FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
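+/* For example, FW_VERSION(T4) expands to a packed version word built from the
+ * T4FW_VERSION_MAJOR/MINOR/MICRO/BUILD constants (assumed to be provided by
+ * t4fw_version.h); note that the chip argument to FW_INTFVER() is currently
+ * unused and only the interface name selects the FW_HDR_INTFVER_* constant.
+ */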
+
+struct fw_info {
+	u8 chip;
+	char *fs_name;
+	char *fw_mod_name;
+	struct fw_hdr fw_hdr;
+};
+
+
+struct trace_params {
+	u32 data[TRACE_LEN / 4];
+	u32 mask[TRACE_LEN / 4];
+	unsigned short snap_len;
+	unsigned short min_len;
+	unsigned char skip_ofst;
+	unsigned char skip_len;
+	unsigned char invert;
+	unsigned char port;
+};
+
+struct link_config {
+	unsigned short supported;        /* link capabilities */
+	unsigned short advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+	unsigned char  link_ok;          /* link up? */
+};
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
+
+enum {
+	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
+	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
+	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
+	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
+	MAX_RDMA_CIQS = 32,           /* # of RDMA concentrator IQs */
+	MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
+};
+
+enum {
+	MAX_TXQ_ENTRIES      = 16384,
+	MAX_CTRL_TXQ_ENTRIES = 1024,
+	MAX_RSPQ_ENTRIES     = 16384,
+	MAX_RX_BUFFERS       = 16384,
+	MIN_TXQ_ENTRIES      = 32,
+	MIN_CTRL_TXQ_ENTRIES = 32,
+	MIN_RSPQ_ENTRIES     = 128,
+	MIN_FL_ENTRIES       = 16
+};
+
+enum {
+	INGQ_EXTRAS = 2,        /* firmware event queue and */
+				/*   forwarded interrupts */
+	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
+};
+
+struct adapter;
+struct sge_rspq;
+
+#include "cxgb4_dcb.h"
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include "cxgb4_fcoe.h"
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
+struct port_info {
+	struct adapter *adapter;
+	u16    viid;
+	s16    xact_addr_filt;        /* index of exact MAC address filter */
+	u16    rss_size;              /* size of VI's RSS table slice */
+	s8     mdio_addr;
+	enum fw_port_type port_type;
+	u8     mod_type;
+	u8     port_id;
+	u8     tx_chan;
+	u8     lport;                 /* associated offload logical port */
+	u8     nqsets;                /* # of qsets */
+	u8     first_qset;            /* index of first qset */
+	u8     rss_mode;
+	struct link_config link_cfg;
+	u16   *rss;
+	struct port_stats stats_base;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	struct port_dcb_info dcb;     /* Data Center Bridging support */
+#endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+	struct cxgb_fcoe fcoe;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+	bool rxtstamp;  /* Enable TS */
+	struct hwtstamp_config tstamp_config;
+};
+
+struct dentry;
+struct work_struct;
+
+enum {                                 /* adapter flags */
+	FULL_INIT_DONE     = (1 << 0),
+	DEV_ENABLED        = (1 << 1),
+	USING_MSI          = (1 << 2),
+	USING_MSIX         = (1 << 3),
+	FW_OK              = (1 << 4),
+	RSS_TNLALLLOOKUP   = (1 << 5),
+	USING_SOFT_PARAMS  = (1 << 6),
+	MASTER_PF          = (1 << 7),
+	FW_OFLD_CONN       = (1 << 9),
+};
+
+struct rx_sw_desc;
+
+struct sge_fl {                     /* SGE free-buffer queue state */
+	unsigned int avail;         /* # of available Rx buffers */
+	unsigned int pend_cred;     /* new buffers since last FL DB ring */
+	unsigned int cidx;          /* consumer index */
+	unsigned int pidx;          /* producer index */
+	unsigned long alloc_failed; /* # of times buffer allocation failed */
+	unsigned long large_alloc_failed;
+	unsigned long starving;
+	/* RO fields */
+	unsigned int cntxt_id;      /* SGE context id for the free list */
+	unsigned int size;          /* capacity of free list */
+	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
+	__be64 *desc;               /* address of HW Rx descriptor ring */
+	dma_addr_t addr;            /* bus address of HW ring start */
+	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
+	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
+};
+
+/* A packet gather list */
+struct pkt_gl {
+	u64 sgetstamp;		    /* SGE Time Stamp for Ingress Packet */
+	struct page_frag frags[MAX_SKB_FRAGS];
+	void *va;                         /* virtual address of first byte */
+	unsigned int nfrags;              /* # of fragments */
+	unsigned int tot_len;             /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+			      const struct pkt_gl *gl);
+
+struct sge_rspq {                   /* state for an SGE response queue */
+	struct napi_struct napi;
+	const __be64 *cur_desc;     /* current descriptor in queue */
+	unsigned int cidx;          /* consumer index */
+	u8 gen;                     /* current generation bit */
+	u8 intr_params;             /* interrupt holdoff parameters */
+	u8 next_intr_params;        /* holdoff params for next interrupt */
+	u8 adaptive_rx;
+	u8 pktcnt_idx;              /* interrupt packet threshold */
+	u8 uld;                     /* ULD handling this queue */
+	u8 idx;                     /* queue index within its group */
+	int offset;                 /* offset into current Rx buffer */
+	u16 cntxt_id;               /* SGE context id for the response q */
+	u16 abs_id;                 /* absolute SGE id for the response q */
+	__be64 *desc;               /* address of HW response ring */
+	dma_addr_t phys_addr;       /* physical address of the ring */
+	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
+	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
+	unsigned int iqe_len;       /* entry size */
+	unsigned int size;          /* capacity of response queue */
+	struct adapter *adap;
+	struct net_device *netdev;  /* associated net device */
+	rspq_handler_t handler;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define CXGB_POLL_STATE_IDLE		0
+#define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
+#define CXGB_POLL_STATE_POLL		BIT(1) /* poll owns this poll */
+#define CXGB_POLL_STATE_NAPI_YIELD	BIT(2) /* NAPI yielded this poll */
+#define CXGB_POLL_STATE_POLL_YIELD	BIT(3) /* poll yielded this poll */
+#define CXGB_POLL_YIELD			(CXGB_POLL_STATE_NAPI_YIELD |   \
+					 CXGB_POLL_STATE_POLL_YIELD)
+#define CXGB_POLL_LOCKED		(CXGB_POLL_STATE_NAPI |         \
+					 CXGB_POLL_STATE_POLL)
+#define CXGB_POLL_USER_PEND		(CXGB_POLL_STATE_POLL |         \
+					 CXGB_POLL_STATE_POLL_YIELD)
+	unsigned int bpoll_state;
+	spinlock_t bpoll_lock;		/* lock for busy poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+};
+
+struct sge_eth_stats {              /* Ethernet queue statistics */
+	unsigned long pkts;         /* # of ethernet packets */
+	unsigned long lro_pkts;     /* # of LRO super packets */
+	unsigned long lro_merged;   /* # of wire packets merged by LRO */
+	unsigned long rx_cso;       /* # of Rx checksum offloads */
+	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
+	unsigned long rx_drops;     /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq {                /* SW Ethernet Rx queue */
+	struct sge_rspq rspq;
+	struct sge_fl fl;
+	struct sge_eth_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_stats {             /* offload queue statistics */
+	unsigned long pkts;         /* # of packets */
+	unsigned long imm;          /* # of immediate-data packets */
+	unsigned long an;           /* # of asynchronous notifications */
+	unsigned long nomem;        /* # of responses deferred due to no mem */
+};
+
+struct sge_ofld_rxq {               /* SW offload Rx queue */
+	struct sge_rspq rspq;
+	struct sge_fl fl;
+	struct sge_ofld_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct tx_desc {
+	__be64 flit[8];
+};
+
+struct tx_sw_desc;
+
+struct sge_txq {
+	unsigned int  in_use;       /* # of in-use Tx descriptors */
+	unsigned int  size;         /* # of descriptors */
+	unsigned int  cidx;         /* SW consumer index */
+	unsigned int  pidx;         /* producer index */
+	unsigned long stops;        /* # of times q has been stopped */
+	unsigned long restarts;     /* # of queue restarts */
+	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
+	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
+	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
+	struct sge_qstat *stat;     /* queue status entry */
+	dma_addr_t    phys_addr;    /* physical address of the ring */
+	spinlock_t db_lock;
+	int db_disabled;
+	unsigned short db_pidx;
+	unsigned short db_pidx_inc;
+	void __iomem *bar2_addr;    /* address of BAR2 Queue registers */
+	unsigned int bar2_qid;      /* Queue ID for BAR2 Queue registers */
+};
+
+struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
+	struct sge_txq q;
+	struct netdev_queue *txq;   /* associated netdev TX queue */
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u8 dcb_prio;		    /* DCB Priority bound to queue */
+#endif
+	unsigned long tso;          /* # of TSO requests */
+	unsigned long tx_cso;       /* # of Tx checksum offloads */
+	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
+	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
+	struct sge_txq q;
+	struct adapter *adap;
+	struct sk_buff_head sendq;  /* list of backpressured packets */
+	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	u8 full;                    /* the Tx ring is full */
+	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
+	struct sge_txq q;
+	struct adapter *adap;
+	struct sk_buff_head sendq;  /* list of backpressured packets */
+	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	u8 full;                    /* the Tx ring is full */
+} ____cacheline_aligned_in_smp;
+
+struct sge {
+	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
+	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
+	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+	struct sge_rspq intrq ____cacheline_aligned_in_smp;
+	spinlock_t intrq_lock;
+
+	u16 max_ethqsets;           /* # of available Ethernet queue sets */
+	u16 ethqsets;               /* # of active Ethernet queue sets */
+	u16 ethtxq_rover;           /* Tx queue to clean up next */
+	u16 ofldqsets;              /* # of active offload queue sets */
+	u16 rdmaqs;                 /* # of available RDMA Rx queues */
+	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
+	u16 ofld_rxq[MAX_OFLD_QSETS];
+	u16 rdma_rxq[MAX_RDMA_QUEUES];
+	u16 rdma_ciq[MAX_RDMA_CIQS];
+	u16 timer_val[SGE_NTIMERS];
+	u8 counter_val[SGE_NCOUNTERS];
+	u32 fl_pg_order;            /* large page allocation size */
+	u32 stat_len;               /* length of status page at ring end */
+	u32 pktshift;               /* padding between CPL & packet data */
+	u32 fl_align;               /* response queue message alignment */
+	u32 fl_starve_thres;        /* Free List starvation threshold */
+
+	struct sge_idma_monitor_state idma_monitor;
+	unsigned int egr_start;
+	unsigned int egr_sz;
+	unsigned int ingr_start;
+	unsigned int ingr_sz;
+	void **egr_map;    /* qid->queue egress queue map */
+	struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+	unsigned long *starving_fl;
+	unsigned long *txq_maperr;
+	unsigned long *blocked_fl;
+	struct timer_list rx_timer; /* refills starving FLs */
+	struct timer_list tx_timer; /* checks Tx queues */
+};
+
+#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
+#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
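+/* Typical iteration over the active Ethernet Rx queue sets (illustrative):
+ *
+ *	int i;
+ *
+ *	for_each_ethrxq(&adap->sge, i) {
+ *		struct sge_eth_rxq *rxq = &adap->sge.ethrxq[i];
+ *		...
+ *	}
+ */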
+
+struct l2t_data;
+
+#ifdef CONFIG_PCI_IOV
+
+/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
+ * Configuration initialization for T5 only has SR-IOV functionality enabled
+ * on PF0-3 in order to simplify everything.
+ */
+#define NUM_OF_PF_WITH_SRIOV 4
+
+#endif
+
+struct doorbell_stats {
+	u32 db_drop;
+	u32 db_empty;
+	u32 db_full;
+};
+
+struct adapter {
+	void __iomem *regs;
+	void __iomem *bar2;
+	u32 t4_bar0;
+	struct pci_dev *pdev;
+	struct device *pdev_dev;
+	unsigned int mbox;
+	unsigned int pf;
+	unsigned int flags;
+	enum chip_type chip;
+
+	int msg_enable;
+
+	struct adapter_params params;
+	struct cxgb4_virt_res vres;
+	unsigned int swintr;
+
+	struct {
+		unsigned short vec;
+		char desc[IFNAMSIZ + 10];
+	} msix_info[MAX_INGQ + 1];
+
+	struct doorbell_stats db_stats;
+	struct sge sge;
+
+	struct net_device *port[MAX_NPORTS];
+	u8 chan_map[NCHAN];                   /* channel -> port map */
+
+	u32 filter_mode;
+	unsigned int l2t_start;
+	unsigned int l2t_end;
+	struct l2t_data *l2t;
+	unsigned int clipt_start;
+	unsigned int clipt_end;
+	struct clip_tbl *clipt;
+	void *uld_handle[CXGB4_ULD_MAX];
+	struct list_head list_node;
+	struct list_head rcu_node;
+
+	struct tid_info tids;
+	void **tid_release_head;
+	spinlock_t tid_release_lock;
+	struct workqueue_struct *workq;
+	struct work_struct tid_release_task;
+	struct work_struct db_full_task;
+	struct work_struct db_drop_task;
+	bool tid_release_task_busy;
+
+	struct dentry *debugfs_root;
+	bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
+	bool trace_rss;	/* 1 implies that a different RSS flit is used per
+			 * filter; if 0, the default RSS flit is used for
+			 * all 4 filters.
+			 */
+
+	spinlock_t stats_lock;
+	spinlock_t win0_lock ____cacheline_aligned_in_smp;
+};
+
+/* Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/* Filter matching rules.  These consist of a set of ingress packet field
+ * (value, mask) tuples.  The associated ingress packet field matches the
+ * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
+ * matches an ingress packet when all of the individual field matching
+ * rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ *
+ * Most of the following data structures are modeled on T4 capabilities.
+ * Drivers for earlier chips use the subsets which make sense for those chips.
+ * We really need to come up with a hardware-independent mechanism to
+ * represent hardware filter capabilities ...
+ */
+struct ch_filter_tuple {
+	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
+	 * register selects which of these fields will participate in the
+	 * filter match rules -- up to a maximum of 36 bits.  Because
+	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+	 * set of fields.
+	 */
+	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
+	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
+	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
+	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
+	uint32_t pfvf_vld:1;                    /* PF/VF valid */
+	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
+	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
+	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
+	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
+	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
+	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
+	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
+	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
+	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
+	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */
+
+	/* Uncompressed header matching field rules.  These are always
+	 * available for field rules.
+	 */
+	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
+	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
+	uint16_t lport;         /* local port */
+	uint16_t fport;         /* foreign port */
+};
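+
+/* Illustrative only: with the (value, mask) convention described above, a
+ * tuple intended to match any packet arriving on ingress port 2 would set
+ * val.iport = 2 and mask.iport = 0x7, while leaving every other field at
+ * (0, 0) so that it acts as a wildcard.
+ */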
+
+/* A filter ioctl command.
+ */
+struct ch_filter_specification {
+	/* Administrative fields for filter.
+	 */
+	uint32_t hitcnts:1;     /* count filter hits in TCB */
+	uint32_t prio:1;        /* filter has priority over active/server */
+
+	/* Fundamental filter typing.  This is the one element of filter
+	 * matching that doesn't exist as a (value, mask) tuple.
+	 */
+	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */
+
+	/* Packet dispatch information.  Ingress packets which match the
+	 * filter rules will be dropped, passed to the host or switched back
+	 * out as egress packets.
+	 */
+	uint32_t action:2;      /* drop, pass, switch */
+
+	uint32_t rpttid:1;      /* report TID in RSS hash field */
+
+	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
+	uint32_t iq:10;         /* ingress queue */
+
+	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
+	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
+				/*             1 => TCB contains IQ ID */
+
+	/* Switch proxy/rewrite fields.  An ingress packet which matches a
+	 * filter with "switch" set will be looped back out as an egress
+	 * packet -- potentially with some Ethernet header rewriting.
+	 */
+	uint32_t eport:2;       /* egress port to switch packet out */
+	uint32_t newdmac:1;     /* rewrite destination MAC address */
+	uint32_t newsmac:1;     /* rewrite source MAC address */
+	uint32_t newvlan:2;     /* rewrite VLAN Tag */
+	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
+	uint8_t smac[ETH_ALEN]; /* new source MAC address */
+	uint16_t vlan;          /* VLAN Tag to insert */
+
+	/* Filter rule value/mask pairs.
+	 */
+	struct ch_filter_tuple val;
+	struct ch_filter_tuple mask;
+};
+
+enum {
+	FILTER_PASS = 0,        /* default */
+	FILTER_DROP,
+	FILTER_SWITCH
+};
+
+enum {
+	VLAN_NOCHANGE = 0,      /* default */
+	VLAN_REMOVE,
+	VLAN_INSERT,
+	VLAN_REWRITE
+};
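+
+/* Illustrative only: a minimal specification for an IPv4 filter that counts
+ * and drops matching traffic might be built roughly as
+ *
+ *	struct ch_filter_specification fs = { 0 };
+ *
+ *	fs.type       = 0;              (IPv4)
+ *	fs.action     = FILTER_DROP;
+ *	fs.hitcnts    = 1;
+ *	fs.val.iport  = 2;
+ *	fs.mask.iport = 0x7;
+ *
+ * Handing the specification to the hardware is done by other parts of the
+ * driver and is outside the scope of this header.
+ */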
+
+static inline int is_offload(const struct adapter *adap)
+{
+	return adap->params.offload;
+}
+
+static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
+{
+	return readl(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
+{
+	writel(val, adap->regs + reg_addr);
+}
+
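+/* Fallback 64-bit MMIO accessors for platforms that don't provide native
+ * readq/writeq: each 64-bit access is split into two 32-bit accesses (low
+ * word first), so it is not atomic with respect to the device.
+ */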
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
+{
+	return readq(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
+{
+	writeq(val, adap->regs + reg_addr);
+}
+
+/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW.  Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+				  u8 hw_addr[])
+{
+	ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+	return netdev_priv(adap->port[idx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->adapter;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
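+/* Busy-poll locking protocol: the NAPI handler and the socket busy-poll path
+ * compete for ownership of a response queue via bpoll_state.  A context that
+ * finds the queue already locked records a *_YIELD flag and backs off; the
+ * unlock helpers report whether a yield was recorded so that the yielded
+ * work can be rescheduled.
+ */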
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+	spin_lock_init(&q->bpoll_lock);
+	q->bpoll_state = CXGB_POLL_STATE_IDLE;
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+	bool rc = true;
+
+	spin_lock(&q->bpoll_lock);
+	if (q->bpoll_state & CXGB_POLL_LOCKED) {
+		q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
+		rc = false;
+	} else {
+		q->bpoll_state = CXGB_POLL_STATE_NAPI;
+	}
+	spin_unlock(&q->bpoll_lock);
+	return rc;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+	bool rc = false;
+
+	spin_lock(&q->bpoll_lock);
+	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+		rc = true;
+	q->bpoll_state = CXGB_POLL_STATE_IDLE;
+	spin_unlock(&q->bpoll_lock);
+	return rc;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+	bool rc = true;
+
+	spin_lock_bh(&q->bpoll_lock);
+	if (q->bpoll_state & CXGB_POLL_LOCKED) {
+		q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
+		rc = false;
+	} else {
+		q->bpoll_state |= CXGB_POLL_STATE_POLL;
+	}
+	spin_unlock_bh(&q->bpoll_lock);
+	return rc;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+	bool rc = false;
+
+	spin_lock_bh(&q->bpoll_lock);
+	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+		rc = true;
+	q->bpoll_state = CXGB_POLL_STATE_IDLE;
+	spin_unlock_bh(&q->bpoll_lock);
+	return rc;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+	return q->bpoll_state & CXGB_POLL_USER_PEND;
+}
+#else
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+	return true;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+	return false;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+	return false;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+	return false;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+	return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/* Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ * - bits 16..23: register dump version
+ */
+static inline unsigned int mk_adap_vers(struct adapter *ap)
+{
+	return CHELSIO_CHIP_VERSION(ap->params.chip) |
+		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
+}
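+
+/* For instance, mk_adap_vers() for a revision 0 T5 part returns 0x10005:
+ * chip version 5 in bits 0..9, revision 0 in bits 10..15 and register dump
+ * version 1 in bits 16..23 (assuming CHELSIO_CHIP_VERSION() yields 5 for T5
+ * parts).
+ */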
+
+/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
+static inline unsigned int qtimer_val(const struct adapter *adap,
+				      const struct sge_rspq *q)
+{
+	unsigned int idx = q->intr_params >> 1;
+
+	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
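+
+/* Note: qtimer_val() shifts right by one because intr_params keeps the
+ * packet-count-threshold enable in bit 0 and the SGE holdoff timer index in
+ * the bits above it.
+ */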
+
+/* driver version & name used for ethtool_drvinfo */
+extern char cxgb4_driver_name[];
+extern const char cxgb4_driver_version[];
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void *t4_alloc_mem(size_t size);
+
+void t4_free_sge_resources(struct adapter *adap);
+void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
+irq_handler_t t4_intr_handler(struct adapter *adap);
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+		     const struct pkt_gl *gl);
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+		     struct net_device *dev, int intr_idx,
+		     struct sge_fl *fl, rspq_handler_t hnd, int cong);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+			 struct net_device *dev, struct netdev_queue *netdevq,
+			 unsigned int iqid);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct net_device *dev, unsigned int iqid,
+			  unsigned int cmplqid);
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+			  struct net_device *dev, unsigned int iqid);
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
+int t4_sge_init(struct adapter *adap);
+void t4_sge_start(struct adapter *adap);
+void t4_sge_stop(struct adapter *adap);
+int cxgb_busy_poll(struct napi_struct *napi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+			       unsigned int cnt);
+void cxgb4_set_ethtool_ops(struct net_device *netdev);
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+extern int dbfifo_int_thresh;
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline int is_bypass(struct adapter *adap)
+{
+	return adap->params.bypass;
+}
+
+static inline int is_bypass_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x440b:
+	case 0x440c:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static inline int is_10gbt_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x4409:
+	case 0x4486:
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+	return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+					    unsigned int us)
+{
+	return (us * adap->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+					    unsigned int ticks)
+{
+	/* add Core Clock / 2 to round ticks to nearest uS */
+	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+		adapter->params.vpd.cclk);
+}
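+
+/* For example, assuming cclk is reported in kHz (as the conversions above
+ * imply), a 200 MHz core clock gives core_ticks_per_usec() == 200 and
+ * core_ticks_to_us(adap, 1000) == 5.
+ */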
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+		      u32 val);
+
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout);
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+				     const void *cmd, int size, void *rpl,
+				     int timeout)
+{
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+				       timeout);
+}
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+			     int size, void *rpl)
+{
+	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+				int size, void *rpl)
+{
+	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
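+
+/* t4_wr_mbox() passes sleep_ok = true and may therefore sleep while waiting
+ * for the firmware reply, so it should only be called from process context;
+ * t4_wr_mbox_ns() passes sleep_ok = false, avoids sleeping and is intended
+ * for atomic context.
+ */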
+
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+		       unsigned int data_reg, const u32 *vals,
+		       unsigned int nregs, unsigned int start_idx);
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+		      unsigned int data_reg, u32 *vals, unsigned int nregs,
+		      unsigned int start_idx);
+void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
+
+struct fw_filter_wr;
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_wait_dev_ready(void __iomem *regs);
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
+#define T4_MEMORY_WRITE	0
+#define T4_MEMORY_READ	1
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
+		 void *buf, int dir);
+static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
+				  u32 len, __be32 *buf)
+{
+	return t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE);
+}
+
+unsigned int t4_get_regs_len(struct adapter *adapter);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+
+int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+		   int win, spinlock_t *lock,
+		   int (*phy_fw_version)(const u8 *, size_t),
+		   const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+		  const u8 *fw_data, unsigned int size, int force);
+unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_check_fw_version(struct adapter *adap);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+	       const u8 *fw_data, unsigned int fw_size,
+	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
+int t4_prep_adapter(struct adapter *adapter);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter,
+		      unsigned int qid,
+		      enum t4_bar2_qtype qtype,
+		      int user,
+		      u64 *pbar2_qoffset,
+		      unsigned int *pbar2_qid);
+
+unsigned int qtimer_val(const struct adapter *adap,
+			const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
+int t4_init_sge_params(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+void t4_fatal_err(struct adapter *adapter);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+			int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+		       unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+		     unsigned int flags, unsigned int defq);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_read_rss_key(struct adapter *adapter, u32 *key);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+			   u32 *valp);
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+			   u32 *vfl, u32 *vfh);
+u32 t4_read_rss_pf_map(struct adapter *adapter);
+u32 t4_read_rss_pf_mask(struct adapter *adapter);
+
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
+		    size_t n);
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
+		    size_t n);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+		unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+		 const unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+			unsigned int *pif_req_wrptr,
+			unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
+const char *t4_get_port_type_description(enum fw_port_type port_type);
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+			      struct port_stats *stats,
+			      struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+			    unsigned int mask, unsigned int val);
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+		       struct tp_fcoe_stats *st);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+		  const unsigned short *alpha, const unsigned short *beta);
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+			 const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+		      u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+			  unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val, int timeout);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		  unsigned int vf, unsigned int nparams, const u32 *params,
+		  const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+		unsigned int rxqi, unsigned int rxq, unsigned int tc,
+		unsigned int vi, unsigned int cmask, unsigned int pmask,
+		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+	       unsigned int pf, unsigned int vf,
+	       unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+		      unsigned int viid, bool free, unsigned int naddr,
+		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		 bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		   unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+void t4_db_full(struct adapter *adapter);
+void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+			int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+			 int filter_index, int *enabled);
+int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
+			 u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
+void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks);
+#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
new file mode 100644
index 0000000..052c660
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -0,0 +1,1259 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *	       Casey Leedom (leedom@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include "cxgb4.h"
+
+/* DCBx version strings, indexed by the FW_PORT_DCB_VER_* values reported by
+ * the firmware.
+static const char * const dcb_ver_array[] = {
+	"Unknown",
+	"DCBx-CIN",
+	"DCBx-CEE 1.01",
+	"DCBx-IEEE",
+	"", "", "",
+	"Auto Negotiated"
+};
+
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+	if (state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	    state == CXGB4_DCB_STATE_HOST)
+		return true;
+	else
+		return false;
+}
+
+/* Initialize a port's Data Center Bridging state.  Typically used after a
+ * Link Down event.
+ */
+void cxgb4_dcb_state_init(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+	int version_temp = dcb->dcb_version;
+
+	memset(dcb, 0, sizeof(struct port_dcb_info));
+	dcb->state = CXGB4_DCB_STATE_START;
+	if (version_temp)
+		dcb->dcb_version = version_temp;
+
+	netdev_dbg(dev, "%s: Initializing DCB state for port[%d]\n",
+		    __func__, pi->port_id);
+}
+
+void cxgb4_dcb_version_init(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+
+	/* Any writes here are only done on kernels that explicitly need
+	 * a specific version, say < 2.6.38, which only support CEE
+	 */
+	dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
+}
+
+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	struct port_dcb_info *dcb = &pi->dcb;
+	struct dcb_app app;
+	int i, err;
+
+	/* zero priority implies remove */
+	app.priority = 0;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		/* Check if app list is exhausted */
+		if (!dcb->app_priority[i].protocolid)
+			break;
+
+		app.protocol = dcb->app_priority[i].protocolid;
+
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.priority = dcb->app_priority[i].user_prio_map;
+			app.selector = dcb->app_priority[i].sel_field + 1;
+			err = dcb_ieee_delapp(dev, &app);
+		} else {
+			app.selector = !!(dcb->app_priority[i].sel_field);
+			err = dcb_setapp(dev, &app);
+		}
+
+		if (err) {
+			dev_err(adap->pdev_dev,
+				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
+				dcb_ver_array[dcb->dcb_version], app.selector,
+				app.protocol, -err);
+			break;
+		}
+	}
+}
+
+/* Finite State machine for Data Center Bridging.
+ */
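+/* The states are: START (nothing negotiated yet), FW_INCOMPLETE (firmware
+ * owns DCB but negotiation is not finished), FW_ALLSYNCED (firmware DCB fully
+ * negotiated) and HOST (an external LLDP agent manages DCB).  Transitions are
+ * driven by the CXGB4_DCB_INPUT_* symbols carried in firmware Port Command
+ * messages.
+ */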
+void cxgb4_dcb_state_fsm(struct net_device *dev,
+			 enum cxgb4_dcb_state_input transition_to)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+	struct adapter *adap = pi->adapter;
+	enum cxgb4_dcb_state current_state = dcb->state;
+
+	netdev_dbg(dev, "%s: State change from %d to %d for %s\n",
+		    __func__, dcb->state, transition_to, dev->name);
+
+	switch (current_state) {
+	case CXGB4_DCB_STATE_START: {
+		switch (transition_to) {
+		case CXGB4_DCB_INPUT_FW_DISABLED: {
+			/* we're going to use Host DCB */
+			dcb->state = CXGB4_DCB_STATE_HOST;
+			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_ENABLED: {
+			/* we're going to use Firmware DCB */
+			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+			dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+				dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+			else
+				dcb->supported |= DCB_CAP_DCBX_VER_CEE;
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
+			/* expected transition */
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
+			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
+			break;
+		}
+
+		default:
+			goto bad_state_input;
+		}
+		break;
+	}
+
+	case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+		switch (transition_to) {
+		case CXGB4_DCB_INPUT_FW_ENABLED: {
+			/* we're already in firmware DCB mode */
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
+			/* we're already incomplete */
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
+			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
+			dcb->enabled = 1;
+			linkwatch_fire_event(dev);
+			break;
+		}
+
+		default:
+			goto bad_state_input;
+		}
+		break;
+	}
+
+	case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+		switch (transition_to) {
+		case CXGB4_DCB_INPUT_FW_ENABLED: {
+			/* we're already in firmware DCB mode */
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
+			/* We were successfully running with firmware DCB but
+			 * now it's telling us that it's in an "incomplete"
+			 * state.  We need to reset back to a ground state
+			 * of incomplete.
+			 */
+			cxgb4_dcb_cleanup_apps(dev);
+			cxgb4_dcb_state_init(dev);
+			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+			linkwatch_fire_event(dev);
+			break;
+		}
+
+		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
+			/* we're already all sync'ed.
+			 * This is only applicable for IEEE or when another
+			 * VI has already completed negotiation.
+			 */
+			dcb->enabled = 1;
+			linkwatch_fire_event(dev);
+			break;
+		}
+
+		default:
+			goto bad_state_input;
+		}
+		break;
+	}
+
+	case CXGB4_DCB_STATE_HOST: {
+		switch (transition_to) {
+		case CXGB4_DCB_INPUT_FW_DISABLED: {
+			/* we're already in Host DCB mode */
+			break;
+		}
+
+		default:
+			goto bad_state_input;
+		}
+		break;
+	}
+
+	default:
+		goto bad_state_transition;
+	}
+	return;
+
+bad_state_input:
+	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n",
+		transition_to);
+	return;
+
+bad_state_transition:
+	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n",
+		current_state, transition_to);
+}
+
+/* Handle a DCB/DCBX update message from the firmware.
+ */
+void cxgb4_dcb_handle_fw_update(struct adapter *adap,
+				const struct fw_port_cmd *pcmd)
+{
+	const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
+	int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
+	struct net_device *dev = adap->port[port];
+	struct port_info *pi = netdev_priv(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+	int dcb_type = pcmd->u.dcb.pgid.type;
+	int dcb_running_version;
+
+	/* Handle Firmware DCB Control messages separately since they drive
+	 * our state machine.
+	 */
+	if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
+		enum cxgb4_dcb_state_input input =
+			((pcmd->u.dcb.control.all_syncd_pkd &
+			  FW_PORT_CMD_ALL_SYNCD_F)
+			 ? CXGB4_DCB_STATE_FW_ALLSYNCED
+			 : CXGB4_DCB_STATE_FW_INCOMPLETE);
+
+		if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
+			dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
+				be16_to_cpu(
+				pcmd->u.dcb.control.dcb_version_to_app_state));
+			if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
+			    dcb_running_version == FW_PORT_DCB_VER_IEEE) {
+				dcb->dcb_version = dcb_running_version;
+				dev_warn(adap->pdev_dev, "Interface %s is running %s\n",
+					 dev->name,
+					 dcb_ver_array[dcb->dcb_version]);
+			} else {
+				dev_warn(adap->pdev_dev,
+					 "Something screwed up, requested firmware for %s, but firmware returned %s instead\n",
+					 dcb_ver_array[dcb->dcb_version],
+					 dcb_ver_array[dcb_running_version]);
+				dcb->dcb_version = FW_PORT_DCB_VER_UNKNOWN;
+			}
+		}
+
+		cxgb4_dcb_state_fsm(dev, input);
+		return;
+	}
+
+	/* It's weird, and almost certainly an error, to get Firmware DCB
+	 * messages when we either haven't been told whether we're going to be
+	 * doing Host or Firmware DCB; and even worse when we've been told
+	 * that we're doing Host DCB!
+	 */
+	if (dcb->state == CXGB4_DCB_STATE_START ||
+	    dcb->state == CXGB4_DCB_STATE_HOST) {
+		dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n",
+			dcb->state);
+		return;
+	}
+
+	/* Now handle the general Firmware DCB update messages ...
+	 */
+	switch (dcb_type) {
+	case FW_PORT_DCB_TYPE_PGID:
+		dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid);
+		dcb->msgs |= CXGB4_DCB_FW_PGID;
+		break;
+
+	case FW_PORT_DCB_TYPE_PGRATE:
+		dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported;
+		memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate,
+		       sizeof(dcb->pgrate));
+		memcpy(dcb->tsa, &fwdcb->pgrate.tsa,
+		       sizeof(dcb->tsa));
+		dcb->msgs |= CXGB4_DCB_FW_PGRATE;
+		if (dcb->msgs & CXGB4_DCB_FW_PGID)
+			IEEE_FAUX_SYNC(dev, dcb);
+		break;
+
+	case FW_PORT_DCB_TYPE_PRIORATE:
+		memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate,
+		       sizeof(dcb->priorate));
+		dcb->msgs |= CXGB4_DCB_FW_PRIORATE;
+		break;
+
+	case FW_PORT_DCB_TYPE_PFC:
+		dcb->pfcen = fwdcb->pfc.pfcen;
+		dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs;
+		dcb->msgs |= CXGB4_DCB_FW_PFC;
+		IEEE_FAUX_SYNC(dev, dcb);
+		break;
+
+	case FW_PORT_DCB_TYPE_APP_ID: {
+		const struct fw_port_app_priority *fwap = &fwdcb->app_priority;
+		int idx = fwap->idx;
+		struct app_priority *ap = &dcb->app_priority[idx];
+
+		struct dcb_app app = {
+			.protocol = be16_to_cpu(fwap->protocolid),
+		};
+		int err;
+
+		/* Convert from firmware format to relevant format
+		 * when using app selector
+		 */
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.selector = (fwap->sel_field + 1);
+			app.priority = ffs(fwap->user_prio_map) - 1;
+			err = dcb_ieee_setapp(dev, &app);
+			IEEE_FAUX_SYNC(dev, dcb);
+		} else {
+			/* Default is CEE */
+			app.selector = !!(fwap->sel_field);
+			app.priority = fwap->user_prio_map;
+			err = dcb_setapp(dev, &app);
+		}
+
+		if (err)
+			dev_err(adap->pdev_dev,
+				"Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n",
+				app.selector, app.protocol, app.priority, -err);
+
+		ap->user_prio_map = fwap->user_prio_map;
+		ap->sel_field = fwap->sel_field;
+		ap->protocolid = be16_to_cpu(fwap->protocolid);
+		dcb->msgs |= CXGB4_DCB_FW_APP_ID;
+		break;
+	}
+
+	default:
+		dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n",
+			dcb_type);
+		break;
+	}
+}
+
+/* Data Center Bridging netlink operations.
+ */
+
+
+/* Get current DCB enabled/disabled state.
+ */
+static u8 cxgb4_getstate(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	return pi->dcb.enabled;
+}
+
+/* Set DCB enabled/disabled.
+ */
+static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	/* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+		pi->dcb.enabled = enabled;
+		return 0;
+	}
+
+	/* Firmware doesn't provide any mechanism to control the DCB state.
+	 */
+	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
+		return 1;
+
+	return 0;
+}
+
+static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
+			     u8 *prio_type, u8 *pgid, u8 *bw_per,
+			     u8 *up_tc_map, int local)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int err;
+
+	*prio_type = *pgid = *bw_per = *up_tc_map = 0;
+
+	if (local)
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	else
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+		return;
+	}
+	*pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
+
+	if (local)
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	else
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return;
+	}
+
+	*bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid];
+	*up_tc_map = (1 << tc);
+
+	/* prio_type is link strict */
+	if (*pgid != 0xF)
+		*prio_type = 0x2;
+}
+
+static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
+				u8 *prio_type, u8 *pgid, u8 *bw_per,
+				u8 *up_tc_map)
+{
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 1);
+}
+
+
+static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
+				u8 *prio_type, u8 *pgid, u8 *bw_per,
+				u8 *up_tc_map)
+{
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 0);
+}
+
+static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
+				u8 prio_type, u8 pgid, u8 bw_per,
+				u8 up_tc_map)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int fw_tc = 7 - tc;
+	u32 _pgid;
+	int err;
+
+	if (pgid == DCB_ATTR_VALUE_UNDEFINED)
+		return;
+	if (bw_per == DCB_ATTR_VALUE_UNDEFINED)
+		return;
+
+	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+		return;
+	}
+
+	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+	_pgid &= ~(0xF << (fw_tc * 4));
+	_pgid |= pgid << (fw_tc * 4);
+	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
+
+	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n",
+			-err);
+		return;
+	}
+
+	memset(&pcmd, 0, sizeof(struct fw_port_cmd));
+
+	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return;
+	}
+
+	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS)
+		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+			-err);
+}
+
+static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per,
+			      int local)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int err;
+
+	if (local)
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	else
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return;
+	}
+
+	*bw_per = pcmd.u.dcb.pgrate.pgrate[pgid];
+}
+
+static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1);
+}
+
+static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0);
+}
+
+static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
+				 u8 bw_per)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int err;
+
+	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return;
+	}
+
+	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+	if (err != FW_PORT_DCB_CFG_SUCCESS)
+		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+			-err);
+}
+
+/* Return whether the specified Traffic Class Priority has Priority Pause
+ * Frames enabled.
+ */
+static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+
+	if (!cxgb4_dcb_state_synced(dcb->state) ||
+	    priority >= CXGB4_MAX_PRIORITY)
+		*pfccfg = 0;
+	else
+		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
+}
+
+/* Enable/disable Priority Pause Frames for the specified Traffic Class
+ * Priority.
+ */
+static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int err;
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
+	    priority >= CXGB4_MAX_PRIORITY)
+		return;
+
+	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
+
+	pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
+	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
+
+	if (pfccfg)
+		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
+	else
+		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err);
+		return;
+	}
+
+	pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen;
+}
+
+static u8 cxgb4_setall(struct net_device *dev)
+{
+	return 0;
+}
+
+/* Return DCB capabilities.
+ */
+static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	switch (cap_id) {
+	case DCB_CAP_ATTR_PG:
+	case DCB_CAP_ATTR_PFC:
+		*caps = true;
+		break;
+
+	case DCB_CAP_ATTR_PG_TCS:
+		/* 8 priorities for PG represented by bitmap */
+		*caps = 0x80;
+		break;
+
+	case DCB_CAP_ATTR_PFC_TCS:
+		/* 8 priorities for PFC represented by bitmap */
+		*caps = 0x80;
+		break;
+
+	case DCB_CAP_ATTR_GSP:
+		*caps = true;
+		break;
+
+	case DCB_CAP_ATTR_UP2TC:
+	case DCB_CAP_ATTR_BCN:
+		*caps = false;
+		break;
+
+	case DCB_CAP_ATTR_DCBX:
+		*caps = pi->dcb.supported;
+		break;
+
+	default:
+		*caps = false;
+	}
+
+	return 0;
+}
+
+/* Return the number of Traffic Classes for the indicated Traffic Class ID.
+ */
+static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	switch (tcs_id) {
+	case DCB_NUMTCS_ATTR_PG:
+		if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE)
+			*num = pi->dcb.pg_num_tcs_supported;
+		else
+			*num = 0x8;
+		break;
+
+	case DCB_NUMTCS_ATTR_PFC:
+		*num = 0x8;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set the number of Traffic Classes supported for the indicated Traffic Class
+ * ID.
+ */
+static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num)
+{
+	/* Setting the number of Traffic Classes isn't supported.
+	 */
+	return -ENOSYS;
+}
+
+/* Return whether Priority Flow Control is enabled.  */
+static u8 cxgb4_getpfcstate(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return false;
+
+	return pi->dcb.pfcen != 0;
+}
+
+/* Enable/disable Priority Flow Control. */
+static void cxgb4_setpfcstate(struct net_device *dev, u8 state)
+{
+	/* We can't enable/disable Priority Flow Control but we also can't
+	 * return an error ...
+	 */
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+			  int peer)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int i;
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return 0;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		struct fw_port_cmd pcmd;
+		int err;
+
+		if (peer)
+			INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+		else
+			INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+
+		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+		pcmd.u.dcb.app_priority.idx = i;
+
+		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+		if (err != FW_PORT_DCB_CFG_SUCCESS) {
+			dev_err(adap->pdev_dev, "DCB APP read failed with %d\n",
+				-err);
+			return err;
+		}
+		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id)
+			if (pcmd.u.dcb.app_priority.sel_field == app_idtype)
+				return pcmd.u.dcb.app_priority.user_prio_map;
+
+		/* exhausted app list */
+		if (!pcmd.u.dcb.app_priority.protocolid)
+			break;
+	}
+
+	return -EEXIST;
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
+{
+	/* Convert app_idtype to firmware format before querying */
+	return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+			      app_idtype : 3, app_id, 0);
+}
+
+/* Write a new Application User Priority Map for the specified Application ID
+ */
+static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+			  u8 app_prio)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int i, err;
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return -EINVAL;
+
+	/* DCB info gets thrown away on link up */
+	if (!netif_carrier_ok(dev))
+		return -ENOLINK;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+		pcmd.u.dcb.app_priority.idx = i;
+		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+		if (err != FW_PORT_DCB_CFG_SUCCESS) {
+			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+				-err);
+			return err;
+		}
+		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) {
+			/* overwrite existing app table */
+			pcmd.u.dcb.app_priority.protocolid = 0;
+			break;
+		}
+		/* find first empty slot */
+		if (!pcmd.u.dcb.app_priority.protocolid)
+			break;
+	}
+
+	if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) {
+		/* no empty slots available */
+		dev_err(adap->pdev_dev, "DCB app table full\n");
+		return -EBUSY;
+	}
+
+	/* write out new app table entry */
+	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
+
+	pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+	pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
+	pcmd.u.dcb.app_priority.sel_field = app_idtype;
+	pcmd.u.dcb.app_priority.user_prio_map = app_prio;
+	pcmd.u.dcb.app_priority.idx = i;
+
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB app table write failed with %d\n",
+			-err);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Priority for CEE inside dcb_app is a bitmask; 0 is an invalid value */
+static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+			u8 app_prio)
+{
+	int ret;
+	struct dcb_app app = {
+		.selector = app_idtype,
+		.protocol = app_id,
+		.priority = app_prio,
+	};
+
+	if (app_idtype != DCB_APP_IDTYPE_ETHTYPE &&
+	    app_idtype != DCB_APP_IDTYPE_PORTNUM)
+		return -EINVAL;
+
+	/* Convert app_idtype to a format that firmware understands */
+	ret = __cxgb4_setapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+			      app_idtype : 3, app_id, app_prio);
+	if (ret)
+		return ret;
+
+	return dcb_setapp(dev, &app);
+}
+
+/* Return whether IEEE Data Center Bridging has been negotiated.
+ */
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+				enum cxgb4_dcb_fw_msgs dcb_subtype)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+
+	if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+		if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+			return 0;
+
+	return (cxgb4_dcb_state_synced(dcb->state) &&
+		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
+}
+
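+/* Build an ieee_ets structure from the firmware's PGID and PGRATE state:
+ * prio_tc[] comes from the per-priority 4-bit group IDs (priority 0 lives
+ * in the top nibble), tc_tx_bw[] and tc_rx_bw[] from the group rates, and
+ * tc_tsa[] from the reported TSA algorithms.  'local' selects local vs.
+ * peer parameters; local results are always reported as "willing".
+ */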
+static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
+			       int local)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+	struct adapter *adap = pi->adapter;
+	uint32_t tc_info;
+	struct fw_port_cmd pcmd;
+	int i, bwg, err;
+
+	if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
+		return 0;
+
+	ets->ets_cap =  dcb->pg_num_tcs_supported;
+
+	if (local) {
+		ets->willing = 1;
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	} else {
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+	}
+
+	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+		return err;
+	}
+
+	tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+	if (local)
+		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+	else
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return err;
+	}
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
+		ets->prio_tc[i] = bwg;
+		ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+		ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
+		ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
+	}
+
+	return 0;
+}
+
+static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+	return cxgb4_ieee_read_ets(dev, ets, 1);
+}
+
+/* We reuse this for peer PFC as well, since PFC can't be enabled in one
+ * direction only.
+ */
+static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct port_dcb_info *dcb = &pi->dcb;
+
+	memset(pfc, 0, sizeof(struct ieee_pfc));
+
+	if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
+		return 0;
+
+	pfc->pfc_cap = dcb->pfc_num_tcs_supported;
+	pfc->pfc_en = bitswap_1(dcb->pfcen);
+
+	return 0;
+}
+
+static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+	return cxgb4_ieee_read_ets(dev, ets, 0);
+}
+
+/* Fill in the Application User Priority Map associated with the
+ * specified Application.
+ * Priority for IEEE dcb_app is an integer, with 0 being a valid value
+ */
+static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
+{
+	int prio;
+
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
+		return -EINVAL;
+	if (!(app->selector && app->protocol))
+		return -EINVAL;
+
+	/* Try querying firmware first, use firmware format */
+	prio = __cxgb4_getapp(dev, app->selector - 1, app->protocol, 0);
+
+	if (prio < 0)
+		prio = dcb_ieee_getapp_mask(dev, app);
+
+	app->priority = ffs(prio) - 1;
+	return 0;
+}
+
+/* Write a new Application User Priority Map for the specified Application ID.
+ * Priority for IEEE dcb_app is an integer, with 0 being a valid value
+ */
+static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+	int ret;
+
+	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
+		return -EINVAL;
+	if (!(app->selector && app->protocol))
+		return -EINVAL;
+
+	if (!(app->selector > IEEE_8021QAZ_APP_SEL_ETHERTYPE  &&
+	      app->selector < IEEE_8021QAZ_APP_SEL_ANY))
+		return -EINVAL;
+
+	/* change selector to a format that firmware understands */
+	ret = __cxgb4_setapp(dev, app->selector - 1, app->protocol,
+			     (1 << app->priority));
+	if (ret)
+		return ret;
+
+	return dcb_ieee_setapp(dev, app);
+}
+
+/* Return our DCBX parameters.
+ */
+static u8 cxgb4_getdcbx(struct net_device *dev)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	/* This is already set by cxgb4_set_dcb_caps, so just return it */
+	return pi->dcb.supported;
+}
+
+/* Set our DCBX parameters.
+ */
+static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	/* Filter out requests which exceed our capabilities.
+	 */
+	if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT))
+	    != dcb_request)
+		return 1;
+
+	/* Can't enable DCB if we haven't successfully negotiated it.
+	 */
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return 1;
+
+	/* There's currently no mechanism to allow for the firmware DCBX
+	 * negotiation to be changed from the Host Driver.  If the caller
+	 * requests exactly the same parameters that we already have then
+	 * we'll allow them to be successfully "set" ...
+	 */
+	if (dcb_request != pi->dcb.supported)
+		return 1;
+
+	pi->dcb.supported = dcb_request;
+	return 0;
+}
+
+static int cxgb4_getpeer_app(struct net_device *dev,
+			     struct dcb_peer_app_info *info, u16 *app_count)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int i, err = 0;
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return 1;
+
+	info->willing = 0;
+	info->error = 0;
+
+	*app_count = 0;
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+		pcmd.u.dcb.app_priority.idx = *app_count;
+		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+		if (err != FW_PORT_DCB_CFG_SUCCESS) {
+			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+				-err);
+			return err;
+		}
+
+		/* find first empty slot */
+		if (!pcmd.u.dcb.app_priority.protocolid)
+			break;
+	}
+	*app_count = i;
+	return err;
+}
+
+static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	int i, err = 0;
+
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
+		return 1;
+
+	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+		pcmd.u.dcb.app_priority.idx = i;
+		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+		if (err != FW_PORT_DCB_CFG_SUCCESS) {
+			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+				-err);
+			return err;
+		}
+
+		/* find first empty slot */
+		if (!pcmd.u.dcb.app_priority.protocolid)
+			break;
+
+		table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
+		table[i].protocol =
+			be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
+		table[i].priority =
+			ffs(pcmd.u.dcb.app_priority.user_prio_map) - 1;
+	}
+	return err;
+}
+
+/* Return Priority Group information.
+ */
+static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
+{
+	struct fw_port_cmd pcmd;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = pi->adapter;
+	u32 pgid;
+	int i, err;
+
+	/* We're always "willing" -- the Switch Fabric always dictates the
+	 * DCBX parameters to us.
+	 */
+	pg->willing = true;
+
+	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+		return err;
+	}
+	pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+		pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;
+
+	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+	if (err != FW_PORT_DCB_CFG_SUCCESS) {
+		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+			-err);
+		return err;
+	}
+
+	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+
+	pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
+	return 0;
+}
+
+/* Return Priority Flow Control information.
+ */
+static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
+
+	/* Firmware sends this to us in a format that is a bit-flipped
+	 * version of the spec; correct it before passing it to the host.
+	 * Other uses of pfcen take care of this with bit shifting instead.
+	 */
+	pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
+
+	pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
+	.ieee_getets		= cxgb4_ieee_get_ets,
+	.ieee_getpfc		= cxgb4_ieee_get_pfc,
+	.ieee_getapp		= cxgb4_ieee_getapp,
+	.ieee_setapp		= cxgb4_ieee_setapp,
+	.ieee_peer_getets	= cxgb4_ieee_peer_ets,
+	.ieee_peer_getpfc	= cxgb4_ieee_get_pfc,
+
+	/* CEE std */
+	.getstate		= cxgb4_getstate,
+	.setstate		= cxgb4_setstate,
+	.getpgtccfgtx		= cxgb4_getpgtccfg_tx,
+	.getpgbwgcfgtx		= cxgb4_getpgbwgcfg_tx,
+	.getpgtccfgrx		= cxgb4_getpgtccfg_rx,
+	.getpgbwgcfgrx		= cxgb4_getpgbwgcfg_rx,
+	.setpgtccfgtx		= cxgb4_setpgtccfg_tx,
+	.setpgbwgcfgtx		= cxgb4_setpgbwgcfg_tx,
+	.setpfccfg		= cxgb4_setpfccfg,
+	.getpfccfg		= cxgb4_getpfccfg,
+	.setall			= cxgb4_setall,
+	.getcap			= cxgb4_getcap,
+	.getnumtcs		= cxgb4_getnumtcs,
+	.setnumtcs		= cxgb4_setnumtcs,
+	.getpfcstate		= cxgb4_getpfcstate,
+	.setpfcstate		= cxgb4_setpfcstate,
+	.getapp			= cxgb4_getapp,
+	.setapp			= cxgb4_setapp,
+
+	/* DCBX configuration */
+	.getdcbx		= cxgb4_getdcbx,
+	.setdcbx		= cxgb4_setdcbx,
+
+	/* peer apps */
+	.peer_getappinfo	= cxgb4_getpeer_app,
+	.peer_getapptable	= cxgb4_getpeerapp_tbl,
+
+	/* CEE peer */
+	.cee_peer_getpg		= cxgb4_cee_peer_getpg,
+	.cee_peer_getpfc	= cxgb4_cee_peer_getpfc,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
new file mode 100644
index 0000000..ccf24d3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#ifndef __CXGB4_DCB_H
+#define __CXGB4_DCB_H
+
+#include <linux/netdevice.h>
+#include <linux/dcbnl.h>
+#include <net/dcbnl.h>
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+
+#define CXGB4_DCBX_FW_SUPPORT \
+	(DCB_CAP_DCBX_VER_CEE | \
+	 DCB_CAP_DCBX_VER_IEEE | \
+	 DCB_CAP_DCBX_LLD_MANAGED)
+#define CXGB4_DCBX_HOST_SUPPORT \
+	(DCB_CAP_DCBX_VER_CEE | \
+	 DCB_CAP_DCBX_VER_IEEE | \
+	 DCB_CAP_DCBX_HOST)
+
+#define CXGB4_MAX_PRIORITY      CXGB4_MAX_DCBX_APP_SUPPORTED
+#define CXGB4_MAX_TCS           CXGB4_MAX_DCBX_APP_SUPPORTED
+
+#define INIT_PORT_DCB_CMD(__pcmd, __port, __op, __action) \
+	do { \
+		memset(&(__pcmd), 0, sizeof(__pcmd)); \
+		(__pcmd).op_to_portid = \
+			cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \
+				    FW_CMD_REQUEST_F | \
+				    FW_CMD_##__op##_F | \
+				    FW_PORT_CMD_PORTID_V(__port)); \
+		(__pcmd).action_to_len16 = \
+			cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \
+				    FW_LEN16(pcmd)); \
+	} while (0)
+
+#define INIT_PORT_DCB_READ_PEER_CMD(__pcmd, __port) \
+	INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_RECV)
+
+#define INIT_PORT_DCB_READ_LOCAL_CMD(__pcmd, __port) \
+	INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_TRANS)
+
+#define INIT_PORT_DCB_READ_SYNC_CMD(__pcmd, __port) \
+	INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_DET)
+
+#define INIT_PORT_DCB_WRITE_CMD(__pcmd, __port) \
+	INIT_PORT_DCB_CMD(__pcmd, __port, EXEC, FW_PORT_ACTION_L2_DCB_CFG)
+
+#define IEEE_FAUX_SYNC(__dev, __dcb) \
+	do { \
+		if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
+			cxgb4_dcb_state_fsm((__dev), \
+					    CXGB4_DCB_STATE_FW_ALLSYNCED); \
+	} while (0)
+
+/* States we can be in for a port's Data Center Bridging.
+ */
+enum cxgb4_dcb_state {
+	CXGB4_DCB_STATE_START,		/* initial unknown state */
+	CXGB4_DCB_STATE_HOST,		/* we're using Host DCB (if at all) */
+	CXGB4_DCB_STATE_FW_INCOMPLETE,	/* using firmware DCB, incomplete */
+	CXGB4_DCB_STATE_FW_ALLSYNCED,	/* using firmware DCB, all sync'ed */
+};
+
+/* Data Center Bridging state input for the Finite State Machine.
+ */
+enum cxgb4_dcb_state_input {
+	/* Input from the firmware.
+	 */
+	CXGB4_DCB_INPUT_FW_DISABLED,	/* firmware DCB disabled */
+	CXGB4_DCB_INPUT_FW_ENABLED,	/* firmware DCB enabled */
+	CXGB4_DCB_INPUT_FW_INCOMPLETE,	/* firmware reports incomplete DCB */
+	CXGB4_DCB_INPUT_FW_ALLSYNCED,	/* firmware reports all sync'ed */
+};
+
+/* Firmware DCB messages that we've received so far ...
+ */
+enum cxgb4_dcb_fw_msgs {
+	CXGB4_DCB_FW_PGID	= 0x01,
+	CXGB4_DCB_FW_PGRATE	= 0x02,
+	CXGB4_DCB_FW_PRIORATE	= 0x04,
+	CXGB4_DCB_FW_PFC	= 0x08,
+	CXGB4_DCB_FW_APP_ID	= 0x10,
+};
+
+#define CXGB4_MAX_DCBX_APP_SUPPORTED 8
+
+/* Data Center Bridging support.
+ */
+struct port_dcb_info {
+	enum cxgb4_dcb_state state;	/* DCB State Machine */
+	enum cxgb4_dcb_fw_msgs msgs;	/* DCB Firmware messages received */
+	unsigned int supported;		/* OS DCB capabilities supported */
+	bool enabled;			/* OS Enabled state */
+
+	/* Cached copies of DCB information sent by the firmware (in Host
+	 * Native Endian format).
+	 */
+	u32	pgid;			/* Priority Group[0..7] */
+	u8	dcb_version;		/* Running DCBx version */
+	u8	pfcen;			/* Priority Flow Control[0..7] */
+	u8	pg_num_tcs_supported;	/* max PG Traffic Classes */
+	u8	pfc_num_tcs_supported;	/* max PFC Traffic Classes */
+	u8	pgrate[8];		/* Priority Group Rate[0..7] */
+	u8	priorate[8];		/* Priority Rate[0..7] */
+	u8	tsa[8];			/* TSA Algorithm[0..7] */
+	struct app_priority { /* Application Information */
+		u8	user_prio_map;	/* Priority Map bitfield */
+		u8	sel_field;	/* Protocol ID interpretation */
+		u16	protocolid;	/* Protocol ID */
+	} app_priority[CXGB4_MAX_DCBX_APP_SUPPORTED];
+};
+
+void cxgb4_dcb_state_init(struct net_device *);
+void cxgb4_dcb_version_init(struct net_device *);
+void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
+void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
+void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
+extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
+
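+/* Reverse the bit order within a byte (bit 0 <-> bit 7, bit 1 <-> bit 6,
+ * and so on).  Used to translate the firmware's priority bitmaps, which
+ * keep priority 0 in the most significant bit, to the OS convention of
+ * priority 0 in bit 0, and back.
+ */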
+static inline __u8 bitswap_1(unsigned char val)
+{
+	return ((val & 0x80) >> 7) |
+	       ((val & 0x40) >> 5) |
+	       ((val & 0x20) >> 3) |
+	       ((val & 0x10) >> 1) |
+	       ((val & 0x08) << 1) |
+	       ((val & 0x04) << 3) |
+	       ((val & 0x02) << 5) |
+	       ((val & 0x01) << 7);
+}
+#define CXGB4_DCB_ENABLED true
+
+#else /* !CONFIG_CHELSIO_T4_DCB */
+
+static inline void cxgb4_dcb_state_init(struct net_device *dev)
+{
+}
+
+#define CXGB4_DCB_ENABLED false
+
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+
+#endif /* __CXGB4_DCB_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
new file mode 100644
index 0000000..4269944
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -0,0 +1,3102 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+#include <linux/sort.h>
+#include <linux/ctype.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+#include "t4fw_api.h"
+#include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
+#include "l2t.h"
+
+/* generic seq_file support for showing a table of size rows x width. */
+static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
+{
+	pos -= tb->skip_first;
+	return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
+}
+
+static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
+{
+	struct seq_tab *tb = seq->private;
+
+	if (tb->skip_first && *pos == 0)
+		return SEQ_START_TOKEN;
+
+	return seq_tab_get_idx(tb, *pos);
+}
+
+static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	v = seq_tab_get_idx(seq->private, *pos + 1);
+	if (v)
+		++*pos;
+	return v;
+}
+
+static void seq_tab_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int seq_tab_show(struct seq_file *seq, void *v)
+{
+	const struct seq_tab *tb = seq->private;
+
+	return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
+}
+
+static const struct seq_operations seq_tab_ops = {
+	.start = seq_tab_start,
+	.next  = seq_tab_next,
+	.stop  = seq_tab_stop,
+	.show  = seq_tab_show
+};
+
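+/* Allocate a seq_file iterator backed by a seq_tab with room for
+ * rows * width bytes of raw table data.  The caller fills p->data after
+ * this returns (typically via one of the t4_*_read_* helpers) and each row
+ * is rendered through the supplied show() callback; have_header requests a
+ * SEQ_START_TOKEN header row.
+ */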
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+			     unsigned int width, unsigned int have_header,
+			     int (*show)(struct seq_file *seq, void *v, int i))
+{
+	struct seq_tab *p;
+
+	p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
+	if (p) {
+		p->show = show;
+		p->rows = rows;
+		p->width = width;
+		p->skip_first = have_header != 0;
+	}
+	return p;
+}
+
+/* Trim the size of a seq_tab to the supplied number of rows.  The operation is
+ * irreversible.
+ */
+static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
+{
+	if (new_rows > p->rows)
+		return -EINVAL;
+	p->rows = new_rows;
+	return 0;
+}
+
+static int cim_la_show(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN)
+		seq_puts(seq, "Status   Data      PC     LS0Stat  LS0Addr "
+			 "            LS0Data\n");
+	else {
+		const u32 *p = v;
+
+		seq_printf(seq,
+			   "  %02x  %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
+			   (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
+			   p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
+			   p[6], p[7]);
+	}
+	return 0;
+}
+
+static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Data      PC\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %08x %08x\n", p[5] & 0xff, p[6],
+			   p[7]);
+		seq_printf(seq, "  %02x   %02x%06x %02x%06x\n",
+			   (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
+			   p[4] & 0xff, p[5] >> 8);
+		seq_printf(seq, "  %02x   %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
+			   p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
+	}
+	return 0;
+}
+
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
+			 "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+			   (p[9] >> 16) & 0xff,       /* Status */
+			   p[9] & 0xffff, p[8] >> 16, /* Inst */
+			   p[8] & 0xffff, p[7] >> 16, /* Data */
+			   p[7] & 0xffff, p[6] >> 16, /* PC */
+			   p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
+			   p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
+	}
+	return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %08x %08x %08x\n",
+			   p[3] & 0xff, p[2], p[1], p[0]);
+		seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
+			   (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+			   p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
+			   (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+			   p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+			   p[6] >> 16);
+	}
+	return 0;
+}
+
+static int cim_la_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	unsigned int cfg;
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+	if (ret)
+		return ret;
+
+	if (is_t6(adap->params.chip)) {
+		/* +1 to account for integer division of CIMLA_SIZE/10 */
+		p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+				 10 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ?
+					cim_la_show_pc_t6 : cim_la_show_t6);
+	} else {
+		p = seq_open_tab(file, adap->params.cim_la_size / 8,
+				 8 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+							     cim_la_show);
+	}
+	if (!p)
+		return -ENOMEM;
+
+	ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
+	if (ret)
+		seq_release_private(inode, file);
+	return ret;
+}
+
+static const struct file_operations cim_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Cntl ID DataBE   Addr                 Data\n");
+	} else if (idx < CIM_PIFLA_SIZE) {
+		seq_printf(seq, " %02x  %02x  %04x  %08x %08x%08x%08x%08x\n",
+			   (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
+			   p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
+	} else {
+		if (idx == CIM_PIFLA_SIZE)
+			seq_puts(seq, "\nCntl ID               Data\n");
+		seq_printf(seq, " %02x  %02x %08x%08x%08x%08x\n",
+			   (p[4] >> 6) & 0xff, p[4] & 0x3f,
+			   p[3], p[2], p[1], p[0]);
+	}
+	return 0;
+}
+
+static int cim_pif_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
+			 cim_pif_la_show);
+	if (!p)
+		return -ENOMEM;
+
+	t4_cim_read_pif_la(adap, (u32 *)p->data,
+			   (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+	return 0;
+}
+
+static const struct file_operations cim_pif_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_pif_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "\n");
+	} else if (idx < CIM_MALA_SIZE) {
+		seq_printf(seq, "%02x%08x%08x%08x%08x\n",
+			   p[4], p[3], p[2], p[1], p[0]);
+	} else {
+		if (idx == CIM_MALA_SIZE)
+			seq_puts(seq,
+				 "\nCnt ID Tag UE       Data       RDY VLD\n");
+		seq_printf(seq, "%3u %2u  %x   %u %08x%08x  %u   %u\n",
+			   (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+			   (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+			   (p[1] >> 2) | ((p[2] & 3) << 30),
+			   (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+			   p[0] & 1);
+	}
+	return 0;
+}
+
+static int cim_ma_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
+			 cim_ma_la_show);
+	if (!p)
+		return -ENOMEM;
+
+	t4_cim_read_ma_la(adap, (u32 *)p->data,
+			  (u32 *)p->data + 5 * CIM_MALA_SIZE);
+	return 0;
+}
+
+static const struct file_operations cim_ma_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_ma_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+static int cim_qcfg_show(struct seq_file *seq, void *v)
+{
+	static const char * const qname[] = {
+		"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
+		"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
+		"SGE0-RX", "SGE1-RX"
+	};
+
+	int i;
+	struct adapter *adap = seq->private;
+	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+	u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
+	u16 thres[CIM_NUM_IBQ];
+	u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
+	u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
+	u32 *p = stat;
+	int cim_num_obq = is_t4(adap->params.chip) ?
+				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+	i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
+			UP_IBQ_0_SHADOW_RDADDR_A,
+			ARRAY_SIZE(stat), stat);
+	if (!i) {
+		if (is_t4(adap->params.chip)) {
+			i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
+					ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
+			wr = obq_wr_t4;
+		} else {
+			i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
+					ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
+			wr = obq_wr_t5;
+		}
+	}
+	if (i)
+		return i;
+
+	t4_read_cimq_cfg(adap, base, size, thres);
+
+	seq_printf(seq,
+		   "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail\n");
+	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
+		seq_printf(seq, "%7s %5x %5u %5u %6x  %4x %4u %4u %5u\n",
+			   qname[i], base[i], size[i], thres[i],
+			   IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
+			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+			   QUEREMFLITS_G(p[2]) * 16);
+	for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
+		seq_printf(seq, "%7s %5x %5u %12x  %4x %4u %4u %5u\n",
+			   qname[i], base[i], size[i],
+			   QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
+			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+			   QUEREMFLITS_G(p[2]) * 16);
+	return 0;
+}
+
+static int cim_qcfg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cim_qcfg_show, inode->i_private);
+}
+
+static const struct file_operations cim_qcfg_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_qcfg_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static int cimq_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
+		   p[2], p[3]);
+	return 0;
+}
+
+static int cim_ibq_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct seq_tab *p;
+	unsigned int qid = (uintptr_t)inode->i_private & 7;
+	struct adapter *adap = inode->i_private - qid;
+
+	p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+	if (!p)
+		return -ENOMEM;
+
+	ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
+	if (ret < 0)
+		seq_release_private(inode, file);
+	else
+		ret = 0;
+	return ret;
+}
+
+static const struct file_operations cim_ibq_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_ibq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+static int cim_obq_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct seq_tab *p;
+	unsigned int qid = (uintptr_t)inode->i_private & 7;
+	struct adapter *adap = inode->i_private - qid;
+
+	p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+	if (!p)
+		return -ENOMEM;
+
+	ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
+	if (ret < 0) {
+		seq_release_private(inode, file);
+	} else {
+		seq_tab_trim(p, ret / 4);
+		ret = 0;
+	}
+	return ret;
+}
+
+static const struct file_operations cim_obq_fops = {
+	.owner   = THIS_MODULE,
+	.open    = cim_obq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+struct field_desc {
+	const char *name;
+	unsigned int start;
+	unsigned int width;
+};
+
+static void field_desc_show(struct seq_file *seq, u64 v,
+			    const struct field_desc *p)
+{
+	char buf[32];
+	int line_size = 0;
+
+	while (p->name) {
+		u64 mask = (1ULL << p->width) - 1;
+		int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
+				    ((unsigned long long)v >> p->start) & mask);
+
+		if (line_size + len >= 79) {
+			line_size = 8;
+			seq_puts(seq, "\n        ");
+		}
+		seq_printf(seq, "%s ", buf);
+		line_size += len + 1;
+		p++;
+	}
+	seq_putc(seq, '\n');
+}
+
+static struct field_desc tp_la0[] = {
+	{ "RcfOpCodeOut", 60, 4 },
+	{ "State", 56, 4 },
+	{ "WcfState", 52, 4 },
+	{ "RcfOpcSrcOut", 50, 2 },
+	{ "CRxError", 49, 1 },
+	{ "ERxError", 48, 1 },
+	{ "SanityFailed", 47, 1 },
+	{ "SpuriousMsg", 46, 1 },
+	{ "FlushInputMsg", 45, 1 },
+	{ "FlushInputCpl", 44, 1 },
+	{ "RssUpBit", 43, 1 },
+	{ "RssFilterHit", 42, 1 },
+	{ "Tid", 32, 10 },
+	{ "InitTcb", 31, 1 },
+	{ "LineNumber", 24, 7 },
+	{ "Emsg", 23, 1 },
+	{ "EdataOut", 22, 1 },
+	{ "Cmsg", 21, 1 },
+	{ "CdataOut", 20, 1 },
+	{ "EreadPdu", 19, 1 },
+	{ "CreadPdu", 18, 1 },
+	{ "TunnelPkt", 17, 1 },
+	{ "RcfPeerFin", 16, 1 },
+	{ "RcfReasonOut", 12, 4 },
+	{ "TxCchannel", 10, 2 },
+	{ "RcfTxChannel", 8, 2 },
+	{ "RxEchannel", 6, 2 },
+	{ "RcfRxChannel", 5, 1 },
+	{ "RcfDataOutSrdy", 4, 1 },
+	{ "RxDvld", 3, 1 },
+	{ "RxOoDvld", 2, 1 },
+	{ "RxCongestion", 1, 1 },
+	{ "TxCongestion", 0, 1 },
+	{ NULL }
+};
+
+static int tp_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u64 *p = v;
+
+	field_desc_show(seq, *p, tp_la0);
+	return 0;
+}
+
+static int tp_la_show2(struct seq_file *seq, void *v, int idx)
+{
+	const u64 *p = v;
+
+	if (idx)
+		seq_putc(seq, '\n');
+	field_desc_show(seq, p[0], tp_la0);
+	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+		field_desc_show(seq, p[1], tp_la0);
+	return 0;
+}
+
+static int tp_la_show3(struct seq_file *seq, void *v, int idx)
+{
+	static struct field_desc tp_la1[] = {
+		{ "CplCmdIn", 56, 8 },
+		{ "CplCmdOut", 48, 8 },
+		{ "ESynOut", 47, 1 },
+		{ "EAckOut", 46, 1 },
+		{ "EFinOut", 45, 1 },
+		{ "ERstOut", 44, 1 },
+		{ "SynIn", 43, 1 },
+		{ "AckIn", 42, 1 },
+		{ "FinIn", 41, 1 },
+		{ "RstIn", 40, 1 },
+		{ "DataIn", 39, 1 },
+		{ "DataInVld", 38, 1 },
+		{ "PadIn", 37, 1 },
+		{ "RxBufEmpty", 36, 1 },
+		{ "RxDdp", 35, 1 },
+		{ "RxFbCongestion", 34, 1 },
+		{ "TxFbCongestion", 33, 1 },
+		{ "TxPktSumSrdy", 32, 1 },
+		{ "RcfUlpType", 28, 4 },
+		{ "Eread", 27, 1 },
+		{ "Ebypass", 26, 1 },
+		{ "Esave", 25, 1 },
+		{ "Static0", 24, 1 },
+		{ "Cread", 23, 1 },
+		{ "Cbypass", 22, 1 },
+		{ "Csave", 21, 1 },
+		{ "CPktOut", 20, 1 },
+		{ "RxPagePoolFull", 18, 2 },
+		{ "RxLpbkPkt", 17, 1 },
+		{ "TxLpbkPkt", 16, 1 },
+		{ "RxVfValid", 15, 1 },
+		{ "SynLearned", 14, 1 },
+		{ "SetDelEntry", 13, 1 },
+		{ "SetInvEntry", 12, 1 },
+		{ "CpcmdDvld", 11, 1 },
+		{ "CpcmdSave", 10, 1 },
+		{ "RxPstructsFull", 8, 2 },
+		{ "EpcmdDvld", 7, 1 },
+		{ "EpcmdFlush", 6, 1 },
+		{ "EpcmdTrimPrefix", 5, 1 },
+		{ "EpcmdTrimPostfix", 4, 1 },
+		{ "ERssIp4Pkt", 3, 1 },
+		{ "ERssIp6Pkt", 2, 1 },
+		{ "ERssTcpUdpPkt", 1, 1 },
+		{ "ERssFceFipPkt", 0, 1 },
+		{ NULL }
+	};
+	static struct field_desc tp_la2[] = {
+		{ "CplCmdIn", 56, 8 },
+		{ "MpsVfVld", 55, 1 },
+		{ "MpsPf", 52, 3 },
+		{ "MpsVf", 44, 8 },
+		{ "SynIn", 43, 1 },
+		{ "AckIn", 42, 1 },
+		{ "FinIn", 41, 1 },
+		{ "RstIn", 40, 1 },
+		{ "DataIn", 39, 1 },
+		{ "DataInVld", 38, 1 },
+		{ "PadIn", 37, 1 },
+		{ "RxBufEmpty", 36, 1 },
+		{ "RxDdp", 35, 1 },
+		{ "RxFbCongestion", 34, 1 },
+		{ "TxFbCongestion", 33, 1 },
+		{ "TxPktSumSrdy", 32, 1 },
+		{ "RcfUlpType", 28, 4 },
+		{ "Eread", 27, 1 },
+		{ "Ebypass", 26, 1 },
+		{ "Esave", 25, 1 },
+		{ "Static0", 24, 1 },
+		{ "Cread", 23, 1 },
+		{ "Cbypass", 22, 1 },
+		{ "Csave", 21, 1 },
+		{ "CPktOut", 20, 1 },
+		{ "RxPagePoolFull", 18, 2 },
+		{ "RxLpbkPkt", 17, 1 },
+		{ "TxLpbkPkt", 16, 1 },
+		{ "RxVfValid", 15, 1 },
+		{ "SynLearned", 14, 1 },
+		{ "SetDelEntry", 13, 1 },
+		{ "SetInvEntry", 12, 1 },
+		{ "CpcmdDvld", 11, 1 },
+		{ "CpcmdSave", 10, 1 },
+		{ "RxPstructsFull", 8, 2 },
+		{ "EpcmdDvld", 7, 1 },
+		{ "EpcmdFlush", 6, 1 },
+		{ "EpcmdTrimPrefix", 5, 1 },
+		{ "EpcmdTrimPostfix", 4, 1 },
+		{ "ERssIp4Pkt", 3, 1 },
+		{ "ERssIp6Pkt", 2, 1 },
+		{ "ERssTcpUdpPkt", 1, 1 },
+		{ "ERssFceFipPkt", 0, 1 },
+		{ NULL }
+	};
+	const u64 *p = v;
+
+	if (idx)
+		seq_putc(seq, '\n');
+	field_desc_show(seq, p[0], tp_la0);
+	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+		field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
+	return 0;
+}
+
+static int tp_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
+	case 2:
+		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+				 tp_la_show2);
+		break;
+	case 3:
+		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+				 tp_la_show3);
+		break;
+	default:
+		p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
+	}
+	if (!p)
+		return -ENOMEM;
+
+	t4_tp_read_la(adap, (u64 *)p->data, NULL);
+	return 0;
+}
+
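+/* A write to the "tp_la" file programs the upper 16 bits of
+ * TP_DBG_LA_CONFIG_A from the supplied value (cached as
+ * adap->params.tp.la_mask); values above 0xffff are rejected with -EINVAL.
+ */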
+static ssize_t tp_la_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	int err;
+	char s[32];
+	unsigned long val;
+	size_t size = min(sizeof(s) - 1, count);
+	struct adapter *adap = file_inode(file)->i_private;
+
+	if (copy_from_user(s, buf, size))
+		return -EFAULT;
+	s[size] = '\0';
+	err = kstrtoul(s, 0, &val);
+	if (err)
+		return err;
+	if (val > 0xffff)
+		return -EINVAL;
+	adap->params.tp.la_mask = val << 16;
+	t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
+			 adap->params.tp.la_mask);
+	return count;
+}
+
+static const struct file_operations tp_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = tp_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+	.write   = tp_la_write
+};
+
+static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
+{
+	const u32 *p = v;
+
+	if (v == SEQ_START_TOKEN)
+		seq_puts(seq, "      Pcmd        Type   Message"
+			 "                Data\n");
+	else
+		seq_printf(seq, "%08x%08x  %4x  %08x  %08x%08x%08x%08x\n",
+			   p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
+	return 0;
+}
+
+static int ulprx_la_open(struct inode *inode, struct file *file)
+{
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
+			 ulprx_la_show);
+	if (!p)
+		return -ENOMEM;
+
+	t4_ulprx_read_la(adap, (u32 *)p->data);
+	return 0;
+}
+
+static const struct file_operations ulprx_la_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ulprx_la_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+/* Show the PM memory stats.  These stats include:
+ *
+ * TX:
+ *   Read: memory read operation
+ *   Write Bypass: cut-through
+ *   Bypass + mem: cut-through and save copy
+ *
+ * RX:
+ *   Read: memory read
+ *   Write Bypass: cut-through
+ *   Flush: payload trim or drop
+ */
+static int pm_stats_show(struct seq_file *seq, void *v)
+{
+	static const char * const tx_pm_stats[] = {
+		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
+	};
+	static const char * const rx_pm_stats[] = {
+		"Read:", "Write bypass:", "Write mem:", "Flush:"
+	};
+
+	int i;
+	u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
+	u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
+	struct adapter *adap = seq->private;
+
+	t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
+	t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
+
+	seq_printf(seq, "%13s %10s  %20s\n", " ", "Tx pcmds", "Tx bytes");
+	for (i = 0; i < PM_NSTATS - 1; i++)
+		seq_printf(seq, "%-13s %10u  %20llu\n",
+			   tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
+
+	seq_printf(seq, "%13s %10s  %20s\n", " ", "Rx pcmds", "Rx bytes");
+	for (i = 0; i < PM_NSTATS - 1; i++)
+		seq_printf(seq, "%-13s %10u  %20llu\n",
+			   rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
+	return 0;
+}
+
+static int pm_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pm_stats_show, inode->i_private);
+}
+
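+/* Any write to the "pm_stats" file zeroes the PM RX and TX statistics
+ * configuration registers, in effect clearing the accumulated counters.
+ */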
+static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
+			      size_t count, loff_t *pos)
+{
+	struct adapter *adap = file_inode(file)->i_private;
+
+	t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
+	t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
+	return count;
+}
+
+static const struct file_operations pm_stats_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = pm_stats_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = pm_stats_clear
+};
+
+static int tx_rate_show(struct seq_file *seq, void *v)
+{
+	u64 nrate[NCHAN], orate[NCHAN];
+	struct adapter *adap = seq->private;
+
+	t4_get_chan_txrate(adap, nrate, orate);
+	if (adap->params.arch.nchan == NCHAN) {
+		seq_puts(seq, "              channel 0   channel 1   "
+			 "channel 2   channel 3\n");
+		seq_printf(seq, "NIC B/s:     %10llu  %10llu  %10llu  %10llu\n",
+			   (unsigned long long)nrate[0],
+			   (unsigned long long)nrate[1],
+			   (unsigned long long)nrate[2],
+			   (unsigned long long)nrate[3]);
+		seq_printf(seq, "Offload B/s: %10llu  %10llu  %10llu  %10llu\n",
+			   (unsigned long long)orate[0],
+			   (unsigned long long)orate[1],
+			   (unsigned long long)orate[2],
+			   (unsigned long long)orate[3]);
+	} else {
+		seq_puts(seq, "              channel 0   channel 1\n");
+		seq_printf(seq, "NIC B/s:     %10llu  %10llu\n",
+			   (unsigned long long)nrate[0],
+			   (unsigned long long)nrate[1]);
+		seq_printf(seq, "Offload B/s: %10llu  %10llu\n",
+			   (unsigned long long)orate[0],
+			   (unsigned long long)orate[1]);
+	}
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+
+static int cctrl_tbl_show(struct seq_file *seq, void *v)
+{
+	static const char * const dec_fac[] = {
+		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
+		"0.9375" };
+
+	int i;
+	u16 (*incr)[NCCTRL_WIN];
+	struct adapter *adap = seq->private;
+
+	incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+	if (!incr)
+		return -ENOMEM;
+
+	t4_read_cong_tbl(adap, incr);
+
+	for (i = 0; i < NCCTRL_WIN; ++i) {
+		seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
+			   incr[0][i], incr[1][i], incr[2][i], incr[3][i],
+			   incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
+		seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
+			   incr[8][i], incr[9][i], incr[10][i], incr[11][i],
+			   incr[12][i], incr[13][i], incr[14][i], incr[15][i],
+			   adap->params.a_wnd[i],
+			   dec_fac[adap->params.b_wnd[i]]);
+	}
+
+	kfree(incr);
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+
+/* Format a value in a unit that differs from the value's native unit by the
+ * given factor.
+ */
+static char *unit_conv(char *buf, size_t len, unsigned int val,
+		       unsigned int factor)
+{
+	unsigned int rem = val % factor;
+
+	if (rem == 0) {
+		snprintf(buf, len, "%u", val / factor);
+	} else {
+		while (rem % 10 == 0)
+			rem /= 10;
+		snprintf(buf, len, "%u.%u", val / factor, rem);
+	}
+	return buf;
+}
+
+static int clk_show(struct seq_file *seq, void *v)
+{
+	char buf[32];
+	struct adapter *adap = seq->private;
+	unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk;  /* in ps */
+	u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+	unsigned int tre = TIMERRESOLUTION_G(res);
+	unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
+	unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
+
+	seq_printf(seq, "Core clock period: %s ns\n",
+		   unit_conv(buf, sizeof(buf), cclk_ps, 1000));
+	seq_printf(seq, "TP timer tick: %s us\n",
+		   unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
+	seq_printf(seq, "TCP timestamp tick: %s us\n",
+		   unit_conv(buf, sizeof(buf),
+			     (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
+	seq_printf(seq, "DACK tick: %s us\n",
+		   unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
+	seq_printf(seq, "DACK timer: %u us\n",
+		   ((cclk_ps << dack_re) / 1000000) *
+		   t4_read_reg(adap, TP_DACK_TIMER_A));
+	seq_printf(seq, "Retransmit min: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
+	seq_printf(seq, "Retransmit max: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
+	seq_printf(seq, "Persist timer min: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
+	seq_printf(seq, "Persist timer max: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
+	seq_printf(seq, "Keepalive idle timer: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
+	seq_printf(seq, "Keepalive interval: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
+	seq_printf(seq, "Initial SRTT: %llu us\n",
+		   tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
+	seq_printf(seq, "FINWAIT2 timer: %llu us\n",
+		   tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
+
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+
+/* Firmware Device Log dump. */
+static const char * const devlog_level_strings[] = {
+	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
+	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
+	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
+	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
+	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
+	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
+};
+
+static const char * const devlog_facility_strings[] = {
+	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
+	[FW_DEVLOG_FACILITY_CF]         = "CF",
+	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
+	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
+	[FW_DEVLOG_FACILITY_RES]	= "RES",
+	[FW_DEVLOG_FACILITY_HW]		= "HW",
+	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
+	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
+	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
+	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
+	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
+	[FW_DEVLOG_FACILITY_VI]		= "VI",
+	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
+	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
+	[FW_DEVLOG_FACILITY_TM]		= "TM",
+	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
+	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
+	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
+	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
+	[FW_DEVLOG_FACILITY_RI]		= "RI",
+	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
+	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
+	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
+	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
+};
+
+/* Information gathered by Device Log Open routine for the display routine.
+ */
+struct devlog_info {
+	unsigned int nentries;		/* number of entries in log[] */
+	unsigned int first;		/* first [temporal] entry in log[] */
+	struct fw_devlog_e log[0];	/* Firmware Device Log */
+};
+
+/* Dump a Firmware Device Log entry.
+ */
+static int devlog_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN)
+		seq_printf(seq, "%10s  %15s  %8s  %8s  %s\n",
+			   "Seq#", "Tstamp", "Level", "Facility", "Message");
+	else {
+		struct devlog_info *dinfo = seq->private;
+		int fidx = (uintptr_t)v - 2;
+		unsigned long index;
+		struct fw_devlog_e *e;
+
+		/* Get a pointer to the log entry to display.  Skip unused log
+		 * entries.
+		 */
+		index = dinfo->first + fidx;
+		if (index >= dinfo->nentries)
+			index -= dinfo->nentries;
+		e = &dinfo->log[index];
+		if (e->timestamp == 0)
+			return 0;
+
+		/* Print the message.  This depends on the firmware using
+		 * exactly the same formatting strings as the kernel so we may
+		 * eventually have to put a format interpreter in here ...
+		 */
+		seq_printf(seq, "%10d  %15llu  %8s  %8s  ",
+			   be32_to_cpu(e->seqno),
+			   be64_to_cpu(e->timestamp),
+			   (e->level < ARRAY_SIZE(devlog_level_strings)
+			    ? devlog_level_strings[e->level]
+			    : "UNKNOWN"),
+			   (e->facility < ARRAY_SIZE(devlog_facility_strings)
+			    ? devlog_facility_strings[e->facility]
+			    : "UNKNOWN"));
+		seq_printf(seq, e->fmt,
+			   be32_to_cpu(e->params[0]),
+			   be32_to_cpu(e->params[1]),
+			   be32_to_cpu(e->params[2]),
+			   be32_to_cpu(e->params[3]),
+			   be32_to_cpu(e->params[4]),
+			   be32_to_cpu(e->params[5]),
+			   be32_to_cpu(e->params[6]),
+			   be32_to_cpu(e->params[7]));
+	}
+	return 0;
+}
+
+/* Sequential File Operations for Device Log.
+ */
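+/* The iterator encodes the log position as (pos + 1) cast to a pointer so
+ * that NULL can still mean "end of log"; devlog_show() recovers the record
+ * index with "(uintptr_t)v - 2", the extra 1 accounting for SEQ_START_TOKEN
+ * occupying position 0.
+ */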
+static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
+{
+	if (pos > dinfo->nentries)
+		return NULL;
+
+	return (void *)(uintptr_t)(pos + 1);
+}
+
+static void *devlog_start(struct seq_file *seq, loff_t *pos)
+{
+	struct devlog_info *dinfo = seq->private;
+
+	return (*pos
+		? devlog_get_idx(dinfo, *pos)
+		: SEQ_START_TOKEN);
+}
+
+static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct devlog_info *dinfo = seq->private;
+
+	(*pos)++;
+	return devlog_get_idx(dinfo, *pos);
+}
+
+static void devlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations devlog_seq_ops = {
+	.start = devlog_start,
+	.next  = devlog_next,
+	.stop  = devlog_stop,
+	.show  = devlog_show
+};
+
+/* Set up for reading the firmware's device log.  We read the entire log here
+ * and then display it incrementally in devlog_show().
+ */
+static int devlog_open(struct inode *inode, struct file *file)
+{
+	struct adapter *adap = inode->i_private;
+	struct devlog_params *dparams = &adap->params.devlog;
+	struct devlog_info *dinfo;
+	unsigned int index;
+	u32 fseqno;
+	int ret;
+
+	/* If we don't know where the log is we can't do anything.
+	 */
+	if (dparams->start == 0)
+		return -ENXIO;
+
+	/* Allocate the space to read in the firmware's device log and set up
+	 * for the iterated call to our display function.
+	 */
+	dinfo = __seq_open_private(file, &devlog_seq_ops,
+				   sizeof(*dinfo) + dparams->size);
+	if (!dinfo)
+		return -ENOMEM;
+
+	/* Record the basic log buffer information and read in the raw log.
+	 */
+	dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
+	dinfo->first = 0;
+	spin_lock(&adap->win0_lock);
+	ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
+			   dparams->start, dparams->size, (__be32 *)dinfo->log,
+			   T4_MEMORY_READ);
+	spin_unlock(&adap->win0_lock);
+	if (ret) {
+		seq_release_private(inode, file);
+		return ret;
+	}
+
+	/* Find the earliest (lowest Sequence Number) log entry in the
+	 * circular Device Log.
+	 */
+	for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
+		struct fw_devlog_e *e = &dinfo->log[index];
+		__u32 seqno;
+
+		if (e->timestamp == 0)
+			continue;
+
+		seqno = be32_to_cpu(e->seqno);
+		if (seqno < fseqno) {
+			fseqno = seqno;
+			dinfo->first = index;
+		}
+	}
+	return 0;
+}
+
+static const struct file_operations devlog_fops = {
+	.owner   = THIS_MODULE,
+	.open    = devlog_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
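+/* The per-mailbox debugfs nodes are registered with an i_private pointer of
+ * "(void *)adap + mbox" (see add_debugfs_files() below).  Since the adapter
+ * structure is allocated with at least 8-byte alignment, "(uintptr_t)priv & 7"
+ * recovers the mailbox index and subtracting it back yields the adapter
+ * pointer.  The trace and memory nodes use the same low-bits trick with
+ * smaller masks.
+ */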
+static int mbox_show(struct seq_file *seq, void *v)
+{
+	static const char * const owner[] = { "none", "FW", "driver",
+					      "unknown", "<unread>" };
+
+	int i;
+	unsigned int mbox = (uintptr_t)seq->private & 7;
+	struct adapter *adap = seq->private - mbox;
+	void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+
+	/* For T4 we don't have a shadow copy of the Mailbox Control register.
+	 * And since reading that real register causes a side effect of
+	 * granting ownership, we're best off simply not reading it at all.
+	 */
+	if (is_t4(adap->params.chip)) {
+		i = 4; /* index of "<unread>" */
+	} else {
+		unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
+		void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+		i = MBOWNER_G(readl(ctrl));
+	}
+
+	seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
+
+	for (i = 0; i < MBOX_LEN; i += 8)
+		seq_printf(seq, "%016llx\n",
+			   (unsigned long long)readq(addr + i));
+	return 0;
+}
+
+static int mbox_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mbox_show, inode->i_private);
+}
+
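+/* Writing a mailbox node hands a raw command to the firmware.  The input
+ * must be eight 64-bit hex words separated by spaces and terminated by a
+ * newline, e.g. (values purely illustrative, path is the usual debugfs
+ * mount):
+ *
+ *	echo "1 2 3 4 5 6 7 8" > /sys/kernel/debug/cxgb4/<device>/mbox0
+ *
+ * The words are copied into the mailbox data registers and the control
+ * register is flagged MBMSGVALID with ownership passed to the firmware;
+ * the write fails with -EBUSY if the host (PL) does not currently own the
+ * mailbox.
+ */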
+static ssize_t mbox_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *pos)
+{
+	int i;
+	char c = '\n', s[256];
+	unsigned long long data[8];
+	const struct inode *ino;
+	unsigned int mbox;
+	struct adapter *adap;
+	void __iomem *addr;
+	void __iomem *ctrl;
+
+	if (count > sizeof(s) - 1 || !count)
+		return -EINVAL;
+	if (copy_from_user(s, buf, count))
+		return -EFAULT;
+	s[count] = '\0';
+
+	if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
+		   &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
+		   &data[7], &c) < 8 || c != '\n')
+		return -EINVAL;
+
+	ino = file_inode(file);
+	mbox = (uintptr_t)ino->i_private & 7;
+	adap = ino->i_private - mbox;
+	addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+	ctrl = addr + MBOX_LEN;
+
+	if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
+		return -EBUSY;
+
+	for (i = 0; i < 8; i++)
+		writeq(data[i], addr + 8 * i);
+
+	writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
+	return count;
+}
+
+static const struct file_operations mbox_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mbox_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = mbox_write
+};
+
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+	int enabled, i;
+	struct trace_params tp;
+	unsigned int trcidx = (uintptr_t)seq->private & 3;
+	struct adapter *adap = seq->private - trcidx;
+
+	t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+	if (!enabled) {
+		seq_puts(seq, "tracer is disabled\n");
+		return 0;
+	}
+
+	if (tp.skip_ofst * 8 >= TRACE_LEN) {
+		dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+		return -EINVAL;
+	}
+	if (tp.port < 8) {
+		i = adap->chan_map[tp.port & 3];
+		if (i >= MAX_NPORTS) {
+			dev_err(adap->pdev_dev, "tracer %u is assigned "
+				"to non-existing port\n", trcidx);
+			return -EINVAL;
+		}
+		seq_printf(seq, "tracer is capturing %s %s, ",
+			   adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+	} else
+		seq_printf(seq, "tracer is capturing loopback %d, ",
+			   tp.port - 8);
+	seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+		   tp.min_len);
+	seq_printf(seq, "packets captured %smatch filter\n",
+		   tp.invert ? "do not " : "");
+
+	if (tp.skip_ofst) {
+		seq_puts(seq, "filter pattern: ");
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+		seq_putc(seq, '/');
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+		seq_puts(seq, "@0\n");
+	}
+
+	seq_puts(seq, "filter pattern: ");
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+	seq_putc(seq, '/');
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+	seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+	return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter.  Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one
+ * of the NIC's response queue ids (as reported by sge_qinfo), and pattern
+ * has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified.  If 2 are supplied the first one
+ * must be anchored at 0.  An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
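+/* For example (the queue id is illustrative; real ids come from sge_qinfo,
+ * and the path is the usual debugfs mount):
+ *
+ *	echo "rx0 qid=3 snaplen=64 0800/ffff" > .../cxgb4/<device>/trace0
+ *
+ * arms tracer 0 on port 0 ingress with a 64-byte snap length and a two-byte
+ * match pattern at anchor 0, while
+ *
+ *	echo disable > .../cxgb4/<device>/trace0
+ *
+ * turns it off again.
+ */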
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	int i, enable, ret;
+	u32 *data, *mask;
+	struct trace_params tp;
+	const struct inode *ino;
+	unsigned int trcidx;
+	char *s, *p, *word, *end;
+	struct adapter *adap;
+	u32 j;
+
+	ino = file_inode(file);
+	trcidx = (uintptr_t)ino->i_private & 3;
+	adap = ino->i_private - trcidx;
+
+	/* Don't accept more than 1K of input: nothing valid needs anywhere
+	 * near that much, so anything longer can only be whitespace.
+	 */
+	if (count > 1024)
+		return -EFBIG;
+	p = s = kzalloc(count + 1, GFP_USER);
+	if (!s)
+		return -ENOMEM;
+	if (copy_from_user(s, buf, count)) {
+		count = -EFAULT;
+		goto out;
+	}
+
+	if (s[count - 1] == '\n')
+		s[count - 1] = '\0';
+
+	enable = strcmp("disable", s) != 0;
+	if (!enable)
+		goto apply;
+
+	/* enable or disable trace multi rss filter */
+	if (adap->trace_rss)
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+	else
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+	memset(&tp, 0, sizeof(tp));
+	tp.port = TRC_PORT_NONE;
+	i = 0;	/* counts pattern nibbles */
+
+	while (p) {
+		while (isspace(*p))
+			p++;
+		word = strsep(&p, " ");
+		if (!*word)
+			break;
+
+		if (!strncmp(word, "qid=", 4)) {
+			end = (char *)word + 4;
+			ret = kstrtouint(end, 10, &j);
+			if (ret)
+				goto out;
+			if (!adap->trace_rss) {
+				t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+				continue;
+			}
+
+			switch (trcidx) {
+			case 0:
+				t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+				break;
+			case 1:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+				break;
+			case 2:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+				break;
+			case 3:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+				break;
+			}
+			continue;
+		}
+		if (!strncmp(word, "snaplen=", 8)) {
+			end = (char *)word + 8;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > 9600) {
+inval:				count = -EINVAL;
+				goto out;
+			}
+			tp.snap_len = j;
+			continue;
+		}
+		if (!strncmp(word, "minlen=", 7)) {
+			end = (char *)word + 7;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > TFMINPKTSIZE_M)
+				goto inval;
+			tp.min_len = j;
+			continue;
+		}
+		if (!strcmp(word, "not")) {
+			tp.invert = !tp.invert;
+			continue;
+		}
+		if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+			if (word[8] < '0' || word[8] > '3' || word[9])
+				goto inval;
+			tp.port = word[8] - '0' + 8;
+			continue;
+		}
+		if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0' + 4;
+			if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0';
+			if (adap->chan_map[tp.port] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!isxdigit(*word))
+			goto inval;
+
+		/* we have found a trace pattern */
+		if (i) {                            /* split pattern */
+			if (tp.skip_len)            /* too many splits */
+				goto inval;
+			tp.skip_ofst = i / 16;
+		}
+
+		data = &tp.data[i / 8];
+		mask = &tp.mask[i / 8];
+		j = i;
+
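+		/* Accumulate the pattern: i counts hex nibbles consumed so
+		 * far and every 8 nibbles complete one 32-bit word of tp.data
+		 * (and of tp.mask below); j starts at the same point and
+		 * counts mask nibbles so the two lengths can be compared.
+		 */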
+		while (isxdigit(*word)) {
+			if (i >= TRACE_LEN * 2) {
+				count = -EFBIG;
+				goto out;
+			}
+			*data = (*data << 4) + xdigit2int(*word++);
+			if (++i % 8 == 0)
+				data++;
+		}
+		if (*word == '/') {
+			word++;
+			while (isxdigit(*word)) {
+				if (j >= i)         /* mask longer than data */
+					goto inval;
+				*mask = (*mask << 4) + xdigit2int(*word++);
+				if (++j % 8 == 0)
+					mask++;
+			}
+			if (i != j)                 /* mask shorter than data */
+				goto inval;
+		} else {                            /* no mask, use all 1s */
+			for ( ; i - j >= 8; j += 8)
+				*mask++ = 0xffffffff;
+			if (i % 8)
+				*mask = (1 << (i % 8) * 4) - 1;
+		}
+		if (*word == '@') {
+			end = (char *)word + 1;
+			ret = kstrtouint(end, 10, &j);
+			if (*end && *end != '\n')
+				goto inval;
+			if (j & 7)          /* doesn't start at multiple of 8 */
+				goto inval;
+			j /= 8;
+			if (j < tp.skip_ofst)     /* overlaps earlier pattern */
+				goto inval;
+			if (j - tp.skip_ofst > 31)            /* skip too big */
+				goto inval;
+			tp.skip_len = j - tp.skip_ofst;
+		}
+		if (i % 8) {
+			*data <<= (8 - i % 8) * 4;
+			*mask <<= (8 - i % 8) * 4;
+			i = (i + 15) & ~15;         /* 8-byte align */
+		}
+	}
+
+	if (tp.port == TRC_PORT_NONE)
+		goto inval;
+
+apply:
+	i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+	if (i)
+		count = i;
+out:
+	kfree(s);
+	return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mps_trc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = mps_trc_write
+};
+
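+/* Read from the serial flash.  t4_read_flash() works in 32-bit words, so the
+ * loop below rounds the file position down to a 4-byte boundary, reads up to
+ * 256 bytes of whole words into a bounce buffer and copies only the requested
+ * byte range out to userspace.
+ */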
+static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	loff_t avail = file_inode(file)->i_size;
+	struct adapter *adap = file->private_data;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= avail)
+		return 0;
+	if (count > avail - pos)
+		count = avail - pos;
+
+	while (count) {
+		size_t len;
+		int ret, ofst;
+		u8 data[256];
+
+		ofst = pos & 3;
+		len = min(count + ofst, sizeof(data));
+		ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
+				    (u32 *)data, 1);
+		if (ret)
+			return ret;
+
+		len -= ofst;
+		if (copy_to_user(buf, data + ofst, len))
+			return -EFAULT;
+
+		buf += len;
+		pos += len;
+		count -= len;
+	}
+	count = pos - *ppos;
+	*ppos = pos;
+	return count;
+}
+
+static const struct file_operations flash_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mem_open,
+	.read    = flash_read,
+};
+
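+/* Recover the values printed by mps_tcam_show() from a TCAM (x, y) cell
+ * pair: the Ethernet address is taken from the low 48 bits of y (converted
+ * to big-endian so the bytes come out in wire order) and x | y is reported
+ * as the mask.
+ */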
+static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+	*mask = x | y;
+	y = (__force u64)cpu_to_be64(y);
+	memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+static int mps_tcam_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (v == SEQ_START_TOKEN) {
+		if (adap->params.arch.mps_rplc_size > 128)
+			seq_puts(seq, "Idx  Ethernet address     Mask     "
+				 "Vld Ports PF  VF                           "
+				 "Replication                                "
+				 "    P0 P1 P2 P3  ML\n");
+		else
+			seq_puts(seq, "Idx  Ethernet address     Mask     "
+				 "Vld Ports PF  VF              Replication"
+				 "	         P0 P1 P2 P3  ML\n");
+	} else {
+		u64 mask;
+		u8 addr[ETH_ALEN];
+		bool replicate;
+		unsigned int idx = (uintptr_t)v - 2;
+		u64 tcamy, tcamx, val;
+		u32 cls_lo, cls_hi, ctl;
+		u32 rplc[8] = {0};
+
+		if (chip_ver > CHELSIO_T5) {
+			/* CtlCmdType - 0: Read, 1: Write
+			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
+			 * CtlXYBitSel- 0: Y bit, 1: X bit
+			 */
+
+			/* Read tcamy */
+			ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+			if (idx < 256)
+				ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+			else
+				ctl |= CTLTCAMINDEX_V(idx - 256) |
+				       CTLTCAMSEL_V(1);
+			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+			tcamy = DMACH_G(val) << 32;
+			tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+			/* Read tcamx. Change the control param */
+			ctl |= CTLXYBITSEL_V(1);
+			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+			tcamx = DMACH_G(val) << 32;
+			tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+		} else {
+			tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+			tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+		}
+
+		cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+		cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
+
+		if (tcamx & tcamy) {
+			seq_printf(seq, "%3u         -\n", idx);
+			goto out;
+		}
+
+		rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+		if (chip_ver > CHELSIO_T5)
+			replicate = (cls_lo & T6_REPLICATE_F);
+		else
+			replicate = (cls_lo & REPLICATE_F);
+
+		if (replicate) {
+			struct fw_ldst_cmd ldst_cmd;
+			int ret;
+			struct fw_ldst_mps_rplc mps_rplc;
+			u32 ldst_addrspc;
+
+			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+			ldst_addrspc =
+				FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
+			ldst_cmd.op_to_addrspace =
+				htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+				      FW_CMD_REQUEST_F |
+				      FW_CMD_READ_F |
+				      ldst_addrspc);
+			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+			ldst_cmd.u.mps.rplc.fid_idx =
+				htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+				      FW_LDST_CMD_IDX_V(idx));
+			ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
+					 sizeof(ldst_cmd), &ldst_cmd);
+			if (ret)
+				dev_warn(adap->pdev_dev, "Can't read MPS "
+					 "replication map for idx %d: %d\n",
+					 idx, -ret);
+			else {
+				mps_rplc = ldst_cmd.u.mps.rplc;
+				rplc[0] = ntohl(mps_rplc.rplc31_0);
+				rplc[1] = ntohl(mps_rplc.rplc63_32);
+				rplc[2] = ntohl(mps_rplc.rplc95_64);
+				rplc[3] = ntohl(mps_rplc.rplc127_96);
+				if (adap->params.arch.mps_rplc_size > 128) {
+					rplc[4] = ntohl(mps_rplc.rplc159_128);
+					rplc[5] = ntohl(mps_rplc.rplc191_160);
+					rplc[6] = ntohl(mps_rplc.rplc223_192);
+					rplc[7] = ntohl(mps_rplc.rplc255_224);
+				}
+			}
+		}
+
+		tcamxy2valmask(tcamx, tcamy, addr, &mask);
+		if (chip_ver > CHELSIO_T5)
+			seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+				   "%012llx%3c   %#x%4u%4d",
+				   idx, addr[0], addr[1], addr[2], addr[3],
+				   addr[4], addr[5], (unsigned long long)mask,
+				   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+				   PORTMAP_G(cls_hi),
+				   T6_PF_G(cls_lo),
+				   (cls_lo & T6_VF_VALID_F) ?
+				   T6_VF_G(cls_lo) : -1);
+		else
+			seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+				   "%012llx%3c   %#x%4u%4d",
+				   idx, addr[0], addr[1], addr[2], addr[3],
+				   addr[4], addr[5], (unsigned long long)mask,
+				   (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+				   PORTMAP_G(cls_hi),
+				   PF_G(cls_lo),
+				   (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+		if (replicate) {
+			if (adap->params.arch.mps_rplc_size > 128)
+				seq_printf(seq, " %08x %08x %08x %08x "
+					   "%08x %08x %08x %08x",
+					   rplc[7], rplc[6], rplc[5], rplc[4],
+					   rplc[3], rplc[2], rplc[1], rplc[0]);
+			else
+				seq_printf(seq, " %08x %08x %08x %08x",
+					   rplc[3], rplc[2], rplc[1], rplc[0]);
+		} else {
+			if (adap->params.arch.mps_rplc_size > 128)
+				seq_printf(seq, "%72c", ' ');
+			else
+				seq_printf(seq, "%36c", ' ');
+		}
+
+		if (chip_ver > CHELSIO_T5)
+			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+				   T6_SRAM_PRIO0_G(cls_lo),
+				   T6_SRAM_PRIO1_G(cls_lo),
+				   T6_SRAM_PRIO2_G(cls_lo),
+				   T6_SRAM_PRIO3_G(cls_lo),
+				   (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+		else
+			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+				   SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+				   SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+				   (cls_lo >> MULTILISTEN0_S) & 0xf);
+	}
+out:	return 0;
+}
+
+static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct adapter *adap = seq->private;
+	int max_mac_addr = is_t4(adap->params.chip) ?
+				NUM_MPS_CLS_SRAM_L_INSTANCES :
+				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+	return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return mps_tcam_get_idx(seq, *pos);
+}
+
+static void mps_tcam_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mps_tcam_seq_ops = {
+	.start = mps_tcam_start,
+	.next  = mps_tcam_next,
+	.stop  = mps_tcam_stop,
+	.show  = mps_tcam_show
+};
+
+static int mps_tcam_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &mps_tcam_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations mps_tcam_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mps_tcam_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/* Display various sensor information.
+ */
+static int sensors_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	u32 param[7], val[7];
+	int ret;
+
+	/* Note that if the sensors haven't been initialized and turned on
+	 * we'll get values of 0, so treat those as "<unknown>" ...
+	 */
+	param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+	param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+			      param, val);
+
+	if (ret < 0 || val[0] == 0)
+		seq_puts(seq, "Temperature: <unknown>\n");
+	else
+		seq_printf(seq, "Temperature: %dC\n", val[0]);
+
+	if (ret < 0 || val[1] == 0)
+		seq_puts(seq, "Core VDD:    <unknown>\n");
+	else
+		seq_printf(seq, "Core VDD:    %dmV\n", val[1]);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int clip_tbl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clip_tbl_show, inode->i_private);
+}
+
+static const struct file_operations clip_tbl_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = clip_tbl_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release
+};
+#endif
+
+/* RSS Table.
+ */
+
+static int rss_show(struct seq_file *seq, void *v, int idx)
+{
+	u16 *entry = v;
+
+	seq_printf(seq, "%4d:  %4u  %4u  %4u  %4u  %4u  %4u  %4u  %4u\n",
+		   idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
+		   entry[5], entry[6], entry[7]);
+	return 0;
+}
+
+static int rss_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct seq_tab *p;
+	struct adapter *adap = inode->i_private;
+
+	p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+	if (!p)
+		return -ENOMEM;
+
+	ret = t4_read_rss(adap, (u16 *)p->data);
+	if (ret)
+		seq_release_private(inode, file);
+
+	return ret;
+}
+
+static const struct file_operations rss_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = rss_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+/* RSS Configuration.
+ */
+
+/* Small utility function returning the string "yes" if the supplied argument
+ * is non-zero and "no" otherwise.
+ */
+static const char *yesno(int x)
+{
+	static const char *yes = "yes";
+	static const char *no = "no";
+
+	return x ? yes : no;
+}
+
+static int rss_config_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	static const char * const keymode[] = {
+		"global",
+		"global and per-VF scramble",
+		"per-PF and per-VF scramble",
+		"per-VF and per-VF scramble",
+	};
+	u32 rssconf;
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
+	seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
+	seq_printf(seq, "  Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
+							TNL4TUPENIPV6_F));
+	seq_printf(seq, "  Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
+							TNL2TUPENIPV6_F));
+	seq_printf(seq, "  Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
+							TNL4TUPENIPV4_F));
+	seq_printf(seq, "  Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
+							TNL2TUPENIPV4_F));
+	seq_printf(seq, "  TnlTcpSel:     %3s\n", yesno(rssconf & TNLTCPSEL_F));
+	seq_printf(seq, "  TnlIp6Sel:     %3s\n", yesno(rssconf & TNLIP6SEL_F));
+	seq_printf(seq, "  TnlVrtSel:     %3s\n", yesno(rssconf & TNLVRTSEL_F));
+	seq_printf(seq, "  TnlMapEn:      %3s\n", yesno(rssconf & TNLMAPEN_F));
+	seq_printf(seq, "  OfdHashSave:   %3s\n", yesno(rssconf &
+							OFDHASHSAVE_F));
+	seq_printf(seq, "  OfdVrtSel:     %3s\n", yesno(rssconf & OFDVRTSEL_F));
+	seq_printf(seq, "  OfdMapEn:      %3s\n", yesno(rssconf & OFDMAPEN_F));
+	seq_printf(seq, "  OfdLkpEn:      %3s\n", yesno(rssconf & OFDLKPEN_F));
+	seq_printf(seq, "  Syn4TupEnIpv6: %3s\n", yesno(rssconf &
+							SYN4TUPENIPV6_F));
+	seq_printf(seq, "  Syn2TupEnIpv6: %3s\n", yesno(rssconf &
+							SYN2TUPENIPV6_F));
+	seq_printf(seq, "  Syn4TupEnIpv4: %3s\n", yesno(rssconf &
+							SYN4TUPENIPV4_F));
+	seq_printf(seq, "  Syn2TupEnIpv4: %3s\n", yesno(rssconf &
+							SYN2TUPENIPV4_F));
+	seq_printf(seq, "  SynIp6Sel:     %3s\n", yesno(rssconf & SYNIP6SEL_F));
+	seq_printf(seq, "  SynVrt6Sel:    %3s\n", yesno(rssconf & SYNVRTSEL_F));
+	seq_printf(seq, "  SynMapEn:      %3s\n", yesno(rssconf & SYNMAPEN_F));
+	seq_printf(seq, "  SynLkpEn:      %3s\n", yesno(rssconf & SYNLKPEN_F));
+	seq_printf(seq, "  ChnEn:         %3s\n", yesno(rssconf &
+							CHANNELENABLE_F));
+	seq_printf(seq, "  PrtEn:         %3s\n", yesno(rssconf &
+							PORTENABLE_F));
+	seq_printf(seq, "  TnlAllLkp:     %3s\n", yesno(rssconf &
+							TNLALLLOOKUP_F));
+	seq_printf(seq, "  VrtEn:         %3s\n", yesno(rssconf &
+							VIRTENABLE_F));
+	seq_printf(seq, "  CngEn:         %3s\n", yesno(rssconf &
+							CONGESTIONENABLE_F));
+	seq_printf(seq, "  HashToeplitz:  %3s\n", yesno(rssconf &
+							HASHTOEPLITZ_F));
+	seq_printf(seq, "  Udp4En:        %3s\n", yesno(rssconf & UDPENABLE_F));
+	seq_printf(seq, "  Disable:       %3s\n", yesno(rssconf & DISABLE_F));
+
+	seq_puts(seq, "\n");
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
+	seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
+	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+	seq_printf(seq, "  MaskFilter:    %3d\n", MASKFILTER_G(rssconf));
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+		seq_printf(seq, "  HashAll:     %3s\n",
+			   yesno(rssconf & HASHALL_F));
+		seq_printf(seq, "  HashEth:     %3s\n",
+			   yesno(rssconf & HASHETH_F));
+	}
+	seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
+
+	seq_puts(seq, "\n");
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
+	seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
+	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+	seq_printf(seq, "  RRCplMapEn:    %3s\n", yesno(rssconf &
+							RRCPLMAPEN_F));
+	seq_printf(seq, "  RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
+
+	seq_puts(seq, "\n");
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
+	seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
+	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+	seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
+
+	seq_puts(seq, "\n");
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+	seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+		seq_printf(seq, "  KeyWrAddrX:     %3d\n",
+			   KEYWRADDRX_G(rssconf));
+		seq_printf(seq, "  KeyExtend:      %3s\n",
+			   yesno(rssconf & KEYEXTEND_F));
+	}
+	seq_printf(seq, "  VfRdRg:        %3s\n", yesno(rssconf & VFRDRG_F));
+	seq_printf(seq, "  VfRdEn:        %3s\n", yesno(rssconf & VFRDEN_F));
+	seq_printf(seq, "  VfPerrEn:      %3s\n", yesno(rssconf & VFPERREN_F));
+	seq_printf(seq, "  KeyPerrEn:     %3s\n", yesno(rssconf & KEYPERREN_F));
+	seq_printf(seq, "  DisVfVlan:     %3s\n", yesno(rssconf &
+							DISABLEVLAN_F));
+	seq_printf(seq, "  EnUpSwt:       %3s\n", yesno(rssconf & ENABLEUP0_F));
+	seq_printf(seq, "  HashDelay:     %3d\n", HASHDELAY_G(rssconf));
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		seq_printf(seq, "  VfWrAddr:      %3d\n", VFWRADDR_G(rssconf));
+	else
+		seq_printf(seq, "  VfWrAddr:      %3d\n",
+			   T6_VFWRADDR_G(rssconf));
+	seq_printf(seq, "  KeyMode:       %s\n", keymode[KEYMODE_G(rssconf)]);
+	seq_printf(seq, "  VfWrEn:        %3s\n", yesno(rssconf & VFWREN_F));
+	seq_printf(seq, "  KeyWrEn:       %3s\n", yesno(rssconf & KEYWREN_F));
+	seq_printf(seq, "  KeyWrAddr:     %3d\n", KEYWRADDR_G(rssconf));
+
+	seq_puts(seq, "\n");
+
+	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
+	seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
+	seq_printf(seq, "  ChnCount3:     %3s\n", yesno(rssconf & CHNCOUNT3_F));
+	seq_printf(seq, "  ChnCount2:     %3s\n", yesno(rssconf & CHNCOUNT2_F));
+	seq_printf(seq, "  ChnCount1:     %3s\n", yesno(rssconf & CHNCOUNT1_F));
+	seq_printf(seq, "  ChnCount0:     %3s\n", yesno(rssconf & CHNCOUNT0_F));
+	seq_printf(seq, "  ChnUndFlow3:   %3s\n", yesno(rssconf &
+							CHNUNDFLOW3_F));
+	seq_printf(seq, "  ChnUndFlow2:   %3s\n", yesno(rssconf &
+							CHNUNDFLOW2_F));
+	seq_printf(seq, "  ChnUndFlow1:   %3s\n", yesno(rssconf &
+							CHNUNDFLOW1_F));
+	seq_printf(seq, "  ChnUndFlow0:   %3s\n", yesno(rssconf &
+							CHNUNDFLOW0_F));
+	seq_printf(seq, "  RstChn3:       %3s\n", yesno(rssconf & RSTCHN3_F));
+	seq_printf(seq, "  RstChn2:       %3s\n", yesno(rssconf & RSTCHN2_F));
+	seq_printf(seq, "  RstChn1:       %3s\n", yesno(rssconf & RSTCHN1_F));
+	seq_printf(seq, "  RstChn0:       %3s\n", yesno(rssconf & RSTCHN0_F));
+	seq_printf(seq, "  UpdVld:        %3s\n", yesno(rssconf & UPDVLD_F));
+	seq_printf(seq, "  Xoff:          %3s\n", yesno(rssconf & XOFF_F));
+	seq_printf(seq, "  UpdChn3:       %3s\n", yesno(rssconf & UPDCHN3_F));
+	seq_printf(seq, "  UpdChn2:       %3s\n", yesno(rssconf & UPDCHN2_F));
+	seq_printf(seq, "  UpdChn1:       %3s\n", yesno(rssconf & UPDCHN1_F));
+	seq_printf(seq, "  UpdChn0:       %3s\n", yesno(rssconf & UPDCHN0_F));
+	seq_printf(seq, "  Queue:         %3d\n", QUEUE_G(rssconf));
+
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+
+/* RSS Secret Key.
+ */
+
+static int rss_key_show(struct seq_file *seq, void *v)
+{
+	u32 key[10];
+
+	t4_read_rss_key(seq->private, key);
+	seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+		   key[9], key[8], key[7], key[6], key[5], key[4], key[3],
+		   key[2], key[1], key[0]);
+	return 0;
+}
+
+static int rss_key_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rss_key_show, inode->i_private);
+}
+
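+/* Accept a new 320-bit RSS secret key as 80 contiguous hex digits in the
+ * same order that rss_key_show() prints them (most significant 32-bit word
+ * first); trailing whitespace is stripped before parsing.
+ */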
+static ssize_t rss_key_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	int i, j;
+	u32 key[10];
+	char s[100], *p;
+	struct adapter *adap = file_inode(file)->i_private;
+
+	if (count > sizeof(s) - 1)
+		return -EINVAL;
+	if (copy_from_user(s, buf, count))
+		return -EFAULT;
+	for (i = count; i > 0 && isspace(s[i - 1]); i--)
+		;
+	s[i] = '\0';
+
+	for (p = s, i = 9; i >= 0; i--) {
+		key[i] = 0;
+		for (j = 0; j < 8; j++, p++) {
+			if (!isxdigit(*p))
+				return -EINVAL;
+			key[i] = (key[i] << 4) | hex2val(*p);
+		}
+	}
+
+	t4_write_rss_key(adap, key, -1);
+	return count;
+}
+
+static const struct file_operations rss_key_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = rss_key_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = rss_key_write
+};
+
+/* PF RSS Configuration.
+ */
+
+struct rss_pf_conf {
+	u32 rss_pf_map;
+	u32 rss_pf_mask;
+	u32 rss_pf_config;
+};
+
+static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
+{
+	struct rss_pf_conf *pfconf;
+
+	if (v == SEQ_START_TOKEN) {
+		/* use the 0th entry to dump the PF Map Index Size */
+		pfconf = seq->private + offsetof(struct seq_tab, data);
+		seq_printf(seq, "PF Map Index Size = %d\n\n",
+			   LKPIDXSIZE_G(pfconf->rss_pf_map));
+
+		seq_puts(seq, "     RSS              PF   VF    Hash Tuple Enable         Default\n");
+		seq_puts(seq, "     Enable       IPF Mask Mask  IPv6      IPv4      UDP   Queue\n");
+		seq_puts(seq, " PF  Map Chn Prt  Map Size Size  Four Two  Four Two  Four  Ch1  Ch0\n");
+	} else {
+		#define G_PFnLKPIDX(map, n) \
+			(((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
+		#define G_PFnMSKSIZE(mask, n) \
+			(((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)
+
+		pfconf = v;
+		seq_printf(seq, "%3d  %3s %3s %3s  %3d  %3d  %3d   %3s %3s   %3s %3s   %3s  %3d  %3d\n",
+			   idx,
+			   yesno(pfconf->rss_pf_config & MAPENABLE_F),
+			   yesno(pfconf->rss_pf_config & CHNENABLE_F),
+			   yesno(pfconf->rss_pf_config & PRTENABLE_F),
+			   G_PFnLKPIDX(pfconf->rss_pf_map, idx),
+			   G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
+			   IVFWIDTH_G(pfconf->rss_pf_config),
+			   yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
+			   yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
+			   yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
+			   yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
+			   yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
+			   CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
+			   CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));
+
+		#undef G_PFnLKPIDX
+		#undef G_PFnMSKSIZE
+	}
+	return 0;
+}
+
+static int rss_pf_config_open(struct inode *inode, struct file *file)
+{
+	struct adapter *adapter = inode->i_private;
+	struct seq_tab *p;
+	u32 rss_pf_map, rss_pf_mask;
+	struct rss_pf_conf *pfconf;
+	int pf;
+
+	p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
+	if (!p)
+		return -ENOMEM;
+
+	pfconf = (struct rss_pf_conf *)p->data;
+	rss_pf_map = t4_read_rss_pf_map(adapter);
+	rss_pf_mask = t4_read_rss_pf_mask(adapter);
+	for (pf = 0; pf < 8; pf++) {
+		pfconf[pf].rss_pf_map = rss_pf_map;
+		pfconf[pf].rss_pf_mask = rss_pf_mask;
+		t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
+	}
+	return 0;
+}
+
+static const struct file_operations rss_pf_config_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = rss_pf_config_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+/* VF RSS Configuration.
+ */
+
+struct rss_vf_conf {
+	u32 rss_vf_vfl;
+	u32 rss_vf_vfh;
+};
+
+static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "     RSS                     Hash Tuple Enable\n");
+		seq_puts(seq, "     Enable   IVF  Dis  Enb  IPv6      IPv4      UDP    Def  Secret Key\n");
+		seq_puts(seq, " VF  Chn Prt  Map  VLAN  uP  Four Two  Four Two  Four   Que  Idx       Hash\n");
+	} else {
+		struct rss_vf_conf *vfconf = v;
+
+		seq_printf(seq, "%3d  %3s %3s  %3d   %3s %3s   %3s %3s   %3s  %3s   %3s  %4d  %3d %#10x\n",
+			   idx,
+			   yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
+			   yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
+			   VFLKPIDX_G(vfconf->rss_vf_vfh),
+			   yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
+			   yesno(vfconf->rss_vf_vfh & VFUPEN_F),
+			   /* XXX: the IPv6 four-tuple column reuses the IPv4
+			    * enable bit here; looks like a copy/paste slip in
+			    * the original code.
+			    */
+			   yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+			   yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
+			   yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+			   yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
+			   yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
+			   DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
+			   KEYINDEX_G(vfconf->rss_vf_vfh),
+			   vfconf->rss_vf_vfl);
+	}
+	return 0;
+}
+
+static int rss_vf_config_open(struct inode *inode, struct file *file)
+{
+	struct adapter *adapter = inode->i_private;
+	struct seq_tab *p;
+	struct rss_vf_conf *vfconf;
+	int vf, vfcount = adapter->params.arch.vfcount;
+
+	p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
+	if (!p)
+		return -ENOMEM;
+
+	vfconf = (struct rss_vf_conf *)p->data;
+	for (vf = 0; vf < vfcount; vf++) {
+		t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
+				      &vfconf[vf].rss_vf_vfh);
+	}
+	return 0;
+}
+
+static const struct file_operations rss_vf_config_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = rss_vf_config_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private
+};
+
+/**
+ * ethqset2pinfo - return port_info of an Ethernet Queue Set
+ * @adap: the adapter
+ * @qset: Ethernet Queue Set
+ */
+static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
+{
+	int pidx;
+
+	for_each_port(adap, pidx) {
+		struct port_info *pi = adap2pinfo(adap, pidx);
+
+		if (qset >= pi->first_qset &&
+		    qset < pi->first_qset + pi->nqsets)
+			return pi;
+	}
+
+	/* should never happen! */
+	BUG_ON(1);
+	return NULL;
+}
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+	int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
+	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+	int i, r = (uintptr_t)v - 1;
+	int iscsi_idx = r - eth_entries;
+	int rdma_idx = iscsi_idx - iscsi_entries;
+	int ciq_idx = rdma_idx - rdma_entries;
+	int ctrl_idx =  ciq_idx - ciq_entries;
+	int fq_idx =  ctrl_idx - ctrl_entries;
+
+	if (r)
+		seq_putc(seq, '\n');
+
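+/* Each row group shows up to four queue sets side by side.  S3() prints a
+ * row label followed by one value per column; the T*() and R*() variants
+ * pull the value from the tx[] and rx[] queue arrays respectively, and the
+ * *L() forms are for unsigned long counters.
+ */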
+#define S3(fmt_spec, s, v) \
+do { \
+	seq_printf(seq, "%-12s", s); \
+	for (i = 0; i < n; ++i) \
+		seq_printf(seq, " %16" fmt_spec, v); \
+	seq_putc(seq, '\n'); \
+} while (0)
+#define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
+#define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
+#define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
+
+	if (r < eth_entries) {
+		int base_qset = r * 4;
+		const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
+		const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
+		int n = min(4, adap->sge.ethqsets - 4 * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+		T("TxQ ID:", q.cntxt_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ CIDX:", q.cidx);
+		T("TxQ PIDX:", q.pidx);
+#ifdef CONFIG_CHELSIO_T4_DCB
+		T("DCB Prio:", dcb_prio);
+		S3("u", "DCB PGID:",
+		   (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
+		    4*(7-tx[i].dcb_prio)) & 0xf);
+		S3("u", "DCB PFC:",
+		   (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
+		    1*(7-tx[i].dcb_prio)) & 0x1);
+#endif
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxCSO:", stats.rx_cso);
+		RL("VLANxtract:", stats.vlan_ex);
+		RL("LROmerged:", stats.lro_merged);
+		RL("LROpackets:", stats.lro_pkts);
+		RL("RxDrops:", stats.rx_drops);
+		TL("TSO:", tso);
+		TL("TxCSO:", tx_cso);
+		TL("VLANins:", vlan_ins);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
+		TL("TxMapErr:", mapping_err);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
+	} else if (iscsi_idx < iscsi_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.ofldrxq[iscsi_idx * 4];
+		const struct sge_ofld_txq *tx =
+			&adap->sge.ofldtxq[iscsi_idx * 4];
+		int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
+
+		S("QType:", "iSCSI");
+		T("TxQ ID:", q.cntxt_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ CIDX:", q.cidx);
+		T("TxQ PIDX:", q.pidx);
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
+	} else if (rdma_idx < rdma_entries) {
+		const struct sge_ofld_rxq *rx =
+				&adap->sge.rdmarxq[rdma_idx * 4];
+		int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
+
+		S("QType:", "RDMA-CPL");
+		S("Interface:",
+		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
+	} else if (ciq_idx < ciq_entries) {
+		const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
+		int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
+
+		S("QType:", "RDMA-CIQ");
+		S("Interface:",
+		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		RL("RxAN:", stats.an);
+		RL("RxNoMem:", stats.nomem);
+
+	} else if (ctrl_idx < ctrl_entries) {
+		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
+		int n = min(4, adap->params.nports - 4 * ctrl_idx);
+
+		S("QType:", "Control");
+		T("TxQ ID:", q.cntxt_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ CIDX:", q.cidx);
+		T("TxQ PIDX:", q.pidx);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
+	} else if (fq_idx == 0) {
+		const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
+		seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
+		seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adap, evtq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adap->sge.counter_val[evtq->pktcnt_idx]);
+	}
+#undef R
+#undef RL
+#undef T
+#undef TL
+#undef S
+#undef R3
+#undef T3
+#undef S3
+	return 0;
+}
+
+static int sge_queue_entries(const struct adapter *adap)
+{
+	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+	       DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+	       DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
+	       DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	++*pos;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+	.start = sge_queue_start,
+	.next  = sge_queue_next,
+	.stop  = sge_queue_stop,
+	.show  = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qinfo_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qinfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
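+/* The memory debugfs nodes encode the memory type in the low two bits of
+ * i_private (see add_debugfs_mem() below); mem_open() recovers it the same
+ * way the mailbox code does and issues a firmware cache flush
+ * (FW_PARAM_DEV_FWCACHE_FLUSH) before the contents are read.
+ */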
+int mem_open(struct inode *inode, struct file *file)
+{
+	unsigned int mem;
+	struct adapter *adap;
+
+	file->private_data = inode->i_private;
+
+	mem = (uintptr_t)file->private_data & 0x3;
+	adap = file->private_data - mem;
+
+	(void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
+
+	return 0;
+}
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+			loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	loff_t avail = file_inode(file)->i_size;
+	unsigned int mem = (uintptr_t)file->private_data & 3;
+	struct adapter *adap = file->private_data - mem;
+	__be32 *data;
+	int ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= avail)
+		return 0;
+	if (count > avail - pos)
+		count = avail - pos;
+
+	data = t4_alloc_mem(count);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock(&adap->win0_lock);
+	ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
+	spin_unlock(&adap->win0_lock);
+	if (ret) {
+		t4_free_mem(data);
+		return ret;
+	}
+	ret = copy_to_user(buf, data, count);
+
+	t4_free_mem(data);
+	if (ret)
+		return -EFAULT;
+
+	*ppos = pos + count;
+	return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = simple_open,
+	.read    = mem_read,
+	.llseek  = default_llseek,
+};
+
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	const struct tid_info *t = &adap->tids;
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		unsigned int sb;
+
+		if (chip <= CHELSIO_T5)
+			sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+		else
+			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+		if (sb) {
+			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else if (adap->flags & FW_OFLD_CONN) {
+			seq_printf(seq, "TID range: %u..%u/%u..%u",
+				   t->aftid_base,
+				   t->aftid_end,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else {
+			seq_printf(seq, "TID range: %u..%u",
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u\n",
+				   atomic_read(&t->hash_tids_in_use));
+		}
+	} else if (t->ntids) {
+		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, ", in use: %u\n",
+			   atomic_read(&t->tids_in_use));
+	}
+
+	if (t->nstids)
+		seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+			   (!t->stid_base &&
+			   (chip <= CHELSIO_T5)) ?
+			   t->stid_base + 1 : t->stid_base,
+			   t->stid_base + t->nstids - 1, t->stids_in_use);
+	if (t->natids)
+		seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+			   t->natids - 1, t->atids_in_use);
+	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+		   t->ftid_base + t->nftids - 1);
+	if (t->nsftids)
+		seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+			   t->sftid_base, t->sftid_base + t->nsftids - 2,
+			   t->sftids_in_use);
+	if (t->ntids)
+		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
+static void add_debugfs_mem(struct adapter *adap, const char *name,
+			    unsigned int idx, unsigned int size_mb)
+{
+	debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root,
+				 (void *)adap + idx, &mem_debugfs_fops,
+				 size_mb << 20);
+}
+
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+			       size_t count, loff_t *ppos)
+{
+	int len;
+	const struct adapter *adap = filp->private_data;
+	char *buf;
+	ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+			adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = snprintf(buf, size - 1, "%*pb\n",
+		       adap->sge.egr_sz, adap->sge.blocked_fl);
+	len += sprintf(buf + len, "\n");
+	size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	t4_free_mem(buf);
+	return size;
+}
+
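+/* The write side accepts a bitmap in the same hex format that the read side
+ * prints via "%*pb" (comma-separated 32-bit groups, most significant group
+ * first), parsed with bitmap_parse_user().
+ */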
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	int err;
+	unsigned long *t;
+	struct adapter *adap = filp->private_data;
+
+	t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+	if (err) {
+		t4_free_mem(t);
+		return err;
+	}
+
+	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+	t4_free_mem(t);
+	return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+	.owner   = THIS_MODULE,
+	.open    = blocked_fl_open,
+	.read    = blocked_fl_read,
+	.write   = blocked_fl_write,
+	.llseek  = generic_file_llseek,
+};
+
+struct mem_desc {
+	unsigned int base;
+	unsigned int limit;
+	unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+	return ((const struct mem_desc *)a)->base -
+	       ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+			    unsigned int from, unsigned int to)
+{
+	char buf[40];
+
+	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+			sizeof(buf));
+	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+					"MC0:", "MC1:"};
+	static const char * const region[] = {
+		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+		"RQUDP region:", "PBL region:", "TXPBL region:",
+		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+		"On-chip queues:"
+	};
+
+	int i, n;
+	u32 lo, hi, used, alloc;
+	struct mem_desc avail[4];
+	struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
+	struct mem_desc *md = mem;
+	struct adapter *adap = seq->private;
+
+	for (i = 0; i < ARRAY_SIZE(mem); i++) {
+		mem[i].limit = 0;
+		mem[i].idx = i;
+	}
+
+	/* Find and sort the populated memory ranges */
+	i = 0;
+	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+	if (lo & EDRAM0_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+		avail[i].base = EDRAM0_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+		avail[i].idx = 0;
+		i++;
+	}
+	if (lo & EDRAM1_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+		avail[i].base = EDRAM1_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+		avail[i].idx = 1;
+		i++;
+	}
+
+	if (is_t5(adap->params.chip)) {
+		if (lo & EXT_MEM0_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+			avail[i].idx = 3;
+			i++;
+		}
+		if (lo & EXT_MEM1_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+			avail[i].idx = 4;
+			i++;
+		}
+	} else {
+		if (lo & EXT_MEM_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+			avail[i].idx = 2;
+			i++;
+		}
+	}
+	if (!i)                                    /* no memory available */
+		return 0;
+	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+	/* the next few have explicit upper bounds */
+	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+	md++;
+
+	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+	md++;
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+		} else {
+			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+			md->base = t4_read_reg(adap,
+					       LE_DB_HASH_TBL_BASE_ADDR_A);
+		}
+		md->limit = 0;
+	} else {
+		md->base = 0;
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	}
+	md++;
+
+#define ulp_region(reg) do { \
+	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+	ulp_region(RX_ISCSI);
+	ulp_region(RX_TDDP);
+	ulp_region(TX_TPT);
+	ulp_region(RX_STAG);
+	ulp_region(RX_RQ);
+	ulp_region(RX_RQUDP);
+	ulp_region(RX_PBL);
+	ulp_region(TX_PBL);
+#undef ulp_region
+	md->base = 0;
+	md->idx = ARRAY_SIZE(region);
+	if (!is_t4(adap->params.chip)) {
+		u32 size = 0;
+		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+		if (is_t5(adap->params.chip)) {
+			if (sge_ctrl & VFIFO_ENABLE_F)
+				size = DBVFIFO_SIZE_G(fifo_size);
+		} else {
+			size = T6_DBVFIFO_SIZE_G(fifo_size);
+		}
+
+		if (size) {
+			md->base = BASEADDR_G(t4_read_reg(adap,
+					SGE_DBVFIFO_BADDR_A));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+
+	md++;
+
+	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+	md->limit = 0;
+	md++;
+	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+	md->limit = 0;
+	md++;
+
+	md->base = adap->vres.ocq.start;
+	if (adap->vres.ocq.size)
+		md->limit = md->base + adap->vres.ocq.size - 1;
+	else
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	md++;
+
+	/* add any address-space holes, there can be up to 3 */
+	for (n = 0; n < i - 1; n++)
+		if (avail[n].limit < avail[n + 1].base)
+			(md++)->base = avail[n].limit;
+	if (avail[n].limit)
+		(md++)->base = avail[n].limit;
+
+	n = md - mem;
+	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	for (lo = 0; lo < i; lo++)
+		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+				avail[lo].limit - 1);
+
+	seq_putc(seq, '\n');
+	for (i = 0; i < n; i++) {
+		if (mem[i].idx >= ARRAY_SIZE(region))
+			continue;                        /* skip holes */
+		if (!mem[i].limit)
+			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, region[mem[i].idx], mem[i].base,
+				mem[i].limit);
+	}
+
+	seq_putc(seq, '\n');
+	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP RAM:", lo, hi);
+
+	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+		   PMRXMAXPAGE_G(lo),
+		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+		   PMTXMAXPAGE_G(lo),
+		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+	seq_printf(seq, "%u p-structs\n\n",
+		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+	for (i = 0; i < 4; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	for (i = 0; i < adap->params.arch.nchan; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap,
+					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq,
+			   "Loopback %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+	.owner   = THIS_MODULE,
+	.open    = meminfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+/* Add an array of Debug FS files.
+ */
+void add_debugfs_files(struct adapter *adap,
+		       struct t4_debugfs_entry *files,
+		       unsigned int nfiles)
+{
+	int i;
+
+	/* debugfs support is best effort */
+	for (i = 0; i < nfiles; i++)
+		debugfs_create_file(files[i].name, files[i].mode,
+				    adap->debugfs_root,
+				    (void *)adap + files[i].data,
+				    files[i].ops);
+}
+
+int t4_setup_debugfs(struct adapter *adap)
+{
+	int i;
+	u32 size = 0;
+	struct dentry *de;
+
+	static struct t4_debugfs_entry t4_debugfs_files[] = {
+		{ "cim_la", &cim_la_fops, S_IRUSR, 0 },
+		{ "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
+		{ "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
+		{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
+		{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
+		{ "devlog", &devlog_fops, S_IRUSR, 0 },
+		{ "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+		{ "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+		{ "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
+		{ "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
+		{ "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
+		{ "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
+		{ "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+		{ "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+		{ "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+		{ "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
+		{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
+		{ "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
+		{ "rss", &rss_debugfs_fops, S_IRUSR, 0 },
+		{ "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
+		{ "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
+		{ "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
+		{ "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
+		{ "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
+		{ "ibq_tp0",  &cim_ibq_fops, S_IRUSR, 0 },
+		{ "ibq_tp1",  &cim_ibq_fops, S_IRUSR, 1 },
+		{ "ibq_ulp",  &cim_ibq_fops, S_IRUSR, 2 },
+		{ "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
+		{ "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
+		{ "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
+		{ "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
+		{ "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
+		{ "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
+		{ "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
+		{ "obq_sge",  &cim_obq_fops, S_IRUSR, 4 },
+		{ "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
+		{ "tp_la", &tp_la_fops, S_IRUSR, 0 },
+		{ "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
+		{ "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
+		{ "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+		{ "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
+		{ "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
+#if IS_ENABLED(CONFIG_IPV6)
+		{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
+#endif
+		{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
+		{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
+	};
+
+	/* Debug FS nodes common to all T5 and later adapters.
+	 */
+	static struct t4_debugfs_entry t5_debugfs_files[] = {
+		{ "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
+		{ "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
+	};
+
+	add_debugfs_files(adap,
+			  t4_debugfs_files,
+			  ARRAY_SIZE(t4_debugfs_files));
+	if (!is_t4(adap->params.chip))
+		add_debugfs_files(adap,
+				  t5_debugfs_files,
+				  ARRAY_SIZE(t5_debugfs_files));
+
+	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+	if (i & EDRAM0_ENABLE_F) {
+		size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+		add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size));
+	}
+	if (i & EDRAM1_ENABLE_F) {
+		size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
+	}
+	if (is_t5(adap->params.chip)) {
+		if (i & EXT_MEM0_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			add_debugfs_mem(adap, "mc0", MEM_MC0,
+					EXT_MEM0_SIZE_G(size));
+		}
+		if (i & EXT_MEM1_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			add_debugfs_mem(adap, "mc1", MEM_MC1,
+					EXT_MEM1_SIZE_G(size));
+		}
+	} else {
+		if (i & EXT_MEM_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			add_debugfs_mem(adap, "mc", MEM_MC,
+					EXT_MEM_SIZE_G(size));
+		}
+	}
+
+	de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
+				      &flash_debugfs_fops, adap->params.sf_size);
+	debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->use_bd);
+	debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->trace_rss);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
new file mode 100644
index 0000000..23f43a0
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_DEBUGFS_H
+#define __CXGB4_DEBUGFS_H
+
+#include <linux/export.h>
+
+#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, name##_show, inode->i_private); \
+} \
+static const struct file_operations name##_debugfs_fops = { \
+	.owner   = THIS_MODULE, \
+	.open    = name##_open, \
+	.read    = seq_read, \
+	.llseek  = seq_lseek, \
+	.release = single_release \
+}
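+
+/* For example, given a hypothetical seq_file handler foo_show(), writing
+ *
+ *	DEFINE_SIMPLE_DEBUGFS_FILE(foo);
+ *
+ * generates foo_open(), which binds foo_show() through single_open(), and a
+ * matching foo_debugfs_fops that can be listed in a t4_debugfs_entry table.
+ */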
+
+struct t4_debugfs_entry {
+	const char *name;
+	const struct file_operations *ops;
+	umode_t mode;
+	unsigned char data;
+};
+
+struct seq_tab {
+	int (*show)(struct seq_file *seq, void *v, int idx);
+	unsigned int rows;        /* # of entries */
+	unsigned char width;      /* size in bytes of each entry */
+	unsigned char skip_first; /* whether the first line is a header */
+	char data[0];             /* the table data */
+};
+
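+/* Convert a single hex digit to its numeric value, e.g. hex2val('7') == 7
+ * and hex2val('c') == 12.  The caller is assumed to pass a valid hex digit.
+ */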
+static inline unsigned int hex2val(char c)
+{
+	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+			     unsigned int width, unsigned int have_header,
+			     int (*show)(struct seq_file *seq, void *v, int i));
+
+int t4_setup_debugfs(struct adapter *adap);
+void add_debugfs_files(struct adapter *adap,
+		       struct t4_debugfs_entry *files,
+		       unsigned int nfiles);
+int mem_open(struct inode *inode, struct file *file);
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
new file mode 100644
index 0000000..a077f94
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -0,0 +1,1129 @@
+/*
+ *  Copyright (C) 2013-2015 Chelsio Communications.  All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mdio.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	netdev2adap(dev)->msg_enable = val;
+}
+
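+/* Counter names reported through "ethtool -S".  The trailing whitespace in
+ * each name is presumably there to keep the values column-aligned in
+ * ethtool's output.
+ */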
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	"tx_octets_ok		",
+	"tx_frames_ok		",
+	"tx_broadcast_frames	",
+	"tx_multicast_frames	",
+	"tx_unicast_frames	",
+	"tx_error_frames	",
+
+	"tx_frames_64		",
+	"tx_frames_65_to_127	",
+	"tx_frames_128_to_255	",
+	"tx_frames_256_to_511	",
+	"tx_frames_512_to_1023	",
+	"tx_frames_1024_to_1518	",
+	"tx_frames_1519_to_max	",
+
+	"tx_frames_dropped	",
+	"tx_pause_frames	",
+	"tx_ppp0_frames		",
+	"tx_ppp1_frames		",
+	"tx_ppp2_frames		",
+	"tx_ppp3_frames		",
+	"tx_ppp4_frames		",
+	"tx_ppp5_frames		",
+	"tx_ppp6_frames		",
+	"tx_ppp7_frames		",
+
+	"rx_octets_ok		",
+	"rx_frames_ok		",
+	"rx_broadcast_frames	",
+	"rx_multicast_frames	",
+	"rx_unicast_frames	",
+
+	"rx_frames_too_long	",
+	"rx_jabber_errors	",
+	"rx_fcs_errors		",
+	"rx_length_errors	",
+	"rx_symbol_errors	",
+	"rx_runt_frames		",
+
+	"rx_frames_64		",
+	"rx_frames_65_to_127	",
+	"rx_frames_128_to_255	",
+	"rx_frames_256_to_511	",
+	"rx_frames_512_to_1023	",
+	"rx_frames_1024_to_1518	",
+	"rx_frames_1519_to_max	",
+
+	"rx_pause_frames	",
+	"rx_ppp0_frames		",
+	"rx_ppp1_frames		",
+	"rx_ppp2_frames		",
+	"rx_ppp3_frames		",
+	"rx_ppp4_frames		",
+	"rx_ppp5_frames		",
+	"rx_ppp6_frames		",
+	"rx_ppp7_frames		",
+
+	"rx_bg0_frames_dropped	",
+	"rx_bg1_frames_dropped	",
+	"rx_bg2_frames_dropped	",
+	"rx_bg3_frames_dropped	",
+	"rx_bg0_frames_trunc	",
+	"rx_bg1_frames_trunc	",
+	"rx_bg2_frames_trunc	",
+	"rx_bg3_frames_trunc	",
+
+	"tso			",
+	"tx_csum_offload	",
+	"rx_csum_good		",
+	"vlan_extractions	",
+	"vlan_insertions	",
+	"gro_packets		",
+	"gro_merged		",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+	"db_drop                ",
+	"db_full                ",
+	"db_empty               ",
+	"tcp_ipv4_out_rsts      ",
+	"tcp_ipv4_in_segs       ",
+	"tcp_ipv4_out_segs      ",
+	"tcp_ipv4_retrans_segs  ",
+	"tcp_ipv6_out_rsts      ",
+	"tcp_ipv6_in_segs       ",
+	"tcp_ipv6_out_segs      ",
+	"tcp_ipv6_retrans_segs  ",
+	"usm_ddp_frames         ",
+	"usm_ddp_octets         ",
+	"usm_ddp_drops          ",
+	"rdma_no_rqe_mod_defer  ",
+	"rdma_no_rqe_pkt_defer  ",
+	"tp_err_ofld_no_neigh   ",
+	"tp_err_ofld_cong_defer ",
+	"write_coal_success     ",
+	"write_coal_fail        ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+	"--------Channel--------- ",
+	"tp_cpl_requests        ",
+	"tp_cpl_responses       ",
+	"tp_mac_in_errs         ",
+	"tp_hdr_in_errs         ",
+	"tp_tcp_in_errs         ",
+	"tp_tcp6_in_errs        ",
+	"tp_tnl_cong_drops      ",
+	"tp_tnl_tx_drops        ",
+	"tp_ofld_vlan_drops     ",
+	"tp_ofld_chan_drops     ",
+	"fcoe_octets_ddp        ",
+	"fcoe_frames_ddp        ",
+	"fcoe_frames_drop       ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+	"-------Loopback----------- ",
+	"octets_ok              ",
+	"frames_ok              ",
+	"bcast_frames           ",
+	"mcast_frames           ",
+	"ucast_frames           ",
+	"error_frames           ",
+	"frames_64              ",
+	"frames_65_to_127       ",
+	"frames_128_to_255      ",
+	"frames_256_to_511      ",
+	"frames_512_to_1023     ",
+	"frames_1024_to_1518    ",
+	"frames_1519_to_max     ",
+	"frames_dropped         ",
+	"bg0_frames_dropped     ",
+	"bg1_frames_dropped     ",
+	"bg2_frames_dropped     ",
+	"bg3_frames_dropped     ",
+	"bg0_frames_trunc       ",
+	"bg1_frames_trunc       ",
+	"bg2_frames_trunc       ",
+	"bg3_frames_trunc       ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings) +
+		       ARRAY_SIZE(adapter_stats_strings) +
+		       ARRAY_SIZE(channel_stats_strings) +
+		       ARRAY_SIZE(loopback_stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+	return t4_get_regs_len(adap);
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = netdev2adap(dev);
+	u32 exprom_vers;
+
+	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+	strlcpy(info->version, cxgb4_driver_version,
+		sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(adapter->pdev),
+		sizeof(info->bus_info));
+	info->regdump_len = get_regs_len(dev);
+
+	if (!adapter->params.fw_vers)
+		strcpy(info->fw_version, "N/A");
+	else
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+	if (!t4_get_exprom_version(adapter, &exprom_vers))
+		snprintf(info->erom_version, sizeof(info->erom_version),
+			 "%u.%u.%u.%u",
+			 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
+			 FW_HDR_FW_VER_MINOR_G(exprom_vers),
+			 FW_HDR_FW_VER_MICRO_G(exprom_vers),
+			 FW_HDR_FW_VER_BUILD_G(exprom_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS) {
+		memcpy(data, stats_strings, sizeof(stats_strings));
+		data += sizeof(stats_strings);
+		memcpy(data, adapter_stats_strings,
+		       sizeof(adapter_stats_strings));
+		data += sizeof(adapter_stats_strings);
+		memcpy(data, channel_stats_strings,
+		       sizeof(channel_stats_strings));
+		data += sizeof(channel_stats_strings);
+		memcpy(data, loopback_stats_strings,
+		       sizeof(loopback_stats_strings));
+	}
+}
+
+/* port stats maintained per queue of the port. They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+	u64 tso;
+	u64 tx_csum;
+	u64 rx_csum;
+	u64 vlan_ex;
+	u64 vlan_ins;
+	u64 gro_pkts;
+	u64 gro_merged;
+};
+
+struct adapter_stats {
+	u64 db_drop;
+	u64 db_full;
+	u64 db_empty;
+	u64 tcp_v4_out_rsts;
+	u64 tcp_v4_in_segs;
+	u64 tcp_v4_out_segs;
+	u64 tcp_v4_retrans_segs;
+	u64 tcp_v6_out_rsts;
+	u64 tcp_v6_in_segs;
+	u64 tcp_v6_out_segs;
+	u64 tcp_v6_retrans_segs;
+	u64 frames;
+	u64 octets;
+	u64 drops;
+	u64 rqe_dfr_mod;
+	u64 rqe_dfr_pkt;
+	u64 ofld_no_neigh;
+	u64 ofld_cong_defer;
+	u64 wc_success;
+	u64 wc_fail;
+};
+
+struct channel_stats {
+	u64 cpl_req;
+	u64 cpl_rsp;
+	u64 mac_in_errs;
+	u64 hdr_in_errs;
+	u64 tcp_in_errs;
+	u64 tcp6_in_errs;
+	u64 tnl_cong_drops;
+	u64 tnl_tx_drops;
+	u64 ofld_vlan_drops;
+	u64 ofld_chan_drops;
+	u64 octets_ddp;
+	u64 frames_ddp;
+	u64 frames_drop;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+				   const struct port_info *p,
+				   struct queue_port_stats *s)
+{
+	int i;
+	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+		s->tso += tx->tso;
+		s->tx_csum += tx->tx_cso;
+		s->rx_csum += rx->stats.rx_cso;
+		s->vlan_ex += rx->stats.vlan_ex;
+		s->vlan_ins += tx->vlan_ins;
+		s->gro_pkts += rx->stats.lro_pkts;
+		s->gro_merged += rx->stats.lro_merged;
+	}
+}
+
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+	struct tp_tcp_stats v4, v6;
+	struct tp_rdma_stats rdma_stats;
+	struct tp_err_stats err_stats;
+	struct tp_usm_stats usm_stats;
+	u64 val1, val2;
+
+	memset(s, 0, sizeof(*s));
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_tcp_stats(adap, &v4, &v6);
+	t4_tp_get_rdma_stats(adap, &rdma_stats);
+	t4_get_usm_stats(adap, &usm_stats);
+	t4_tp_get_err_stats(adap, &err_stats);
+	spin_unlock(&adap->stats_lock);
+
+	s->db_drop = adap->db_stats.db_drop;
+	s->db_full = adap->db_stats.db_full;
+	s->db_empty = adap->db_stats.db_empty;
+
+	s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+	s->tcp_v4_in_segs = v4.tcp_in_segs;
+	s->tcp_v4_out_segs = v4.tcp_out_segs;
+	s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+	s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+	s->tcp_v6_in_segs = v6.tcp_in_segs;
+	s->tcp_v6_out_segs = v6.tcp_out_segs;
+	s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+	if (is_offload(adap)) {
+		s->frames = usm_stats.frames;
+		s->octets = usm_stats.octets;
+		s->drops = usm_stats.drops;
+		s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+		s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+	}
+
+	s->ofld_no_neigh = err_stats.ofld_no_neigh;
+	s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+	if (!is_t4(adap->params.chip)) {
+		int v;
+
+		v = t4_read_reg(adap, SGE_STAT_CFG_A);
+		if (STATSOURCE_T5_G(v) == 7) {
+			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+			s->wc_success = val1 - val2;
+			s->wc_fail = val2;
+		}
+	}
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+				  u8 i)
+{
+	struct tp_cpl_stats cpl_stats;
+	struct tp_err_stats err_stats;
+	struct tp_fcoe_stats fcoe_stats;
+
+	memset(s, 0, sizeof(*s));
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_cpl_stats(adap, &cpl_stats);
+	t4_tp_get_err_stats(adap, &err_stats);
+	t4_get_fcoe_stats(adap, i, &fcoe_stats);
+	spin_unlock(&adap->stats_lock);
+
+	s->cpl_req = cpl_stats.req[i];
+	s->cpl_rsp = cpl_stats.rsp[i];
+	s->mac_in_errs = err_stats.mac_in_errs[i];
+	s->hdr_in_errs = err_stats.hdr_in_errs[i];
+	s->tcp_in_errs = err_stats.tcp_in_errs[i];
+	s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+	s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+	s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+	s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+	s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+	s->octets_ddp = fcoe_stats.octets_ddp;
+	s->frames_ddp = fcoe_stats.frames_ddp;
+	s->frames_drop = fcoe_stats.frames_drop;
+}
+
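+/* Fill the ethtool statistics buffer.  The layout mirrors get_strings():
+ * hardware port stats plus per-queue stats (stats_strings), adapter-wide
+ * stats, then the channel stats and the loopback stats, each of the last
+ * two preceded by the port index, which fills the corresponding header slot.
+ */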
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct lb_port_stats s;
+	int i;
+	u64 *p0;
+
+	t4_get_port_stats_offset(adapter, pi->tx_chan,
+				 (struct port_stats *)data,
+				 &pi->stats_base);
+
+	data += sizeof(struct port_stats) / sizeof(u64);
+	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+	data += sizeof(struct queue_port_stats) / sizeof(u64);
+	collect_adapter_stats(adapter, (struct adapter_stats *)data);
+	data += sizeof(struct adapter_stats) / sizeof(u64);
+
+	*data++ = (u64)pi->port_id;
+	collect_channel_stats(adapter, (struct channel_stats *)data,
+			      pi->port_id);
+	data += sizeof(struct channel_stats) / sizeof(u64);
+
+	*data++ = (u64)pi->port_id;
+	memset(&s, 0, sizeof(s));
+	t4_get_lb_stats(adapter, pi->port_id, &s);
+
+	p0 = &s.octets;
+	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+		*data++ = (unsigned long long)*p0++;
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *adap = netdev2adap(dev);
+	size_t buf_size;
+
+	buf_size = t4_get_regs_len(adap);
+	regs->version = mk_adap_vers(adap);
+	t4_get_regs(adap, buf, buf_size);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
+	return 0;
+}
+
+static int identify_port(struct net_device *dev,
+			 enum ethtool_phys_id_state state)
+{
+	unsigned int val;
+	struct adapter *adap = netdev2adap(dev);
+
+	if (state == ETHTOOL_ID_ACTIVE)
+		val = 0xffff;
+	else if (state == ETHTOOL_ID_INACTIVE)
+		val = 0;
+	else
+		return -EINVAL;
+
+	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
+}
+
+static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
+{
+	unsigned int v = 0;
+
+	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+	    type == FW_PORT_TYPE_BT_XAUI) {
+		v |= SUPPORTED_TP;
+		if (caps & FW_PORT_CAP_SPEED_100M)
+			v |= SUPPORTED_100baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+		v |= SUPPORTED_Backplane;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseKX_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseKX4_Full;
+	} else if (type == FW_PORT_TYPE_KR) {
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+	} else if (type == FW_PORT_TYPE_BP_AP) {
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+	} else if (type == FW_PORT_TYPE_BP4_AP) {
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+		     SUPPORTED_10000baseKX4_Full;
+	} else if (type == FW_PORT_TYPE_FIBER_XFI ||
+		   type == FW_PORT_TYPE_FIBER_XAUI ||
+		   type == FW_PORT_TYPE_SFP ||
+		   type == FW_PORT_TYPE_QSFP_10G ||
+		   type == FW_PORT_TYPE_QSA) {
+		v |= SUPPORTED_FIBRE;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_BP40_BA ||
+		   type == FW_PORT_TYPE_QSFP) {
+		v |= SUPPORTED_40000baseSR4_Full;
+		v |= SUPPORTED_FIBRE;
+	}
+
+	if (caps & FW_PORT_CAP_ANEG)
+		v |= SUPPORTED_Autoneg;
+	return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+	unsigned int v = 0;
+
+	if (caps & ADVERTISED_100baseT_Full)
+		v |= FW_PORT_CAP_SPEED_100M;
+	if (caps & ADVERTISED_1000baseT_Full)
+		v |= FW_PORT_CAP_SPEED_1G;
+	if (caps & ADVERTISED_10000baseT_Full)
+		v |= FW_PORT_CAP_SPEED_10G;
+	if (caps & ADVERTISED_40000baseSR4_Full)
+		v |= FW_PORT_CAP_SPEED_40G;
+	return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	const struct port_info *p = netdev_priv(dev);
+
+	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+	    p->port_type == FW_PORT_TYPE_BT_XFI ||
+	    p->port_type == FW_PORT_TYPE_BT_XAUI) {
+		cmd->port = PORT_TP;
+	} else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+		   p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
+		cmd->port = PORT_FIBRE;
+	} else if (p->port_type == FW_PORT_TYPE_SFP ||
+		   p->port_type == FW_PORT_TYPE_QSFP_10G ||
+		   p->port_type == FW_PORT_TYPE_QSA ||
+		   p->port_type == FW_PORT_TYPE_QSFP) {
+		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
+		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
+		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
+			cmd->port = PORT_FIBRE;
+		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+			cmd->port = PORT_DA;
+		else
+			cmd->port = PORT_OTHER;
+	} else {
+		cmd->port = PORT_OTHER;
+	}
+
+	if (p->mdio_addr >= 0) {
+		cmd->phy_address = p->mdio_addr;
+		cmd->transceiver = XCVR_EXTERNAL;
+		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+	} else {
+		cmd->phy_address = 0;  /* not really, but no better option */
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->mdio_support = 0;
+	}
+
+	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+	cmd->advertising = from_fw_linkcaps(p->port_type,
+					    p->link_cfg.advertising);
+	ethtool_cmd_speed_set(cmd,
+			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+	cmd->duplex = DUPLEX_FULL;
+	cmd->autoneg = p->link_cfg.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+	if (speed == 100)
+		return FW_PORT_CAP_SPEED_100M;
+	if (speed == 1000)
+		return FW_PORT_CAP_SPEED_1G;
+	if (speed == 10000)
+		return FW_PORT_CAP_SPEED_10G;
+	if (speed == 40000)
+		return FW_PORT_CAP_SPEED_40G;
+	return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	unsigned int cap;
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_cfg;
+	u32 speed = ethtool_cmd_speed(cmd);
+	struct link_config old_lc;
+	int ret;
+
+	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
+		return -EINVAL;
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		/* PHY offers a single speed.  See if that's what's
+		 * being requested.
+		 */
+		if (cmd->autoneg == AUTONEG_DISABLE &&
+		    (lc->supported & speed_to_caps(speed)))
+			return 0;
+		return -EINVAL;
+	}
+
+	old_lc = *lc;
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		cap = speed_to_caps(speed);
+
+		if (!(lc->supported & cap))
+			return -EINVAL;
+		lc->requested_speed = cap;
+		lc->advertising = 0;
+	} else {
+		cap = to_fw_linkcaps(cmd->advertising);
+		if (!(lc->supported & cap))
+			return -EINVAL;
+		lc->requested_speed = 0;
+		lc->advertising = cap | FW_PORT_CAP_ANEG;
+	}
+	lc->autoneg = cmd->autoneg;
+
+	/* If the firmware rejects the Link Configuration request, back out
+	 * the changes and report the error.
+	 */
+	ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+	if (ret)
+		*lc = old_lc;
+
+	return ret;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_cfg;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & FW_PORT_CAP_ANEG)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (netif_running(dev))
+		return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
+				     lc);
+	return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct sge *s = &pi->adapter->sge;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+	e->rx_jumbo_max_pending = 0;
+	e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+	e->rx_jumbo_pending = 0;
+	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	int i;
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct sge *s = &adapter->sge;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+	    e->tx_pending > MAX_TXQ_ENTRIES ||
+	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	for (i = 0; i < pi->nqsets; ++i) {
+		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+	}
+	return 0;
+}
+
+/**
+ * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+			      unsigned int us, unsigned int cnt)
+{
+	int i, err;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+	for (i = 0; i < pi->nqsets; i++, q++) {
+		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
+{
+	int i;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+	for (i = 0; i < pi->nqsets; i++, q++)
+		q->rspq.adaptive_rx = adaptive_rx;
+
+	return 0;
+}
+
+static int get_adaptive_rx_setting(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+	return q->rspq.adaptive_rx;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
+	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+				  c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct adapter *adap = pi->adapter;
+	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+	c->rx_coalesce_usecs = qtimer_val(adap, rq);
+	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
+		adap->sge.counter_val[rq->pktcnt_idx] : 0;
+	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
+	return 0;
+}
+
+/**
+ *	eeprom_ptov - translate a physical EEPROM address to virtual
+ *	@phys_addr: the physical EEPROM address
+ *	@fn: the PCI function number
+ *	@sz: size of function-specific area
+ *
+ *	Translate a physical EEPROM address to virtual.  The first 1K is
+ *	accessed through virtual addresses starting at 31K, the rest is
+ *	accessed through virtual addresses starting at 0.
+ *
+ *	The mapping is as follows:
+ *	[0..1K) -> [31K..32K)
+ *	[1K..1K+A) -> [31K-A..31K)
+ *	[1K+A..ES) -> [0..ES-A-1K)
+ *
+ *	where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+	fn *= sz;
+	if (phys_addr < 1024)
+		return phys_addr + (31 << 10);
+	if (phys_addr < 1024 + fn)
+		return 31744 - fn + phys_addr - 1024;
+	if (phys_addr < EEPROMSIZE)
+		return phys_addr - 1024 - fn;
+	return -EINVAL;
+}
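+
+/* A worked example of the mapping above, assuming for illustration a 1 KB
+ * per-function area (sz = 1024) and fn = 1, so A = 1024:
+ *
+ *	eeprom_ptov(500, 1, 1024)  = 500 + 31744                 = 32244
+ *	eeprom_ptov(1500, 1, 1024) = 31744 - 1024 + (1500 - 1024) = 31196
+ *	eeprom_ptov(3000, 1, 1024) = 3000 - 1024 - 1024           = 952
+ */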
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+	if (vaddr >= 0)
+		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+	return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+	if (vaddr >= 0)
+		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+	return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i, err = 0;
+	struct adapter *adapter = netdev2adap(dev);
+	u8 *buf = t4_alloc_mem(EEPROMSIZE);
+
+	if (!buf)
+		return -ENOMEM;
+
+	e->magic = EEPROM_MAGIC;
+	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+	if (!err)
+		memcpy(data, buf + e->offset, e->len);
+	t4_free_mem(buf);
+	return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 *data)
+{
+	u8 *buf;
+	int err = 0;
+	u32 aligned_offset, aligned_len, *p;
+	struct adapter *adapter = netdev2adap(dev);
+
+	if (eeprom->magic != EEPROM_MAGIC)
+		return -EINVAL;
+
+	aligned_offset = eeprom->offset & ~3;
+	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+	if (adapter->pf > 0) {
+		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
+
+		if (aligned_offset < start ||
+		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
+			return -EPERM;
+	}
+
+	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+		/* RMW possibly needed for first or last words.
+		 */
+		buf = t4_alloc_mem(aligned_len);
+		if (!buf)
+			return -ENOMEM;
+		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+		if (!err && aligned_len > 4)
+			err = eeprom_rd_phys(adapter,
+					     aligned_offset + aligned_len - 4,
+					     (u32 *)&buf[aligned_len - 4]);
+		if (err)
+			goto out;
+		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+	} else {
+		buf = data;
+	}
+
+	err = t4_seeprom_wp(adapter, false);
+	if (err)
+		goto out;
+
+	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+		err = eeprom_wr_phys(adapter, aligned_offset, *p);
+		aligned_offset += 4;
+	}
+
+	if (!err)
+		err = t4_seeprom_wp(adapter, true);
+out:
+	if (buf != data)
+		t4_free_mem(buf);
+	return err;
+}
+
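+/* Flash a new firmware image.  Reached via "ethtool -f <iface> <file>"; the
+ * file name is handed to request_firmware(), so it is resolved against the
+ * kernel's firmware search path (typically /lib/firmware).
+ */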
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+	int ret;
+	const struct firmware *fw;
+	struct adapter *adap = netdev2adap(netdev);
+	unsigned int mbox = PCIE_FW_MASTER_M + 1;
+	u32 pcie_fw;
+	unsigned int master;
+	u8 master_vld = 0;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	master = PCIE_FW_MASTER_G(pcie_fw);
+	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+		master_vld = 1;
+	/* If csiostor is the master, return */
+	if (master_vld && (master != adap->pf)) {
+		dev_warn(adap->pdev_dev,
+			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+		return -EOPNOTSUPP;
+	}
+
+	ef->data[sizeof(ef->data) - 1] = '\0';
+	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+	if (ret < 0)
+		return ret;
+
+	/* If the adapter has been fully initialized then we'll go ahead and
+	 * try to get the firmware's cooperation in upgrading to the new
+	 * firmware image; otherwise we'll try to do the entire job from the
+	 * host ... and we always "force" the operation in this path.
+	 */
+	if (adap->flags & FULL_INIT_DONE)
+		mbox = adap->mbox;
+
+	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
+	release_firmware(fw);
+	if (!ret)
+		dev_info(adap->pdev_dev,
+			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
+	return ret;
+}
+
+static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+{
+	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				   SOF_TIMESTAMPING_RX_SOFTWARE |
+				   SOF_TIMESTAMPING_SOFTWARE;
+
+	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+				    SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	ts_info->phc_index = -1;
+
+	return 0;
+}
+
+static u32 get_rss_table_size(struct net_device *dev)
+{
+	const struct port_info *pi = netdev_priv(dev);
+
+	return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	unsigned int n = pi->rss_size;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (!p)
+		return 0;
+	while (n--)
+		p[n] = pi->rss[n];
+	return 0;
+}
+
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
+			 const u8 hfunc)
+{
+	unsigned int i;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* We require at least one supported parameter to be changed and no
+	 * change in any of the unsupported parameters
+	 */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+	if (!p)
+		return 0;
+
+	/* Interface must be brought up at least once */
+	if (pi->adapter->flags & FULL_INIT_DONE) {
+		for (i = 0; i < pi->rss_size; i++)
+			pi->rss[i] = p[i];
+
+		return cxgb4_write_rss(pi, pi->rss);
+	}
+
+	return -EPERM;
+}
+
+static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+		     u32 *rules)
+{
+	const struct port_info *pi = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXFH: {
+		unsigned int v = pi->rss_mode;
+
+		info->data = 0;
+		switch (info->flow_type) {
+		case TCP_V4_FLOW:
+			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST |
+					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case UDP_V4_FLOW:
+			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+				info->data = RXH_IP_SRC | RXH_IP_DST |
+					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case SCTP_V4_FLOW:
+		case AH_ESP_V4_FLOW:
+		case IPV4_FLOW:
+			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case TCP_V6_FLOW:
+			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST |
+					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case UDP_V6_FLOW:
+			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+				info->data = RXH_IP_SRC | RXH_IP_DST |
+					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case SCTP_V6_FLOW:
+		case AH_ESP_V6_FLOW:
+		case IPV6_FLOW:
+			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+				info->data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		}
+		return 0;
+	}
+	case ETHTOOL_GRXRINGS:
+		info->data = pi->nqsets;
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.set_eeprom        = set_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.set_phys_id       = identify_port,
+	.nway_reset        = restart_autoneg,
+	.get_sset_count    = get_sset_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+	.get_rxnfc         = get_rxnfc,
+	.get_rxfh_indir_size = get_rss_table_size,
+	.get_rxfh	   = get_rss_table,
+	.set_rxfh	   = set_rss_table,
+	.flash_device      = set_flash,
+	.get_ts_info       = get_ts_info
+};
+
+void cxgb4_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &cxgb_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
new file mode 100644
index 0000000..6c8a62e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -0,0 +1,122 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/libfcoe.h>
+#include "cxgb4.h"
+
+bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb)
+{
+	struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb);
+	u8 sof = fcoeh->fcoe_sof;
+	u8 eof = 0;
+
+	if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) {
+		dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof);
+		return false;
+	}
+
+	skb_copy_bits(skb, skb->len - 4, &eof, 1);
+
+	if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) {
+		dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * cxgb_fcoe_enable - enable FCoE offload features
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_enable(struct net_device *netdev)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adap = pi->adapter;
+	struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+	if (is_t4(adap->params.chip))
+		return -EINVAL;
+
+	if (!(adap->flags & FULL_INIT_DONE))
+		return -EINVAL;
+
+	dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
+
+	netdev->features |= NETIF_F_FCOE_CRC;
+	netdev->vlan_features |= NETIF_F_FCOE_CRC;
+	netdev->features |= NETIF_F_FCOE_MTU;
+	netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+	netdev_features_change(netdev);
+
+	fcoe->flags |= CXGB_FCOE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * cxgb_fcoe_disable - disable FCoE offload
+ * @netdev: net device
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int cxgb_fcoe_disable(struct net_device *netdev)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adap = pi->adapter;
+	struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+		return -EINVAL;
+
+	dev_info(adap->pdev_dev, "Disabling FCoE offload features\n");
+
+	fcoe->flags &= ~CXGB_FCOE_ENABLED;
+
+	netdev->features &= ~NETIF_F_FCOE_CRC;
+	netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
+	netdev->features &= ~NETIF_F_FCOE_MTU;
+	netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+
+	netdev_features_change(netdev);
+
+	return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h
new file mode 100644
index 0000000..bf9258a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FCOE_H__
+#define __CXGB4_FCOE_H__
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+
+#define CXGB_FCOE_TXPKT_CSUM_START	28
+#define CXGB_FCOE_TXPKT_CSUM_END	8
+
+/* fcoe flags */
+enum {
+	CXGB_FCOE_ENABLED     = (1 << 0),
+};
+
+struct cxgb_fcoe {
+	u8	flags;
+};
+
+int cxgb_fcoe_enable(struct net_device *);
+int cxgb_fcoe_disable(struct net_device *);
+bool cxgb_fcoe_sof_eof_supported(struct adapter *, struct sk_buff *);
+
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+#endif /* __CXGB4_FCOE_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
new file mode 100644
index 0000000..090e006
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -0,0 +1,5089 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sockios.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/addrconf.h>
+#include <net/bonding.h>
+#include <net/addrconf.h>
+#include <asm/uaccess.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "t4fw_version.h"
+#include "cxgb4_dcb.h"
+#include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
+#include "l2t.h"
+
+char cxgb4_driver_name[] = KBUILD_MODNAME;
+
+#ifdef DRV_VERSION
+#undef DRV_VERSION
+#endif
+#define DRV_VERSION "2.0.0-ko"
+const char cxgb4_driver_version[] = DRV_VERSION;
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
+
+/* Host shadow copy of ingress filter entry.  This is in host native format
+ * and doesn't match the ordering, bit order, etc. of the hardware or of the
+ * firmware command.  The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+	/* Administrative fields for filter.
+	 */
+	u32 valid:1;            /* filter allocated and valid */
+	u32 locked:1;           /* filter is administratively locked */
+
+	u32 pending:1;          /* filter action is pending firmware reply */
+	u32 smtidx:8;           /* Source MAC Table index for smac */
+	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
+
+	/* The filter itself.  Most of this is a straight copy of information
+	 * provided by the extended ioctl().  Some fields are translated to
+	 * internal forms -- for instance the Ingress Queue ID passed in from
+	 * the ioctl() is translated into the Absolute Ingress Queue ID.
+	 */
+	struct ch_filter_specification fs;
+};
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+	static const struct pci_device_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
+ * called for both.
+ */
+#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+		{PCI_VDEVICE(CHELSIO, (devid)), 4}
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+		{ 0, } \
+	}
+
+#include "t4_pci_id_tbl.h"
+
+#define FW4_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
+#define FW4_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
+#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+MODULE_FIRMWARE(FW4_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
+
+/*
+ * Normally we're willing to become the firmware's Master PF but will be happy
+ * if another PF has already become the Master and initialized the adapter.
+ * Setting "force_init" will cause this driver to forcibly establish itself as
+ * the Master PF and initialize the adapter.
+ */
+static uint force_init;
+
+module_param(force_init, uint, 0644);
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
+
+/*
+ * Normally if the firmware we connect to has Configuration File support, we
+ * use that and only fall back to the old Driver-based initialization if the
+ * Configuration File fails for some reason.  If force_old_init is set, then
+ * we'll always use the old Driver-based initialization sequence.
+ */
+static uint force_old_init;
+
+module_param(force_old_init, uint, 0644);
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
+		 " parameter");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
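+
+/* For example, loading the driver with "modprobe cxgb4 msi=1" restricts it
+ * to MSI or INTx, which may help on platforms where MSI-X is unreliable.
+ */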
+
+/*
+ * Queue interrupt hold-off timer values.  Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+		 "0..4 in microseconds, deprecated parameter");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+		 "thresholds 1..3 for queue interrupt packet counters, "
+		 "deprecated parameter");
+
+/*
+ * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
+ * offset by 2 bytes in order to have the IP headers line up on 4-byte
+ * boundaries.  This is a requirement for many architectures which will throw
+ * a machine check fault if an attempt is made to access one of the 4-byte IP
+ * header fields on a non-4-byte boundary.  And it's a major performance issue
+ * even on some architectures which allow it like some implementations of the
+ * x86 ISA.  However, some architectures don't mind this and for some very
+ * edge-case performance sensitive applications (like forwarding large volumes
+ * of small packets), setting this DMA offset to 0 will decrease the number of
+ * PCI-E Bus transfers enough to measurably affect performance.
+ */
+static int rx_dma_offset = 2;
+
+static bool vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
+		 "deprecated parameter");
+
+/* Configure the number of PCI-E Virtual Function which are to be instantiated
+ * on SR-IOV Capable Physical Functions.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+/* TX Queue selection: determines which algorithm is used to pick the TX
+ * queue.  Select between the kernel-provided function (select_queue=0) and
+ * the driver's cxgb_select_queue() function (select_queue=1).
+ *
+ * Default: select_queue=0
+ */
+static int select_queue;
+module_param(select_queue, int, 0644);
+MODULE_PARM_DESC(select_queue,
+		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
+
+static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
+
+module_param(tp_vlan_pri_map, uint, 0644);
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
+		 "deprecated parameter");
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+/* Adapter list to be accessed from atomic context */
+static LIST_HEAD(adap_rcu_list);
+static DEFINE_SPINLOCK(adap_rcu_lock);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev))
+		netdev_info(dev, "link down\n");
+	else {
+		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+		const char *s;
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_cfg.speed) {
+		case 10000:
+			s = "10Gbps";
+			break;
+		case 1000:
+			s = "1000Mbps";
+			break;
+		case 100:
+			s = "100Mbps";
+			break;
+		case 40000:
+			s = "40Gbps";
+			break;
+		default:
+			pr_info("%s: unsupported speed: %d\n",
+				dev->name, p->link_cfg.speed);
+			return;
+		}
+
+		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+			    fc[p->link_cfg.fc]);
+	}
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
+static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+	int i;
+
+	/* We use a simple mapping of Port TX Queue Index to DCB
+	 * Priority when we're enabling DCB.
+	 */
+	for (i = 0; i < pi->nqsets; i++, txq++) {
+		u32 name, value;
+		int err;
+
+		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+			FW_PARAMS_PARAM_X_V(
+				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
+		value = enable ? i : 0xffffffff;
+
+		/* Since we can be called while atomic (from "interrupt
+	 * level") we need to issue the Set Parameters Command
+		 * without sleeping (timeout < 0).
+		 */
+		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+					    &name, &value,
+					    -FW_CMD_MAX_TIMEOUT);
+
+		if (err)
+			dev_err(adap->pdev_dev,
+				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
+				enable ? "set" : "unset", pi->port_id, i, -err);
+		else
+			txq->dcb_prio = value;
+	}
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+	struct net_device *dev = adapter->port[port_id];
+
+	/* Skip changes from disabled ports. */
+	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+		if (link_stat)
+			netif_carrier_on(dev);
+		else {
+#ifdef CONFIG_CHELSIO_T4_DCB
+			cxgb4_dcb_state_init(dev);
+			dcb_tx_queue_prio_enable(dev, false);
+#endif /* CONFIG_CHELSIO_T4_DCB */
+			netif_carrier_off(dev);
+		}
+
+		link_report(dev);
+	}
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+		netdev_info(dev, "port module unplugged\n");
+	else if (pi->mod_type < ARRAY_SIZE(mod_str))
+		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	u16 filt_idx[7];
+	const u8 *addr[7];
+	int ret, naddr = 0;
+	const struct netdev_hw_addr *ha;
+	int uc_cnt = netdev_uc_count(dev);
+	int mc_cnt = netdev_mc_count(dev);
+	const struct port_info *pi = netdev_priv(dev);
+	unsigned int mb = pi->adapter->pf;
+
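+	/* Addresses are handed to the firmware in chunks of up to
+	 * ARRAY_SIZE(addr) (7) exact-match entries; anything the firmware
+	 * cannot place in exact-match filters is reflected back in the
+	 * uhash/mhash values which we program via t4_set_addr_hash() at the
+	 * end.
+	 */
+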
+	/* first do the secondary unicast addresses */
+	netdev_for_each_uc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+					naddr, addr, filt_idx, &uhash, sleep);
+			if (ret < 0)
+				return ret;
+
+			free = false;
+			naddr = 0;
+		}
+	}
+
+	/* next set up the multicast addresses */
+	netdev_for_each_mc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+					naddr, addr, filt_idx, &mhash, sleep);
+			if (ret < 0)
+				return ret;
+
+			free = false;
+			naddr = 0;
+		}
+	}
+
+	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
+				uhash | mhash, sleep);
+}
+
+int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
+module_param(dbfifo_int_thresh, int, 0644);
+MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
+
+/*
+ * usecs to sleep while draining the dbfifo
+ */
+static int dbfifo_drain_delay = 1000;
+module_param(dbfifo_drain_delay, int, 0644);
+MODULE_PARM_DESC(dbfifo_drain_delay,
+		 "usecs to sleep while draining the dbfifo");
+
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	ret = set_addr_filters(dev, sleep_ok);
+	if (ret == 0)
+		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
+				    (dev->flags & IFF_PROMISC) ? 1 : 0,
+				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+				    sleep_ok);
+	return ret;
+}
+
+/**
+ *	link_start - enable a port
+ *	@dev: the port to enable
+ *
+ *	Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+	unsigned int mb = pi->adapter->pf;
+
+	/*
+	 * We do not set address filters and promiscuity here, the stack does
+	 * that step explicitly.
+	 */
+	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
+	if (ret == 0) {
+		ret = t4_change_mac(pi->adapter, mb, pi->viid,
+				    pi->xact_addr_filt, dev->dev_addr, true,
+				    true);
+		if (ret >= 0) {
+			pi->xact_addr_filt = ret;
+			ret = 0;
+		}
+	}
+	if (ret == 0)
+		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
+				    &pi->link_cfg);
+	if (ret == 0) {
+		local_bh_disable();
+		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
+					  true, CXGB4_DCB_ENABLED);
+		local_bh_enable();
+	}
+
+	return ret;
+}
+
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!pi->dcb.enabled)
+		return 0;
+
+	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+	return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Handle a Data Center Bridging update message from the firmware. */
+static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
+{
+	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
+	struct net_device *dev = adap->port[port];
+	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
+	int new_dcb_enabled;
+
+	cxgb4_dcb_handle_fw_update(adap, pcmd);
+	new_dcb_enabled = cxgb4_dcb_enabled(dev);
+
+	/* If the DCB has become enabled or disabled on the port then we're
+	 * going to need to set up/tear down DCB Priority parameters for the
+	 * TX Queues associated with the port.
+	 */
+	if (new_dcb_enabled != old_dcb_enabled)
+		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+/* Clear a filter and release any of its resources that we own.  This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+	/* If the new or old filter has loopback rewriting rules then we'll
+	 * need to free any existing Layer Two Table (L2T) entries of the old
+	 * filter rule.  The firmware will handle freeing up any Source MAC
+	 * Table (SMT) entries used for rewriting Source MAC Addresses in
+	 * loopback rules.
+	 */
+	if (f->l2t)
+		cxgb4_l2t_release(f->l2t);
+
+	/* The zeroing of the filter rule below clears the filter valid,
+	 * pending, locked flags, l2t pointer, etc. so it's all we need for
+	 * this operation.
+	 */
+	memset(f, 0, sizeof(*f));
+}
+
+/* Handle a filter write/deletion reply.
+ */
+static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+	unsigned int idx = GET_TID(rpl);
+	unsigned int nidx = idx - adap->tids.ftid_base;
+	unsigned int ret;
+	struct filter_entry *f;
+
+	if (idx >= adap->tids.ftid_base && nidx <
+	   (adap->tids.nftids + adap->tids.nsftids)) {
+		idx = nidx;
+		ret = TCB_COOKIE_G(rpl->cookie);
+		f = &adap->tids.ftid_tab[idx];
+
+		if (ret == FW_FILTER_WR_FLT_DELETED) {
+			/* Clear the filter when we get confirmation from the
+			 * hardware that the filter has been deleted.
+			 */
+			clear_filter(adap, f);
+		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+				idx);
+			clear_filter(adap, f);
+		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
+			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+			f->pending = 0;  /* asynchronous setup completed */
+			f->valid = 1;
+		} else {
+			/* Something went wrong.  Issue a warning about the
+			 * problem and clear everything out.
+			 */
+			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+				idx, ret);
+			clear_filter(adap, f);
+		}
+	}
+}
+
+/* Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+			  const struct pkt_gl *gl)
+{
+	u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+	rsp++;                                          /* skip RSS header */
+
+	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+	 */
+	if (unlikely(opcode == CPL_FW4_MSG &&
+	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
+		rsp++;
+		opcode = ((const struct rss_header *)rsp)->opcode;
+		rsp++;
+		if (opcode != CPL_SGE_EGR_UPDATE) {
+			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
+				, opcode);
+			goto out;
+		}
+	}
+
+	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+		const struct cpl_sge_egr_update *p = (void *)rsp;
+		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
+		struct sge_txq *txq;
+
+		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
+		txq->restarts++;
+		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+			struct sge_eth_txq *eq;
+
+			eq = container_of(txq, struct sge_eth_txq, q);
+			netif_tx_wake_queue(eq->txq);
+		} else {
+			struct sge_ofld_txq *oq;
+
+			oq = container_of(txq, struct sge_ofld_txq, q);
+			tasklet_schedule(&oq->qresume_tsk);
+		}
+	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+		const struct cpl_fw6_msg *p = (void *)rsp;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+		const struct fw_port_cmd *pcmd = (const void *)p->data;
+		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
+		unsigned int action =
+			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
+
+		if (cmd == FW_PORT_CMD &&
+		    action == FW_PORT_ACTION_GET_PORT_INFO) {
+			int port = FW_PORT_CMD_PORTID_G(
+					be32_to_cpu(pcmd->op_to_portid));
+			struct net_device *dev = q->adap->port[port];
+			int state_input = ((pcmd->u.info.dcbxdis_pkd &
+					    FW_PORT_CMD_DCBXDIS_F)
+					   ? CXGB4_DCB_INPUT_FW_DISABLED
+					   : CXGB4_DCB_INPUT_FW_ENABLED);
+
+			cxgb4_dcb_state_fsm(dev, state_input);
+		}
+
+		if (cmd == FW_PORT_CMD &&
+		    action == FW_PORT_ACTION_L2_DCB_CFG)
+			dcb_rpl(q->adap, pcmd);
+		else
+#endif
+			if (p->type == 0)
+				t4_handle_fw_rpl(q->adap, p->data);
+	} else if (opcode == CPL_L2T_WRITE_RPL) {
+		const struct cpl_l2t_write_rpl *p = (void *)rsp;
+
+		do_l2t_write_rpl(q->adap, p);
+	} else if (opcode == CPL_SET_TCB_RPL) {
+		const struct cpl_set_tcb_rpl *p = (void *)rsp;
+
+		filter_rpl(q->adap, p);
+	} else
+		dev_err(q->adap->pdev_dev,
+			"unexpected CPL %#x on FW event queue\n", opcode);
+out:
+	return 0;
+}
+
+/**
+ *	uldrx_handler - response queue handler for ULD queues
+ *	@q: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the offload message
+ *	@gl: the gather list of packet fragments
+ *
+ *	Deliver an ingress offload packet to a ULD.  All processing is done by
+ *	the ULD; we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+			 const struct pkt_gl *gl)
+{
+	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+
+	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
+	 */
+	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
+	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
+		rsp += 2;
+
+	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+		rxq->stats.nomem++;
+		return -1;
+	}
+	if (gl == NULL)
+		rxq->stats.imm++;
+	else if (gl == CXGB4_MSG_AN)
+		rxq->stats.an++;
+	else
+		rxq->stats.pkts++;
+	return 0;
+}
+
+static void disable_msi(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+}
+
+/*
+ * Interrupt handler for non-data events used with MSI-X.
+ */
+static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
+
+	if (v & PFSW_F) {
+		adap->swintr = 1;
+		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
+	}
+	if (adap->flags & MASTER_PF)
+		t4_slow_intr_handler(adap);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
+
+	/* non-data interrupts */
+	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
+
+	/* FW events */
+	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+		 adap->port[0]->name);
+
+	/* Ethernet queues */
+	for_each_port(adap, j) {
+		struct net_device *d = adap->port[j];
+		const struct port_info *pi = netdev_priv(d);
+
+		for (i = 0; i < pi->nqsets; i++, msi_idx++)
+			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+				 d->name, i);
+	}
+
+	/* offload queues */
+	for_each_ofldrxq(&adap->sge, i)
+		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+			 adap->port[0]->name, i);
+
+	for_each_rdmarxq(&adap->sge, i)
+		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+			 adap->port[0]->name, i);
+
+	for_each_rdmaciq(&adap->sge, i)
+		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+			 adap->port[0]->name, i);
+}
+
+static int request_msix_queue_irqs(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+	int msi_index = 2;
+
+	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+			  adap->msix_info[1].desc, &s->fw_evtq);
+	if (err)
+		return err;
+
+	for_each_ethrxq(s, ethqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->ethrxq[ethqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
+	for_each_ofldrxq(s, ofldqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->ofldrxq[ofldqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
+	for_each_rdmarxq(s, rdmaqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->rdmarxq[rdmaqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
+	for_each_rdmaciq(s, rdmaciqqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->rdmaciq[rdmaciqqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
+	return 0;
+
+unwind:
+	while (--rdmaciqqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->rdmaciq[rdmaciqqidx].rspq);
+	while (--rdmaqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->rdmarxq[rdmaqidx].rspq);
+	while (--ofldqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->ofldrxq[ofldqidx].rspq);
+	while (--ethqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->ethrxq[ethqidx].rspq);
+	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+	return err;
+}
+
+static void free_msix_queue_irqs(struct adapter *adap)
+{
+	int i, msi_index = 2;
+	struct sge *s = &adap->sge;
+
+	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+	for_each_ethrxq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
+	for_each_ofldrxq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
+	for_each_rdmarxq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+	for_each_rdmaciq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
+}
+
+/**
+ *	cxgb4_write_rss - write the RSS table for a given port
+ *	@pi: the port
+ *	@queues: array of queue indices for RSS
+ *
+ *	Sets up the portion of the HW RSS table for the port's VI to distribute
+ *	packets to the Rx queues in @queues.
+ *	Should never be called before setting up the SGE Ethernet RX queues.
+ */
+int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
+{
+	u16 *rss;
+	int i, err;
+	struct adapter *adapter = pi->adapter;
+	const struct sge_eth_rxq *rxq;
+
+	rxq = &adapter->sge.ethrxq[pi->first_qset];
+	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
+	if (!rss)
+		return -ENOMEM;
+
+	/* map the queue indices to queue ids */
+	for (i = 0; i < pi->rss_size; i++, queues++)
+		rss[i] = rxq[*queues].rspq.abs_id;
+
+	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
+				  pi->rss_size, rss, pi->rss_size);
+	/* If Tunnel All Lookup isn't specified in the global RSS
+	 * Configuration, then we need to specify a default Ingress
+	 * Queue for any ingress packets which aren't hashed.  We'll
+	 * use our first ingress queue ...
+	 */
+	if (!err)
+		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+				       rss[0]);
+	kfree(rss);
+	return err;
+}
+
+/**
+ *	setup_rss - configure RSS
+ *	@adap: the adapter
+ *
+ *	Sets up RSS for each port.
+ */
+static int setup_rss(struct adapter *adap)
+{
+	int i, j, err;
+
+	for_each_port(adap, i) {
+		const struct port_info *pi = adap2pinfo(adap, i);
+
+		/* Fill default values with equal distribution */
+		for (j = 0; j < pi->rss_size; j++)
+			pi->rss[j] = j % pi->nqsets;
+
+		err = cxgb4_write_rss(pi, pi->rss);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+	qid -= p->ingr_start;
+	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < adap->sge.ingr_sz; i++) {
+		struct sge_rspq *q = adap->sge.ingr_map[i];
+
+		if (q && q->handler) {
+			napi_disable(&q->napi);
+			local_bh_disable();
+			while (!cxgb_poll_lock_napi(q))
+				mdelay(1);
+			local_bh_enable();
+		}
+
+	}
+}
+
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+	if (adap->flags & FULL_INIT_DONE) {
+		t4_intr_disable(adap);
+		if (adap->flags & USING_MSIX) {
+			free_msix_queue_irqs(adap);
+			free_irq(adap->msix_info[0].vec, adap);
+		} else {
+			free_irq(adap->pdev->irq, adap);
+		}
+		quiesce_rx(adap);
+	}
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < adap->sge.ingr_sz; i++) {
+		struct sge_rspq *q = adap->sge.ingr_map[i];
+
+		if (!q)
+			continue;
+		if (q->handler) {
+			cxgb_busy_poll_init_lock(q);
+			napi_enable(&q->napi);
+		}
+		/* 0-increment GTS to start the timer and enable interrupts */
+		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+			     SEINTARM_V(q->intr_params) |
+			     INGRESSQID_V(q->cntxt_id));
+	}
+}
+
+static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
+			   unsigned int nq, unsigned int per_chan, int msi_idx,
+			   u16 *ids)
+{
+	int i, err;
+
+	for (i = 0; i < nq; i++, q++) {
+		if (msi_idx > 0)
+			msi_idx++;
+		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+				       adap->port[i / per_chan],
+				       msi_idx, q->fl.size ? &q->fl : NULL,
+				       uldrx_handler, 0);
+		if (err)
+			return err;
+		memset(&q->stats, 0, sizeof(q->stats));
+		if (ids)
+			ids[i] = q->rspq.abs_id;
+	}
+	return 0;
+}
+
+/**
+ *	setup_sge_queues - configure SGE Tx/Rx/response queues
+ *	@adap: the adapter
+ *
+ *	Determines how many sets of SGE queues to use and initializes them.
+ *	We support multiple queue sets per port if we have MSI-X, otherwise
+ *	just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+	int err, msi_idx, i, j;
+	struct sge *s = &adap->sge;
+
+	bitmap_zero(s->starving_fl, s->egr_sz);
+	bitmap_zero(s->txq_maperr, s->egr_sz);
+
+	if (adap->flags & USING_MSIX)
+		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
+	else {
+		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+				       NULL, NULL, -1);
+		if (err)
+			return err;
+		msi_idx = -((int)s->intrq.abs_id + 1);
+	}
+
+	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+	 * don't forget to update the following, which needs to be
+	 * kept in sync with any changes here.
+	 *
+	 * 1. The calculations of MAX_INGQ in cxgb4.h.
+	 *
+	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+	 *    to accommodate any new/deleted Ingress Queues
+	 *    which need MSI-X Vectors.
+	 *
+	 * 3. Update sge_qinfo_show() to include information on the
+	 *    new/deleted queues.
+	 */
+	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+			       msi_idx, NULL, fwevtq_handler, -1);
+	if (err) {
+freeout:	t4_free_sge_resources(adap);
+		return err;
+	}
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+
+		for (j = 0; j < pi->nqsets; j++, q++) {
+			if (msi_idx > 0)
+				msi_idx++;
+			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+					       msi_idx, &q->fl,
+					       t4_ethrx_handler,
+					       t4_get_mps_bg_map(adap,
+								 pi->tx_chan));
+			if (err)
+				goto freeout;
+			q->rspq.idx = j;
+			memset(&q->stats, 0, sizeof(q->stats));
+		}
+		for (j = 0; j < pi->nqsets; j++, t++) {
+			err = t4_sge_alloc_eth_txq(adap, t, dev,
+					netdev_get_tx_queue(dev, j),
+					s->fw_evtq.cntxt_id);
+			if (err)
+				goto freeout;
+		}
+	}
+
+	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+	for_each_ofldrxq(s, i) {
+		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
+					    adap->port[i / j],
+					    s->fw_evtq.cntxt_id);
+		if (err)
+			goto freeout;
+	}
+
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
+	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+	if (err) \
+		goto freeout; \
+	if (msi_idx > 0) \
+		msi_idx += nq; \
+} while (0)
+
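+	/* Each invocation below allocates one group of offload ingress queues
+	 * and advances msi_idx by the group size so that MSI-X vector usage
+	 * stays in step with name_msix_vecs() and request_msix_queue_irqs().
+	 */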
+	ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
+	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+	j = s->rdmaciqs / adap->params.nports; /* rdma CIQs per channel */
+	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
+
+#undef ALLOC_OFLD_RXQS
+
+	for_each_port(adap, i) {
+		/*
+		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+		 * have RDMA queues, and that's the right value.
+		 */
+		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+					    s->fw_evtq.cntxt_id,
+					    s->rdmarxq[i].rspq.cntxt_id);
+		if (err)
+			goto freeout;
+	}
+
+	t4_write_reg(adap, is_t4(adap->params.chip) ?
+				MPS_TRC_RSS_CONTROL_A :
+				MPS_T5_TRC_RSS_CONTROL_A,
+		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
+	return 0;
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+	if (!p)
+		p = vzalloc(size);
+	return p;
+}
+
+/*
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+	kvfree(addr);
+}
+
+/* Send a Work Request to write the filter at a specified index.  We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+static int set_filter_wr(struct adapter *adapter, int fidx)
+{
+	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+	struct sk_buff *skb;
+	struct fw_filter_wr *fwr;
+	unsigned int ftid;
+
+	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	/* If the new filter requires loopback Destination MAC and/or VLAN
+	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+	 * the filter.
+	 */
+	if (f->fs.newdmac || f->fs.newvlan) {
+		/* allocate L2T entry for new filter */
+		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+		if (f->l2t == NULL) {
+			kfree_skb(skb);
+			return -EAGAIN;
+		}
+		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
+					f->fs.eport, f->fs.dmac)) {
+			cxgb4_l2t_release(f->l2t);
+			f->l2t = NULL;
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+	}
+
+	ftid = adapter->tids.ftid_base + fidx;
+
+	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+	memset(fwr, 0, sizeof(*fwr));
+
+	/* It would be nice to put most of the following in t4_hw.c but most
+	 * of the work is translating the cxgbtool ch_filter_specification
+	 * into the Work Request and the definition of that structure is
+	 * currently in cxgbtool.h which isn't appropriate to pull into the
+	 * common code.  We may eventually try to come up with a more neutral
+	 * filter specification structure but for now it's easiest to simply
+	 * put this fairly direct code in line ...
+	 */
+	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
+	fwr->tid_to_iq =
+		htonl(FW_FILTER_WR_TID_V(ftid) |
+		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+		      FW_FILTER_WR_NOREPLY_V(0) |
+		      FW_FILTER_WR_IQ_V(f->fs.iq));
+	fwr->del_filter_to_l2tix =
+		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+					     f->fs.newvlan == VLAN_REWRITE) |
+		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+					    f->fs.newvlan == VLAN_REWRITE) |
+		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
+		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
+	fwr->ethtype = htons(f->fs.val.ethtype);
+	fwr->ethtypem = htons(f->fs.mask.ethtype);
+	fwr->frag_to_ovlan_vldm =
+		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+	fwr->smac_sel = 0;
+	fwr->rx_chan_rx_rpl_iq =
+		htons(FW_FILTER_WR_RX_CHAN_V(0) |
+		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+	fwr->maci_to_matchtypem =
+		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
+	fwr->ptcl = f->fs.val.proto;
+	fwr->ptclm = f->fs.mask.proto;
+	fwr->ttyp = f->fs.val.tos;
+	fwr->ttypm = f->fs.mask.tos;
+	fwr->ivlan = htons(f->fs.val.ivlan);
+	fwr->ivlanm = htons(f->fs.mask.ivlan);
+	fwr->ovlan = htons(f->fs.val.ovlan);
+	fwr->ovlanm = htons(f->fs.mask.ovlan);
+	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+	fwr->lp = htons(f->fs.val.lport);
+	fwr->lpm = htons(f->fs.mask.lport);
+	fwr->fp = htons(f->fs.val.fport);
+	fwr->fpm = htons(f->fs.mask.fport);
+	if (f->fs.newsmac)
+		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+	/* Mark the filter as "pending" and ship off the Filter Work Request.
+	 * When we get the Work Request Reply we'll clear the pending status.
+	 */
+	f->pending = 1;
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+	t4_ofld_send(adapter, skb);
+	return 0;
+}
+
+/* Delete the filter at a specified index.
+ */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+	struct sk_buff *skb;
+	struct fw_filter_wr *fwr;
+	unsigned int len, ftid;
+
+	len = sizeof(*fwr);
+	ftid = adapter->tids.ftid_base + fidx;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
+
+	/* Mark the filter as "pending" and ship off the Filter Work Request.
+	 * When we get the Work Request Reply we'll clear the pending status.
+	 */
+	f->pending = 1;
+	t4_mgmt_tx(adapter, skb);
+	return 0;
+}
+
+static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv, select_queue_fallback_t fallback)
+{
+	int txq;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* If Data Center Bridging has been successfully negotiated on this
+	 * link then we'll use the skb's priority to map it to a TX Queue.
+	 * The skb's priority is determined via the VLAN Tag Priority Code
+	 * Point field.
+	 */
+	if (cxgb4_dcb_enabled(dev)) {
+		u16 vlan_tci;
+		int err;
+
+		err = vlan_get_tag(skb, &vlan_tci);
+		if (unlikely(err)) {
+			if (net_ratelimit())
+				netdev_warn(dev,
+					    "TX Packet without VLAN Tag on DCB Link\n");
+			txq = 0;
+		} else {
+			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+			if (skb->protocol == htons(ETH_P_FCOE))
+				txq = skb->priority & 0x7;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+		}
+		return txq;
+	}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+	if (select_queue) {
+		txq = (skb_rx_queue_recorded(skb)
+			? skb_get_rx_queue(skb)
+			: smp_processor_id());
+
+		while (unlikely(txq >= dev->real_num_tx_queues))
+			txq -= dev->real_num_tx_queues;
+
+		return txq;
+	}
+
+	return fallback(dev, skb) % dev->real_num_tx_queues;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		delta = time - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = thres - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
+
+/**
+ *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ *	@q: the Rx queue
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
+ *	one of the two needs to be enabled for the queue to generate interrupts.
+ */
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
+			       unsigned int us, unsigned int cnt)
+{
+	struct adapter *adap = q->adap;
+
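+	/* If both the hold-off timer and the packet count are zero the queue
+	 * would never generate interrupts, so fall back to a packet count of
+	 * one.
+	 */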
+	if ((us | cnt) == 0)
+		cnt = 1;
+
+	if (cnt) {
+		int err;
+		u32 v, new_idx;
+
+		new_idx = closest_thres(&adap->sge, cnt);
+		if (q->desc && q->pktcnt_idx != new_idx) {
+			/* the queue has already been created, update it */
+			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+			    FW_PARAMS_PARAM_X_V(
+					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
+			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+					    &v, &new_idx);
+			if (err)
+				return err;
+		}
+		q->pktcnt_idx = new_idx;
+	}
+
+	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
+	return 0;
+}
+
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	netdev_features_t changed = dev->features ^ features;
+	int err;
+
+	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
+		return 0;
+
+	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
+			    -1, -1, -1,
+			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
+	if (unlikely(err))
+		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
+	return err;
+}
+
+static int setup_debugfs(struct adapter *adap)
+{
+	if (IS_ERR_OR_NULL(adap->debugfs_root))
+		return -1;
+
+#ifdef CONFIG_DEBUG_FS
+	t4_setup_debugfs(adap);
+#endif
+	return 0;
+}
+
+/*
+ * upper-layer driver support
+ */
+
+/*
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgb4_alloc_atid(struct tid_info *t, void *data)
+{
+	int atid = -1;
+
+	spin_lock_bh(&t->atid_lock);
+	if (t->afree) {
+		union aopen_entry *p = t->afree;
+
+		atid = (p - t->atid_tab) + t->atid_base;
+		t->afree = p->next;
+		p->data = data;
+		t->atids_in_use++;
+	}
+	spin_unlock_bh(&t->atid_lock);
+	return atid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_atid);
+
+/*
+ * Release an active-open TID.
+ */
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+{
+	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
+
+	spin_lock_bh(&t->atid_lock);
+	p->next = t->afree;
+	t->afree = p;
+	t->atids_in_use--;
+	spin_unlock_bh(&t->atid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_atid);
+
+/*
+ * Allocate a server TID and set it to the supplied value.
+ */
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+{
+	int stid;
+
+	spin_lock_bh(&t->stid_lock);
+	if (family == PF_INET) {
+		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+		if (stid < t->nstids)
+			__set_bit(stid, t->stid_bmap);
+		else
+			stid = -1;
+	} else {
+		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+		if (stid < 0)
+			stid = -1;
+	}
+	if (stid >= 0) {
+		t->stid_tab[stid].data = data;
+		stid += t->stid_base;
+		/* IPv6 requires max of 520 bits or 16 cells in TCAM
+		 * This is equivalent to 4 TIDs. With CLIP enabled it
+		 * needs 2 TIDs.
+		 */
+		if (family == PF_INET)
+			t->stids_in_use++;
+		else
+			t->stids_in_use += 4;
+	}
+	spin_unlock_bh(&t->stid_lock);
+	return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_stid);
+
+/* Allocate a server filter TID and set it to the supplied value.
+ */
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
+{
+	int stid;
+
+	spin_lock_bh(&t->stid_lock);
+	if (family == PF_INET) {
+		stid = find_next_zero_bit(t->stid_bmap,
+				t->nstids + t->nsftids, t->nstids);
+		if (stid < (t->nstids + t->nsftids))
+			__set_bit(stid, t->stid_bmap);
+		else
+			stid = -1;
+	} else {
+		stid = -1;
+	}
+	if (stid >= 0) {
+		t->stid_tab[stid].data = data;
+		stid -= t->nstids;
+		stid += t->sftid_base;
+		t->sftids_in_use++;
+	}
+	spin_unlock_bh(&t->stid_lock);
+	return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_sftid);
+
+/* Release a server TID.
+ */
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+{
+	/* Is it a server filter TID? */
+	if (t->nsftids && (stid >= t->sftid_base)) {
+		stid -= t->sftid_base;
+		stid += t->nstids;
+	} else {
+		stid -= t->stid_base;
+	}
+
+	spin_lock_bh(&t->stid_lock);
+	if (family == PF_INET)
+		__clear_bit(stid, t->stid_bmap);
+	else
+		bitmap_release_region(t->stid_bmap, stid, 2);
+	t->stid_tab[stid].data = NULL;
+	if (stid < t->nstids) {
+		if (family == PF_INET)
+			t->stids_in_use--;
+		else
+			t->stids_in_use -= 4;
+	} else {
+		t->sftids_in_use--;
+	}
+	spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_stid);
+
+/*
+ * Populate a TID_RELEASE WR.  Caller must properly size the skb.
+ */
+static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+			   unsigned int tid)
+{
+	struct cpl_tid_release *req;
+
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+/*
+ * Queue a TID release request and if necessary schedule a work queue to
+ * process it.
+ */
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+				    unsigned int tid)
+{
+	void **p = &t->tid_tab[tid];
+	struct adapter *adap = container_of(t, struct adapter, tids);
+
+	spin_lock_bh(&adap->tid_release_lock);
+	*p = adap->tid_release_head;
+	/* Low 2 bits encode the Tx channel number */
+	adap->tid_release_head = (void **)((uintptr_t)p | chan);
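+	/* For example, with chan == 2 and a pointer-aligned p this stores
+	 * p | 2; process_tid_release_list() recovers the channel with
+	 * "(uintptr_t)p & 3" and the original pointer by subtracting it back.
+	 */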
+	if (!adap->tid_release_task_busy) {
+		adap->tid_release_task_busy = true;
+		queue_work(adap->workq, &adap->tid_release_task);
+	}
+	spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Process the list of pending TID release requests.
+ */
+static void process_tid_release_list(struct work_struct *work)
+{
+	struct sk_buff *skb;
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, tid_release_task);
+
+	spin_lock_bh(&adap->tid_release_lock);
+	while (adap->tid_release_head) {
+		void **p = adap->tid_release_head;
+		unsigned int chan = (uintptr_t)p & 3;
+		p = (void *)p - chan;
+
+		adap->tid_release_head = *p;
+		*p = NULL;
+		spin_unlock_bh(&adap->tid_release_lock);
+
+		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+					 GFP_KERNEL)))
+			schedule_timeout_uninterruptible(1);
+
+		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+		t4_ofld_send(adap, skb);
+		spin_lock_bh(&adap->tid_release_lock);
+	}
+	adap->tid_release_task_busy = false;
+	spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Release a TID and inform HW.  If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+{
+	struct sk_buff *skb;
+	struct adapter *adap = container_of(t, struct adapter, tids);
+
+	WARN_ON(tid >= t->ntids);
+
+	if (t->tid_tab[tid]) {
+		t->tid_tab[tid] = NULL;
+		if (t->hash_base && (tid >= t->hash_base))
+			atomic_dec(&t->hash_tids_in_use);
+		else
+			atomic_dec(&t->tids_in_use);
+	}
+
+	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+	if (likely(skb)) {
+		mk_tid_release(skb, chan, tid);
+		t4_ofld_send(adap, skb);
+	} else
+		cxgb4_queue_tid_release(t, chan, tid);
+}
+EXPORT_SYMBOL(cxgb4_remove_tid);
+
+/*
+ * Allocate and initialize the TID tables.  Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+	size_t size;
+	unsigned int stid_bmap_size;
+	unsigned int natids = t->natids;
+	struct adapter *adap = container_of(t, struct adapter, tids);
+
+	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+	size = t->ntids * sizeof(*t->tid_tab) +
+	       natids * sizeof(*t->atid_tab) +
+	       t->nstids * sizeof(*t->stid_tab) +
+	       t->nsftids * sizeof(*t->stid_tab) +
+	       stid_bmap_size * sizeof(long) +
+	       t->nftids * sizeof(*t->ftid_tab) +
+	       t->nsftids * sizeof(*t->ftid_tab);
+
+	t->tid_tab = t4_alloc_mem(size);
+	if (!t->tid_tab)
+		return -ENOMEM;
+
+	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
+	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+	spin_lock_init(&t->stid_lock);
+	spin_lock_init(&t->atid_lock);
+
+	t->stids_in_use = 0;
+	t->sftids_in_use = 0;
+	t->afree = NULL;
+	t->atids_in_use = 0;
+	atomic_set(&t->tids_in_use, 0);
+	atomic_set(&t->hash_tids_in_use, 0);
+
+	/* Setup the free list for atid_tab and clear the stid bitmap. */
+	if (natids) {
+		while (--natids)
+			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+		t->afree = t->atid_tab;
+	}
+	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+	/* Reserve stid 0 for T4/T5 adapters */
+	if (!t->stid_base &&
+	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
+		__set_bit(0, t->stid_bmap);
+
+	return 0;
+}
+
+/**
+ *	cxgb4_create_server - create an IP server
+ *	@dev: the device
+ *	@stid: the server TID
+ *	@sip: local IP address to bind server to
+ *	@sport: the server's TCP port
+ *	@queue: queue to direct messages from this server to
+ *
+ *	Create an IP server for the given port and address.
+ *	Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+			__be32 sip, __be16 sport, __be16 vlan,
+			unsigned int queue)
+{
+	unsigned int chan;
+	struct sk_buff *skb;
+	struct adapter *adap;
+	struct cpl_pass_open_req *req;
+	int ret;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	adap = netdev2adap(dev);
+	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+	req->local_port = sport;
+	req->peer_port = htons(0);
+	req->local_ip = sip;
+	req->peer_ip = htonl(0);
+	chan = rxq_to_chan(&adap->sge, queue);
+	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
+	ret = t4_mgmt_tx(adap, skb);
+	return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_create_server);
+
+/**
+ *	cxgb4_create_server6 - create an IPv6 server
+ *	@dev: the device
+ *	@stid: the server TID
+ *	@sip: local IPv6 address to bind server to
+ *	@sport: the server's TCP port
+ *	@queue: queue to direct messages from this server to
+ *
+ *	Create an IPv6 server for the given port and address.
+ *	Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+			 const struct in6_addr *sip, __be16 sport,
+			 unsigned int queue)
+{
+	unsigned int chan;
+	struct sk_buff *skb;
+	struct adapter *adap;
+	struct cpl_pass_open_req6 *req;
+	int ret;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	adap = netdev2adap(dev);
+	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+	req->local_port = sport;
+	req->peer_port = htons(0);
+	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+	req->peer_ip_hi = cpu_to_be64(0);
+	req->peer_ip_lo = cpu_to_be64(0);
+	chan = rxq_to_chan(&adap->sge, queue);
+	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
+	ret = t4_mgmt_tx(adap, skb);
+	return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+			unsigned int queue, bool ipv6)
+{
+	struct sk_buff *skb;
+	struct adapter *adap;
+	struct cpl_close_listsvr_req *req;
+	int ret;
+
+	adap = netdev2adap(dev);
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
+	ret = t4_mgmt_tx(adap, skb);
+	return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_remove_server);
+
+/**
+ *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+ *	@mtus: the HW MTU table
+ *	@mtu: the target MTU
+ *	@idx: index of selected entry in the MTU table
+ *
+ *	Returns the index and the value in the HW MTU table that is closest to
+ *	but does not exceed @mtu, unless @mtu is smaller than any value in the
+ *	table, in which case that smallest available value is selected.
+ */
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+			    unsigned int *idx)
+{
+	unsigned int i = 0;
+
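+	/* Walk the table until the next entry would exceed @mtu.  With
+	 * hypothetical entries { ..., 1488, 1500, 2002, ... } and mtu == 1600
+	 * this stops at, and returns, 1500.
+	 */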
+	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+		++i;
+	if (idx)
+		*idx = i;
+	return mtus[i];
+}
+EXPORT_SYMBOL(cxgb4_best_mtu);
+
+/**
+ *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ *     @mtus: the HW MTU table
+ *     @header_size: Header Size
+ *     @data_size_max: maximum Data Segment Size
+ *     @data_size_align: desired Data Segment Size Alignment (2^N)
+ *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ *     MTU Table based solely on a Maximum MTU parameter, we break that
+ *     parameter up into a Header Size and Maximum Data Segment Size, and
+ *     provide a desired Data Segment Size Alignment.  If we find an MTU in
+ *     the Hardware MTU Table which will result in a Data Segment Size with
+ *     the requested alignment _and_ that MTU isn't "too far" from the
+ *     closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp)
+{
+	unsigned short max_mtu = header_size + data_size_max;
+	unsigned short data_size_align_mask = data_size_align - 1;
+	int mtu_idx, aligned_mtu_idx;
+
+	/* Scan the MTU Table till we find an MTU which is larger than our
+	 * Maximum MTU or we reach the end of the table.  Along the way,
+	 * record the last MTU found, if any, which will result in a Data
+	 * Segment Length matching the requested alignment.
+	 */
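+	/* As a purely illustrative example: header_size == 40,
+	 * data_size_max == 4096 and data_size_align == 512 would favour the
+	 * largest table MTU <= 4136 whose (MTU - 40) is a multiple of 512,
+	 * provided it is within one entry of the plain closest MTU.
+	 */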
+	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+		unsigned short data_size = mtus[mtu_idx] - header_size;
+
+		/* If this MTU minus the Header Size would result in a
+		 * Data Segment Size of the desired alignment, remember it.
+		 */
+		if ((data_size & data_size_align_mask) == 0)
+			aligned_mtu_idx = mtu_idx;
+
+		/* If we're not at the end of the Hardware MTU Table and the
+		 * next element is larger than our Maximum MTU, drop out of
+		 * the loop.
+		 */
+		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+			break;
+	}
+
+	/* If we fell out of the loop because we ran to the end of the table,
+	 * then we just have to use the last [largest] entry.
+	 */
+	if (mtu_idx == NMTUS)
+		mtu_idx--;
+
+	/* If we found an MTU which resulted in the requested Data Segment
+	 * Length alignment and that's "not far" from the largest MTU which is
+	 * less than or equal to the maximum MTU, then use that.
+	 */
+	if (aligned_mtu_idx >= 0 &&
+	    mtu_idx - aligned_mtu_idx <= 1)
+		mtu_idx = aligned_mtu_idx;
+
+	/* If the caller has passed in an MTU Index pointer, pass the
+	 * MTU Index back.  Return the MTU value.
+	 */
+	if (mtu_idxp)
+		*mtu_idxp = mtu_idx;
+	return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
+
+/**
+ *	cxgb4_tp_smt_idx - Get the Source MAC Table index for this VI
+ *	@chip: chip type
+ *	@viid: VI id of the given port
+ *
+ *	Return the SMT index for this VI.
+ */
+unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
+{
+	/* In T4/T5, SMT contains 256 SMAC entries organized in
+	 * 128 rows of 2 entries each.
+	 * In T6, SMT contains 256 SMAC entries in 256 rows.
+	 * TODO: The below code needs to be updated when we add support
+	 * for 256 VFs.
+	 */
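+	/* For example, VI id 5 maps to SMT index 10 on T4/T5 (two entries per
+	 * row) and directly to index 5 on T6.
+	 */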
+	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+		return ((viid & 0x7f) << 1);
+	else
+		return (viid & 0x7f);
+}
+EXPORT_SYMBOL(cxgb4_tp_smt_idx);
+
+/**
+ *	cxgb4_port_chan - get the HW channel of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the HW Tx channel of the given port.
+ */
+unsigned int cxgb4_port_chan(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->tx_chan;
+}
+EXPORT_SYMBOL(cxgb4_port_chan);
+
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u32 v1, v2, lp_count, hp_count;
+
+	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
+	if (is_t4(adap->params.chip)) {
+		lp_count = LP_COUNT_G(v1);
+		hp_count = HP_COUNT_G(v1);
+	} else {
+		lp_count = LP_COUNT_T5_G(v1);
+		hp_count = HP_COUNT_T5_G(v2);
+	}
+	return lpfifo ? lp_count : hp_count;
+}
+EXPORT_SYMBOL(cxgb4_dbfifo_count);
+
+/**
+ *	cxgb4_port_viid - get the VI id of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the VI id of the given port.
+ */
+unsigned int cxgb4_port_viid(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->viid;
+}
+EXPORT_SYMBOL(cxgb4_port_viid);
+
+/**
+ *	cxgb4_port_idx - get the index of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the index of the given port.
+ */
+unsigned int cxgb4_port_idx(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->port_id;
+}
+EXPORT_SYMBOL(cxgb4_port_idx);
+
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6)
+{
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_tcp_stats(adap, v4, v6);
+	spin_unlock(&adap->stats_lock);
+}
+EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+		      const unsigned int *pgsz_order)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+		     HPZ3_V(pgsz_order[3]));
+}
+EXPORT_SYMBOL(cxgb4_iscsi_init);
+
+int cxgb4_flush_eq_cache(struct net_device *dev)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+	return t4_sge_ctxt_flush(adap, adap->mbox);
+}
+EXPORT_SYMBOL(cxgb4_flush_eq_cache);
+
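+/* Read back the hardware producer/consumer indices of an egress queue.  The
+ * address arithmetic below assumes a 24-byte per-queue doorbell queue context
+ * in EDC0, with the cidx/pidx pair packed into the 64-bit word at offset 8.
+ */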
+static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
+{
+	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
+	__be64 indices;
+	int ret;
+
+	spin_lock(&adap->win0_lock);
+	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
+			   sizeof(indices), (__be32 *)&indices,
+			   T4_MEMORY_READ);
+	spin_unlock(&adap->win0_lock);
+	if (!ret) {
+		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
+		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
+	}
+	return ret;
+}
+
+int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
+			u16 size)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+
+	if (pidx != hw_pidx) {
+		u16 delta;
+		u32 val;
+
+		if (pidx >= hw_pidx)
+			delta = pidx - hw_pidx;
+		else
+			delta = size - hw_pidx + pidx;
+
+		if (is_t4(adap->params.chip))
+			val = PIDX_V(delta);
+		else
+			val = PIDX_T5_V(delta);
+		wmb();
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(qid) | val);
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
+{
+	struct adapter *adap;
+	u32 offset, memtype, memaddr;
+	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
+	u32 edc0_end, edc1_end, mc0_end, mc1_end;
+	int ret;
+
+	adap = netdev2adap(dev);
+
+	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
+
+	/* Figure out where the offset lands in the Memory Type/Address scheme.
+	 * This code assumes that the memory is laid out starting at offset 0
+	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
+	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
+	 * MC0, and some have both MC0 and MC1.
+	 */
+	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+	edc0_size = EDRAM0_SIZE_G(size) << 20;
+	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+	edc1_size = EDRAM1_SIZE_G(size) << 20;
+	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+	mc0_size = EXT_MEM0_SIZE_G(size) << 20;
+
+	edc0_end = edc0_size;
+	edc1_end = edc0_end + edc1_size;
+	mc0_end = edc1_end + mc0_size;
+
+	if (offset < edc0_end) {
+		memtype = MEM_EDC0;
+		memaddr = offset;
+	} else if (offset < edc1_end) {
+		memtype = MEM_EDC1;
+		memaddr = offset - edc0_end;
+	} else {
+		if (offset < mc0_end) {
+			memtype = MEM_MC0;
+			memaddr = offset - edc1_end;
+		} else if (is_t5(adap->params.chip)) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
+			mc1_end = mc0_end + mc1_size;
+			if (offset < mc1_end) {
+				memtype = MEM_MC1;
+				memaddr = offset - mc0_end;
+			} else {
+				/* offset beyond the end of any memory */
+				goto err;
+			}
+		} else {
+			/* T4/T6 only has a single memory channel */
+			goto err;
+		}
+	}
+
+	spin_lock(&adap->win0_lock);
+	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
+	spin_unlock(&adap->win0_lock);
+	return ret;
+
+err:
+	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
+		stag, offset);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(cxgb4_read_tpte);
+
+u64 cxgb4_read_sge_timestamp(struct net_device *dev)
+{
+	u32 hi, lo;
+	struct adapter *adap;
+
+	adap = netdev2adap(dev);
+	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
+
+	return ((u64)hi << 32) | (u64)lo;
+}
+EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+			 unsigned int qid,
+			 enum cxgb4_bar2_qtype qtype,
+			 int user,
+			 u64 *pbar2_qoffset,
+			 unsigned int *pbar2_qid)
+{
+	return t4_bar2_sge_qregs(netdev2adap(dev),
+				 qid,
+				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
+				  ? T4_BAR2_QTYPE_EGRESS
+				  : T4_BAR2_QTYPE_INGRESS),
+				 user,
+				 pbar2_qoffset,
+				 pbar2_qid);
+}
+EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
+
+static struct pci_driver cxgb4_driver;
+
+static void check_neigh_update(struct neighbour *neigh)
+{
+	const struct device *parent;
+	const struct net_device *netdev = neigh->dev;
+
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		netdev = vlan_dev_real_dev(netdev);
+	parent = netdev->dev.parent;
+	if (parent && parent->driver == &cxgb4_driver.driver)
+		t4_l2t_update(dev_get_drvdata(parent), neigh);
+}
+
+static int netevent_cb(struct notifier_block *nb, unsigned long event,
+		       void *data)
+{
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		check_neigh_update(data);
+		break;
+	case NETEVENT_REDIRECT:
+	default:
+		break;
+	}
+	return 0;
+}
+
+static bool netevent_registered;
+static struct notifier_block cxgb4_netevent_nb = {
+	.notifier_call = netevent_cb
+};
+
+static void drain_db_fifo(struct adapter *adap, int usecs)
+{
+	u32 v1, v2, lp_count, hp_count;
+
+	do {
+		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
+		if (is_t4(adap->params.chip)) {
+			lp_count = LP_COUNT_G(v1);
+			hp_count = HP_COUNT_G(v1);
+		} else {
+			lp_count = LP_COUNT_T5_G(v1);
+			hp_count = HP_COUNT_T5_G(v2);
+		}
+
+		if (lp_count == 0 && hp_count == 0)
+			break;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(usecs));
+	} while (1);
+}
+
+static void disable_txq_db(struct sge_txq *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->db_lock, flags);
+	q->db_disabled = 1;
+	spin_unlock_irqrestore(&q->db_lock, flags);
+}
+
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
+{
+	spin_lock_irq(&q->db_lock);
+	if (q->db_pidx_inc) {
+		/* Make sure that all writes to the TX descriptors
+		 * are committed before we tell HW about them.
+		 */
+		wmb();
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
+		q->db_pidx_inc = 0;
+	}
+	q->db_disabled = 0;
+	spin_unlock_irq(&q->db_lock);
+}
+
+static void disable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		disable_txq_db(&adap->sge.ctrlq[i].q);
+}
+
+static void enable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+	if (adap->uld_handle[CXGB4_ULD_RDMA])
+		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+				cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, db_full_task);
+
+	drain_db_fifo(adap, dbfifo_drain_delay);
+	enable_dbs(adap);
+	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+	else
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
+}
+
+static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
+{
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	spin_lock_irq(&q->db_lock);
+	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+	if (q->db_pidx != hw_pidx) {
+		u16 delta;
+		u32 val;
+
+		if (q->db_pidx >= hw_pidx)
+			delta = q->db_pidx - hw_pidx;
+		else
+			delta = q->size - hw_pidx + q->db_pidx;
+
+		if (is_t4(adap->params.chip))
+			val = PIDX_V(delta);
+		else
+			val = PIDX_T5_V(delta);
+		wmb();
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+			     QID_V(q->cntxt_id) | val);
+	}
+out:
+	q->db_disabled = 0;
+	q->db_pidx_inc = 0;
+	spin_unlock_irq(&q->db_lock);
+	if (ret)
+		CH_WARN(adap, "DB drop recovery failed.\n");
+}
+
+static void recover_all_queues(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
+}
+
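+/* Work handler for doorbell-drop recovery.  On T4 the doorbell FIFO is
+ * drained and every Tx queue's producer index is resynchronized; on T5 the
+ * dropped doorbell is re-issued through the BAR2 user doorbell region and
+ * BAR2 write combining is re-enabled.
+ */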
+static void process_db_drop(struct work_struct *work)
+{
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, db_drop_task);
+
+	if (is_t4(adap->params.chip)) {
+		drain_db_fifo(adap, dbfifo_drain_delay);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+		drain_db_fifo(adap, dbfifo_drain_delay);
+		recover_all_queues(adap);
+		drain_db_fifo(adap, dbfifo_drain_delay);
+		enable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+	} else if (is_t5(adap->params.chip)) {
+		u32 dropped_db = t4_read_reg(adap, 0x010ac);
+		u16 qid = (dropped_db >> 15) & 0x1ffff;
+		u16 pidx_inc = dropped_db & 0x1fff;
+		u64 bar2_qoffset;
+		unsigned int bar2_qid;
+		int ret;
+
+		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+					0, &bar2_qoffset, &bar2_qid);
+		if (ret)
+			dev_err(adap->pdev_dev, "doorbell drop recovery: "
+				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
+		else
+			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
+			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
+
+		/* Re-enable BAR2 WC */
+		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
+	}
+
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+}
+
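+/* Entry point for the doorbell-FIFO-full condition (T4 only): disable the
+ * queue doorbells, notify the RDMA ULD, mask the doorbell FIFO threshold
+ * interrupts and schedule the recovery work.
+ */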
+void t4_db_full(struct adapter *adap)
+{
+	if (is_t4(adap->params.chip)) {
+		disable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
+		queue_work(adap->workq, &adap->db_full_task);
+	}
+}
+
+void t4_db_dropped(struct adapter *adap)
+{
+	if (is_t4(adap->params.chip)) {
+		disable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+	}
+	queue_work(adap->workq, &adap->db_drop_task);
+}
+
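+/* Fill in the lower-layer driver information for an upper-layer driver (ULD)
+ * and hand it to the ULD's add() method, recording the returned handle on
+ * success and notifying the ULD that the adapter is up if initialization has
+ * already completed.
+ */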
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+	void *handle;
+	struct cxgb4_lld_info lli;
+	unsigned short i;
+
+	lli.pdev = adap->pdev;
+	lli.pf = adap->pf;
+	lli.l2t = adap->l2t;
+	lli.tids = &adap->tids;
+	lli.ports = adap->port;
+	lli.vr = &adap->vres;
+	lli.mtus = adap->params.mtus;
+	if (uld == CXGB4_ULD_RDMA) {
+		lli.rxq_ids = adap->sge.rdma_rxq;
+		lli.ciq_ids = adap->sge.rdma_ciq;
+		lli.nrxq = adap->sge.rdmaqs;
+		lli.nciq = adap->sge.rdmaciqs;
+	} else if (uld == CXGB4_ULD_ISCSI) {
+		lli.rxq_ids = adap->sge.ofld_rxq;
+		lli.nrxq = adap->sge.ofldqsets;
+	}
+	lli.ntxq = adap->sge.ofldqsets;
+	lli.nchan = adap->params.nports;
+	lli.nports = adap->params.nports;
+	lli.wr_cred = adap->params.ofldq_wr_cred;
+	lli.adapter_type = adap->params.chip;
+	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
+	lli.udb_density = 1 << adap->params.sge.eq_qpp;
+	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
+	lli.filt_mode = adap->params.tp.vlan_pri_map;
+	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+	for (i = 0; i < NCHAN; i++)
+		lli.tx_modq[i] = i;
+	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
+	lli.fw_vers = adap->params.fw_vers;
+	lli.dbfifo_int_thresh = dbfifo_int_thresh;
+	lli.sge_ingpadboundary = adap->sge.fl_align;
+	lli.sge_egrstatuspagesize = adap->sge.stat_len;
+	lli.sge_pktshift = adap->sge.pktshift;
+	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lli.max_ordird_qp = adap->params.max_ordird_qp;
+	lli.max_ird_adapter = adap->params.max_ird_adapter;
+	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+	lli.nodeid = dev_to_node(adap->pdev_dev);
+
+	handle = ulds[uld].add(&lli);
+	if (IS_ERR(handle)) {
+		dev_warn(adap->pdev_dev,
+			 "could not attach to the %s driver, error %ld\n",
+			 uld_str[uld], PTR_ERR(handle));
+		return;
+	}
+
+	adap->uld_handle[uld] = handle;
+
+	if (!netevent_registered) {
+		register_netevent_notifier(&cxgb4_netevent_nb);
+		netevent_registered = true;
+	}
+
+	if (adap->flags & FULL_INIT_DONE)
+		ulds[uld].state_change(handle, CXGB4_STATE_UP);
+}
+
+static void attach_ulds(struct adapter *adap)
+{
+	unsigned int i;
+
+	spin_lock(&adap_rcu_lock);
+	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
+	spin_unlock(&adap_rcu_lock);
+
+	mutex_lock(&uld_mutex);
+	list_add_tail(&adap->list_node, &adapter_list);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (ulds[i].add)
+			uld_attach(adap, i);
+	mutex_unlock(&uld_mutex);
+}
+
+static void detach_ulds(struct adapter *adap)
+{
+	unsigned int i;
+
+	mutex_lock(&uld_mutex);
+	list_del(&adap->list_node);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (adap->uld_handle[i]) {
+			ulds[i].state_change(adap->uld_handle[i],
+					     CXGB4_STATE_DETACH);
+			adap->uld_handle[i] = NULL;
+		}
+	if (netevent_registered && list_empty(&adapter_list)) {
+		unregister_netevent_notifier(&cxgb4_netevent_nb);
+		netevent_registered = false;
+	}
+	mutex_unlock(&uld_mutex);
+
+	spin_lock(&adap_rcu_lock);
+	list_del_rcu(&adap->rcu_node);
+	spin_unlock(&adap_rcu_lock);
+}
+
+static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+{
+	unsigned int i;
+
+	mutex_lock(&uld_mutex);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (adap->uld_handle[i])
+			ulds[i].state_change(adap->uld_handle[i], new_state);
+	mutex_unlock(&uld_mutex);
+}
+
+/**
+ *	cxgb4_register_uld - register an upper-layer driver
+ *	@type: the ULD type
+ *	@p: the ULD methods
+ *
+ *	Registers an upper-layer driver with this driver and notifies the ULD
+ *	about any presently available devices that support its type.  Returns
+ *	%-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+{
+	int ret = 0;
+	struct adapter *adap;
+
+	if (type >= CXGB4_ULD_MAX)
+		return -EINVAL;
+	mutex_lock(&uld_mutex);
+	if (ulds[type].add) {
+		ret = -EBUSY;
+		goto out;
+	}
+	ulds[type] = *p;
+	list_for_each_entry(adap, &adapter_list, list_node)
+		uld_attach(adap, type);
+out:	mutex_unlock(&uld_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
+
+/**
+ *	cxgb4_unregister_uld - unregister an upper-layer driver
+ *	@type: the ULD type
+ *
+ *	Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+	struct adapter *adap;
+
+	if (type >= CXGB4_ULD_MAX)
+		return -EINVAL;
+	mutex_lock(&uld_mutex);
+	list_for_each_entry(adap, &adapter_list, list_node)
+		adap->uld_handle[type] = NULL;
+	ulds[type].add = NULL;
+	mutex_unlock(&uld_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+				   unsigned long event, void *data)
+{
+	struct inet6_ifaddr *ifa = data;
+	struct net_device *event_dev = ifa->idev->dev;
+	const struct device *parent = NULL;
+#if IS_ENABLED(CONFIG_BONDING)
+	struct adapter *adap;
+#endif
+	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+		event_dev = vlan_dev_real_dev(event_dev);
+#if IS_ENABLED(CONFIG_BONDING)
+	if (event_dev->flags & IFF_MASTER) {
+		list_for_each_entry(adap, &adapter_list, list_node) {
+			switch (event) {
+			case NETDEV_UP:
+				cxgb4_clip_get(adap->port[0],
+					       (const u32 *)ifa, 1);
+				break;
+			case NETDEV_DOWN:
+				cxgb4_clip_release(adap->port[0],
+						   (const u32 *)ifa, 1);
+				break;
+			default:
+				break;
+			}
+		}
+		return NOTIFY_OK;
+	}
+#endif
+
+	if (event_dev)
+		parent = event_dev->dev.parent;
+
+	if (parent && parent->driver == &cxgb4_driver.driver) {
+		switch (event) {
+		case NETDEV_UP:
+			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
+			break;
+		case NETDEV_DOWN:
+			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
+			break;
+		default:
+			break;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static bool inet6addr_registered;
+static struct notifier_block cxgb4_inet6addr_notifier = {
+	.notifier_call = cxgb4_inet6addr_handler
+};
+
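+/* Refresh the CLIP (local IPv6 address) table entries for each port's root
+ * net_device.
+ */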
+static void update_clip(const struct adapter *adap)
+{
+	int i;
+	struct net_device *dev;
+	int ret;
+
+	rcu_read_lock();
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		dev = adap->port[i];
+		ret = 0;
+
+		if (dev)
+			ret = cxgb4_update_root_dev_clip(dev);
+
+		if (ret < 0)
+			break;
+	}
+	rcu_read_unlock();
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+/**
+ *	cxgb_up - enable the adapter
+ *	@adap: adapter being enabled
+ *
+ *	Called when the first port is enabled, this function performs the
+ *	actions necessary to make an adapter operational, such as completing
+ *	the initialization of HW modules, and enabling interrupts.
+ *
+ *	Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+	int err;
+
+	err = setup_sge_queues(adap);
+	if (err)
+		goto out;
+	err = setup_rss(adap);
+	if (err)
+		goto freeq;
+
+	if (adap->flags & USING_MSIX) {
+		name_msix_vecs(adap);
+		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+				  adap->msix_info[0].desc, adap);
+		if (err)
+			goto irq_err;
+
+		err = request_msix_queue_irqs(adap);
+		if (err) {
+			free_irq(adap->msix_info[0].vec, adap);
+			goto irq_err;
+		}
+	} else {
+		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+				  adap->port[0]->name, adap);
+		if (err)
+			goto irq_err;
+	}
+
+	mutex_lock(&uld_mutex);
+	enable_rx(adap);
+	t4_sge_start(adap);
+	t4_intr_enable(adap);
+	adap->flags |= FULL_INIT_DONE;
+	mutex_unlock(&uld_mutex);
+
+	notify_ulds(adap, CXGB4_STATE_UP);
+#if IS_ENABLED(CONFIG_IPV6)
+	update_clip(adap);
+#endif
+ out:
+	return err;
+ irq_err:
+	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+	t4_free_sge_resources(adap);
+	goto out;
+}
+
+static void cxgb_down(struct adapter *adapter)
+{
+	cancel_work_sync(&adapter->tid_release_task);
+	cancel_work_sync(&adapter->db_full_task);
+	cancel_work_sync(&adapter->db_drop_task);
+	adapter->tid_release_task_busy = false;
+	adapter->tid_release_head = NULL;
+
+	t4_sge_stop(adapter);
+	t4_free_sge_resources(adapter);
+	adapter->flags &= ~FULL_INIT_DONE;
+}
+
+/*
+ * net_device operations
+ */
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	netif_carrier_off(dev);
+
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = cxgb_up(adapter);
+		if (err < 0)
+			return err;
+	}
+
+	err = link_start(dev);
+	if (!err)
+		netif_tx_start_all_queues(dev);
+	return err;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	netif_tx_stop_all_queues(dev);
+	netif_carrier_off(dev);
+	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
+}
+
+/* Return an error number if the indicated filter isn't writable ...
+ */
+static int writable_filter(struct filter_entry *f)
+{
+	if (f->locked)
+		return -EPERM;
+	if (f->pending)
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Delete the filter at the specified index (if valid).  This checks for all
+ * the common problems with doing this, like the filter being locked or
+ * currently pending in another operation.
+ */
+static int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+	struct filter_entry *f;
+	int ret;
+
+	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+		return -EINVAL;
+
+	f = &adapter->tids.ftid_tab[fidx];
+	ret = writable_filter(f);
+	if (ret)
+		return ret;
+	if (f->valid)
+		return del_filter_wr(adapter, fidx);
+
+	return 0;
+}
+
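+/* Install a "server filter" which steers packets matching the given local IP
+ * address and port to the specified ingress queue.  The stid is translated
+ * into an index within the server-filter region of the filter table before
+ * the filter work request is issued.
+ */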
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+		__be32 sip, __be16 sport, __be16 vlan,
+		unsigned int queue, unsigned char port, unsigned char mask)
+{
+	int ret;
+	struct filter_entry *f;
+	struct adapter *adap;
+	int i;
+	u8 *val;
+
+	adap = netdev2adap(dev);
+
+	/* Adjust stid to correct filter index */
+	stid -= adap->tids.sftid_base;
+	stid += adap->tids.nftids;
+
+	/* Check to make sure the filter requested is writable ...
+	 */
+	f = &adap->tids.ftid_tab[stid];
+	ret = writable_filter(f);
+	if (ret)
+		return ret;
+
+	/* Clear out any old resources being used by the filter before
+	 * we start constructing the new filter.
+	 */
+	if (f->valid)
+		clear_filter(adap, f);
+
+	/* Clear out filter specifications */
+	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+	f->fs.val.lport = cpu_to_be16(sport);
+	f->fs.mask.lport  = ~0;
+	val = (u8 *)&sip;
+	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+		for (i = 0; i < 4; i++) {
+			f->fs.val.lip[i] = val[i];
+			f->fs.mask.lip[i] = ~0;
+		}
+		if (adap->params.tp.vlan_pri_map & PORT_F) {
+			f->fs.val.iport = port;
+			f->fs.mask.iport = mask;
+		}
+	}
+
+	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
+		f->fs.val.proto = IPPROTO_TCP;
+		f->fs.mask.proto = ~0;
+	}
+
+	f->fs.dirsteer = 1;
+	f->fs.iq = queue;
+	/* Mark filter as locked */
+	f->locked = 1;
+	f->fs.rpttid = 1;
+
+	ret = set_filter_wr(adap, stid);
+	if (ret) {
+		clear_filter(adap, f);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_create_server_filter);
+
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+		unsigned int queue, bool ipv6)
+{
+	int ret;
+	struct filter_entry *f;
+	struct adapter *adap;
+
+	adap = netdev2adap(dev);
+
+	/* Adjust stid to correct filter index */
+	stid -= adap->tids.sftid_base;
+	stid += adap->tids.nftids;
+
+	f = &adap->tids.ftid_tab[stid];
+	/* Unlock the filter */
+	f->locked = 0;
+
+	ret = delete_filter(adap, stid);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+						struct rtnl_link_stats64 *ns)
+{
+	struct port_stats stats;
+	struct port_info *p = netdev_priv(dev);
+	struct adapter *adapter = p->adapter;
+
+	/* Block retrieving statistics during EEH error
+	 * recovery. Otherwise, the recovery might fail
+	 * and the PCI device will be removed permanently.
+	 */
+	spin_lock(&adapter->stats_lock);
+	if (!netif_device_present(dev)) {
+		spin_unlock(&adapter->stats_lock);
+		return ns;
+	}
+	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+				 &p->stats_base);
+	spin_unlock(&adapter->stats_lock);
+
+	ns->tx_bytes   = stats.tx_octets;
+	ns->tx_packets = stats.tx_frames;
+	ns->rx_bytes   = stats.rx_octets;
+	ns->rx_packets = stats.rx_frames;
+	ns->multicast  = stats.rx_mcast_frames;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+			       stats.rx_runt;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = stats.rx_fcs_err;
+	ns->rx_frame_errors  = stats.rx_symbol_err;
+	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
+			       stats.rx_ovflow2 + stats.rx_ovflow3 +
+			       stats.rx_trunc0 + stats.rx_trunc1 +
+			       stats.rx_trunc2 + stats.rx_trunc3;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = 0;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = 0;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = 0;
+
+	ns->tx_errors = stats.tx_error_frames;
+	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+	return ns;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	unsigned int mbox;
+	int ret = 0, prtad, devad;
+	struct port_info *pi = netdev_priv(dev);
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		if (pi->mdio_addr < 0)
+			return -EOPNOTSUPP;
+		data->phy_id = pi->mdio_addr;
+		break;
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (mdio_phy_id_is_c45(data->phy_id)) {
+			prtad = mdio_phy_id_prtad(data->phy_id);
+			devad = mdio_phy_id_devad(data->phy_id);
+		} else if (data->phy_id < 32) {
+			prtad = data->phy_id;
+			devad = 0;
+			data->reg_num &= 0x1f;
+		} else
+			return -EINVAL;
+
+		mbox = pi->adapter->pf;
+		if (cmd == SIOCGMIIREG)
+			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
+					 data->reg_num, &data->val_out);
+		else
+			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
+					 data->reg_num, data->val_in);
+		break;
+	case SIOCGHWTSTAMP:
+		return copy_to_user(req->ifr_data, &pi->tstamp_config,
+				    sizeof(pi->tstamp_config)) ?
+			-EFAULT : 0;
+	case SIOCSHWTSTAMP:
+		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
+				   sizeof(pi->tstamp_config)))
+			return -EFAULT;
+
+		switch (pi->tstamp_config.rx_filter) {
+		case HWTSTAMP_FILTER_NONE:
+			pi->rxtstamp = false;
+			break;
+		case HWTSTAMP_FILTER_ALL:
+			pi->rxtstamp = true;
+			break;
+		default:
+			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+			return -ERANGE;
+		}
+
+		return copy_to_user(req->ifr_data, &pi->tstamp_config,
+				    sizeof(pi->tstamp_config)) ?
+			-EFAULT : 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+	/* unfortunately we can't return errors to the stack */
+	set_rxmode(dev, -1, false);
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
+		return -EINVAL;
+	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
+			    -1, -1, -1, true);
+	if (!ret)
+		dev->mtu = new_mtu;
+	return ret;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+	int ret;
+	struct sockaddr *addr = p;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
+			    pi->xact_addr_filt, addr->sa_data, true, true);
+	if (ret < 0)
+		return ret;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	pi->xact_addr_filt = ret;
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+
+	if (adap->flags & USING_MSIX) {
+		int i;
+		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+		for (i = pi->nqsets; i; i--, rx++)
+			t4_sge_intr_msix(0, &rx->rspq);
+	} else
+		t4_intr_handler(adap)(0, adap);
+}
+#endif
+
+static const struct net_device_ops cxgb4_netdev_ops = {
+	.ndo_open             = cxgb_open,
+	.ndo_stop             = cxgb_close,
+	.ndo_start_xmit       = t4_eth_xmit,
+	.ndo_select_queue     = cxgb_select_queue,
+	.ndo_get_stats64      = cxgb_get_stats,
+	.ndo_set_rx_mode      = cxgb_set_rxmode,
+	.ndo_set_mac_address  = cxgb_set_mac_addr,
+	.ndo_set_features     = cxgb_set_features,
+	.ndo_validate_addr    = eth_validate_addr,
+	.ndo_do_ioctl         = cxgb_ioctl,
+	.ndo_change_mtu       = cxgb_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller  = cxgb_netpoll,
+#endif
+#ifdef CONFIG_CHELSIO_T4_FCOE
+	.ndo_fcoe_enable      = cxgb_fcoe_enable,
+	.ndo_fcoe_disable     = cxgb_fcoe_disable,
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll        = cxgb_busy_poll,
+#endif
+};
+
+void t4_fatal_err(struct adapter *adap)
+{
+	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+	t4_intr_disable(adap);
+	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+}
+
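+/* Program the PCIe memory window used by the driver for adapter memory
+ * accesses (MEMWIN_NIC).
+ */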
+static void setup_memwin(struct adapter *adap)
+{
+	u32 nic_win_base = t4_get_util_window(adap);
+
+	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
+}
+
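+/* If on-chip queue (OCQ) memory has been allocated, program PCIe memory
+ * access window 3 to cover the OCQ region of BAR2.
+ */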
+static void setup_memwin_rdma(struct adapter *adap)
+{
+	if (adap->vres.ocq.size) {
+		u32 start;
+		unsigned int sz_kb;
+
+		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
+		start &= PCI_BASE_ADDRESS_MEM_MASK;
+		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
+		t4_write_reg(adap,
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
+		t4_write_reg(adap,
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
+			     adap->vres.ocq.start);
+		t4_read_reg(adap,
+			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
+	}
+}
+
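+/* Minimal adapter bring-up used on the EEH slot-reset path: negotiate device
+ * capabilities with the firmware, configure global RSS and PF resources,
+ * initialize the SGE and apply a few TP tuning settings.
+ */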
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+	u32 v;
+	int ret;
+
+	/* get device capabilities */
+	memset(c, 0, sizeof(*c));
+	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
+	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
+	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
+	if (ret < 0)
+		return ret;
+
+	/* select capabilities we'll be using */
+	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+		if (!vf_acls)
+			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+		else
+			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+	} else if (vf_acls) {
+		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+		return ret;
+	}
+	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = t4_config_glbl_rss(adap, adap->pf,
+				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
+				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
+	if (ret < 0)
+		return ret;
+
+	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
+			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+			  FW_CMD_CAP_PF);
+	if (ret < 0)
+		return ret;
+
+	t4_sge_init(adap);
+
+	/* tweak some settings */
+	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+	v = t4_read_reg(adap, TP_PIO_DATA_A);
+	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
+
+	/* first 4 Tx modulation queues point to consecutive Tx channels */
+	adap->params.tp.tx_modq_map = 0xE4;
+	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
+
+	/* associate each Tx modulation queue with consecutive Tx channels */
+	v = 0x84218421;
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_HDR_A);
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_FIFO_A);
+	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+			  &v, 1, TP_TX_SCHED_PCMD_A);
+
+#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
+	if (is_offload(adap)) {
+		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+	}
+
+	/* get basic stuff going */
+	return t4_early_init(adap, adap->pf);
+}
+
+/*
+ * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration.
+ */
+
+/*
+ * Tweak configuration based on module parameters, etc.  Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization.  But even in the case of using Firmware Configuration
+ * Files, we'd like to expose the ability to change these via module
+ * parameters so these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+	/*
+	 * Fix up various Host-Dependent Parameters like Page Size, Cache
+	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
+	 * 64B Cache Line Size ...
+	 */
+	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
+
+	/*
+	 * Process module parameters which affect early initialization.
+	 */
+	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
+		dev_err(&adapter->pdev->dev,
+			"Ignoring illegal rx_dma_offset=%d, using 2\n",
+			rx_dma_offset);
+		rx_dma_offset = 2;
+	}
+	t4_set_reg_field(adapter, SGE_CONTROL_A,
+			 PKTSHIFT_V(PKTSHIFT_M),
+			 PKTSHIFT_V(rx_dma_offset));
+
+	/*
+	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+	 * adds the pseudo header itself.
+	 */
+	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+			       CSUM_HAS_PSEUDO_HDR_F, 0);
+
+	return 0;
+}
+
+/* 10Gb/s-BT PHY Support.  Chip-external 10Gb/s-BT PHYs are complex chips
+ * unto themselves and they contain their own firmware to perform their
+ * tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+			      size_t phy_fw_size)
+{
+	int offset;
+
+	/* At offset 0x8 you're looking for the primary image's
+	 * starting offset which is 3 Bytes wide
+	 *
+	 * At offset 0xa of the primary image, you look for the offset
+	 * of the DRAM segment which is 3 Bytes wide.
+	 *
+	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
+	 * wide
+	 */
+	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
+	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+	#define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
+	offset = le24(phy_fw_data + 0x8) << 12;
+	offset = le24(phy_fw_data + offset + 0xa);
+	return be16(phy_fw_data + offset + 0x27e);
+
+	#undef be16
+	#undef le16
+	#undef le24
+}
+
+static struct info_10gbt_phy_fw {
+	unsigned int phy_fw_id;		/* PCI Device ID */
+	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
+	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+	int phy_flash;			/* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+	{
+		PHY_AQ1202_DEVICEID,
+		PHY_AQ1202_FIRMWARE,
+		phy_aq1202_version,
+		1,
+	},
+	{
+		PHY_BCM84834_DEVICEID,
+		PHY_BCM84834_FIRMWARE,
+		NULL,
+		0,
+	},
+	{ 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+		if (phy_info_array[i].phy_fw_id == devid)
+			return &phy_info_array[i];
+	}
+	return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
+ * we return a negative error number.  If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()).  If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+	const struct firmware *phyf;
+	int ret;
+	struct info_10gbt_phy_fw *phy_info;
+
+	/* Use the device ID to determine which PHY file to flash.
+	 */
+	phy_info = find_phy_info(adap->pdev->device);
+	if (!phy_info) {
+		dev_warn(adap->pdev_dev,
+			 "No PHY Firmware file found for this PHY\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+	 * use that. The adapter firmware provides us with a memory buffer
+	 * where we can load a PHY firmware file from the host if we want to
+	 * override the PHY firmware File in flash.
+	 */
+	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
+				      adap->pdev_dev);
+	if (ret < 0) {
+		/* For adapters without FLASH attached to PHY for their
+		 * firmware, it's obviously a fatal error if we can't get the
+		 * firmware to the adapter.  For adapters with PHY firmware
+		 * FLASH storage, it's worth a warning if we can't find the
+		 * PHY Firmware but we'll neuter the error ...
+		 */
+		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
+			"/lib/firmware/%s, error %d\n",
+			phy_info->phy_fw_file, -ret);
+		if (phy_info->phy_flash) {
+			int cur_phy_fw_ver = 0;
+
+			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+			dev_warn(adap->pdev_dev, "continuing with on-adapter "
+				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
+			ret = 0;
+		}
+
+		return ret;
+	}
+
+	/* Load PHY Firmware onto adapter.
+	 */
+	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+			     phy_info->phy_fw_version,
+			     (u8 *)phyf->data, phyf->size);
+	if (ret < 0)
+		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+			-ret);
+	else if (ret > 0) {
+		int new_phy_fw_ver = 0;
+
+		if (phy_info->phy_fw_version)
+			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+								  phyf->size);
+		dev_info(adap->pdev_dev, "Successfully transferred PHY "
+			 "Firmware /lib/firmware/%s, version %#x\n",
+			 phy_info->phy_fw_file, new_phy_fw_ver);
+	}
+
+	release_firmware(phyf);
+
+	return ret;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+	struct fw_caps_config_cmd caps_cmd;
+	const struct firmware *cf;
+	unsigned long mtype = 0, maddr = 0;
+	u32 finiver, finicsum, cfcsum;
+	int ret;
+	int config_issued = 0;
+	char *fw_config_file, fw_config_file_path[256];
+	char *config_name = NULL;
+
+	/*
+	 * Reset device if necessary.
+	 */
+	if (reset) {
+		ret = t4_fw_reset(adapter, adapter->mbox,
+				  PIORSTMODE_F | PIORST_F);
+		if (ret < 0)
+			goto bye;
+	}
+
+	/* If this is a 10Gb/s-BT adapter make sure the chip-external
+	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
+	 * to be performed after any global adapter RESET above since some
+	 * PHYs only have local RAM copies of the PHY firmware.
+	 */
+	if (is_10gbt_device(adapter->pdev->device)) {
+		ret = adap_init0_phy(adapter);
+		if (ret < 0)
+			goto bye;
+	}
+	/*
+	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
+	 * then use that.  Otherwise, use the configuration file stored
+	 * in the adapter flash ...
+	 */
+	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+	case CHELSIO_T4:
+		fw_config_file = FW4_CFNAME;
+		break;
+	case CHELSIO_T5:
+		fw_config_file = FW5_CFNAME;
+		break;
+	case CHELSIO_T6:
+		fw_config_file = FW6_CFNAME;
+		break;
+	default:
+		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+		       adapter->pdev->device);
+		ret = -EINVAL;
+		goto bye;
+	}
+
+	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
+	if (ret < 0) {
+		config_name = "On FLASH";
+		mtype = FW_MEMTYPE_CF_FLASH;
+		maddr = t4_flash_cfg_addr(adapter);
+	} else {
+		u32 params[7], val[7];
+
+		sprintf(fw_config_file_path,
+			"/lib/firmware/%s", fw_config_file);
+		config_name = fw_config_file_path;
+
+		if (cf->size >= FLASH_CFG_MAX_SIZE)
+			ret = -ENOMEM;
+		else {
+			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+			ret = t4_query_params(adapter, adapter->mbox,
+					      adapter->pf, 0, 1, params, val);
+			if (ret == 0) {
+				/*
+				 * For t4_memory_rw() below addresses and
+				 * sizes have to be in terms of multiples of 4
+				 * bytes.  So, if the Configuration File isn't
+				 * a multiple of 4 bytes in length we'll have
+				 * to write that out separately since we can't
+				 * guarantee that the bytes following the
+				 * residual byte in the buffer returned by
+				 * request_firmware() are zeroed out ...
+				 */
+				size_t resid = cf->size & 0x3;
+				size_t size = cf->size & ~0x3;
+				__be32 *data = (__be32 *)cf->data;
+
+				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
+				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
+
+				spin_lock(&adapter->win0_lock);
+				ret = t4_memory_rw(adapter, 0, mtype, maddr,
+						   size, data, T4_MEMORY_WRITE);
+				if (ret == 0 && resid != 0) {
+					union {
+						__be32 word;
+						char buf[4];
+					} last;
+					int i;
+
+					last.word = data[size >> 2];
+					for (i = resid; i < 4; i++)
+						last.buf[i] = 0;
+					ret = t4_memory_rw(adapter, 0, mtype,
+							   maddr + size,
+							   4, &last.word,
+							   T4_MEMORY_WRITE);
+				}
+				spin_unlock(&adapter->win0_lock);
+			}
+		}
+
+		release_firmware(cf);
+		if (ret)
+			goto bye;
+	}
+
+	/*
+	 * Issue a Capability Configuration command to the firmware to get it
+	 * to parse the Configuration File.  We don't use t4_fw_config_file()
+	 * because we want the ability to modify various features after we've
+	 * processed the configuration file ...
+	 */
+	memset(&caps_cmd, 0, sizeof(caps_cmd));
+	caps_cmd.op_to_write =
+		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+		      FW_CMD_REQUEST_F |
+		      FW_CMD_READ_F);
+	caps_cmd.cfvalid_to_len16 =
+		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
+		      FW_LEN16(caps_cmd));
+	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+			 &caps_cmd);
+
+	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+	 * Configuration File in FLASH), our last gasp effort is to use the
+	 * Firmware Configuration File which is embedded in the firmware.  A
+	 * very few early versions of the firmware didn't have one embedded
+	 * but we can ignore those.
+	 */
+	if (ret == -ENOENT) {
+		memset(&caps_cmd, 0, sizeof(caps_cmd));
+		caps_cmd.op_to_write =
+			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+					FW_CMD_REQUEST_F |
+					FW_CMD_READ_F);
+		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+				sizeof(caps_cmd), &caps_cmd);
+		config_name = "Firmware Default";
+	}
+
+	config_issued = 1;
+	if (ret < 0)
+		goto bye;
+
+	finiver = ntohl(caps_cmd.finiver);
+	finicsum = ntohl(caps_cmd.finicsum);
+	cfcsum = ntohl(caps_cmd.cfcsum);
+	if (finicsum != cfcsum)
+		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
+			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
+			 finicsum, cfcsum);
+
+	/*
+	 * And now tell the firmware to use the configuration we just loaded.
+	 */
+	caps_cmd.op_to_write =
+		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+		      FW_CMD_REQUEST_F |
+		      FW_CMD_WRITE_F);
+	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+			 NULL);
+	if (ret < 0)
+		goto bye;
+
+	/*
+	 * Tweak configuration based on system architecture, module
+	 * parameters, etc.
+	 */
+	ret = adap_init0_tweaks(adapter);
+	if (ret < 0)
+		goto bye;
+
+	/*
+	 * And finally tell the firmware to initialize itself using the
+	 * parameters from the Configuration File.
+	 */
+	ret = t4_fw_initialize(adapter, adapter->mbox);
+	if (ret < 0)
+		goto bye;
+
+	/* Emit Firmware Configuration File information and return
+	 * successfully.
+	 */
+	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
+		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
+		 config_name, finiver, cfcsum);
+	return 0;
+
+	/*
+	 * Something bad happened.  Return the error ...  (If the "error"
+	 * is that there's no Configuration File on the adapter we don't
+	 * want to issue a warning since this is fairly common.)
+	 */
+bye:
+	if (config_issued && ret != -ENOENT)
+		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
+			 config_name, -ret);
+	return ret;
+}
+
+static struct fw_info fw_info_array[] = {
+	{
+		.chip = CHELSIO_T4,
+		.fs_name = FW4_CFNAME,
+		.fw_mod_name = FW4_FNAME,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T4,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
+			.intfver_nic = FW_INTFVER(T4, NIC),
+			.intfver_vnic = FW_INTFVER(T4, VNIC),
+			.intfver_ri = FW_INTFVER(T4, RI),
+			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
+			.intfver_fcoe = FW_INTFVER(T4, FCOE),
+		},
+	}, {
+		.chip = CHELSIO_T5,
+		.fs_name = FW5_CFNAME,
+		.fw_mod_name = FW5_FNAME,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T5,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+			.intfver_nic = FW_INTFVER(T5, NIC),
+			.intfver_vnic = FW_INTFVER(T5, VNIC),
+			.intfver_ri = FW_INTFVER(T5, RI),
+			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
+			.intfver_fcoe = FW_INTFVER(T5, FCOE),
+		},
+	}, {
+		.chip = CHELSIO_T6,
+		.fs_name = FW6_CFNAME,
+		.fw_mod_name = FW6_FNAME,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T6,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+			.intfver_nic = FW_INTFVER(T6, NIC),
+			.intfver_vnic = FW_INTFVER(T6, VNIC),
+			.intfver_ofld = FW_INTFVER(T6, OFLD),
+			.intfver_ri = FW_INTFVER(T6, RI),
+			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
+			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+			.intfver_fcoe = FW_INTFVER(T6, FCOE),
+		},
+	}
+
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+		if (fw_info_array[i].chip == chip)
+			return &fw_info_array[i];
+	}
+	return NULL;
+}
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ */
+static int adap_init0(struct adapter *adap)
+{
+	int ret;
+	u32 v, port_vec;
+	enum dev_state state;
+	u32 params[7], val[7];
+	struct fw_caps_config_cmd caps_cmd;
+	int reset = 1;
+
+	/* Grab Firmware Device Log parameters as early as possible so we have
+	 * access to it for debugging, etc.
+	 */
+	ret = t4_init_devlog_params(adap);
+	if (ret < 0)
+		return ret;
+
+	/* Contact FW, advertising Master capability */
+	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+	if (ret < 0) {
+		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+			ret);
+		return ret;
+	}
+	if (ret == adap->mbox)
+		adap->flags |= MASTER_PF;
+
+	/*
+	 * If we're the Master PF Driver and the device is uninitialized,
+	 * then let's consider upgrading the firmware ...  (We always want
+	 * to check the firmware version number in order to A. get it for
+	 * later reporting and B. to warn if the currently loaded firmware
+	 * is excessively mismatched relative to the driver.)
+	 */
+	t4_get_fw_version(adap, &adap->params.fw_vers);
+	t4_get_tp_version(adap, &adap->params.tp_vers);
+	ret = t4_check_fw_version(adap);
+	/* If firmware is too old (not supported by driver) force an update. */
+	if (ret)
+		state = DEV_STATE_UNINIT;
+	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+		struct fw_info *fw_info;
+		struct fw_hdr *card_fw;
+		const struct firmware *fw;
+		const u8 *fw_data = NULL;
+		unsigned int fw_size = 0;
+
+		/* This is the firmware whose headers the driver was compiled
+		 * against
+		 */
+		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
+		if (fw_info == NULL) {
+			dev_err(adap->pdev_dev,
+				"unable to get firmware info for chip %d.\n",
+				CHELSIO_CHIP_VERSION(adap->params.chip));
+			return -EINVAL;
+		}
+
+		/* allocate memory to read the header of the firmware on the
+		 * card
+		 */
+		card_fw = t4_alloc_mem(sizeof(*card_fw));
+		if (!card_fw) {
+			ret = -ENOMEM;
+			goto bye;
+		}
+
+		/* Get FW from /lib/firmware/ */
+		ret = request_firmware(&fw, fw_info->fw_mod_name,
+				       adap->pdev_dev);
+		if (ret < 0) {
+			dev_err(adap->pdev_dev,
+				"unable to load firmware image %s, error %d\n",
+				fw_info->fw_mod_name, ret);
+		} else {
+			fw_data = fw->data;
+			fw_size = fw->size;
+		}
+
+		/* upgrade FW logic */
+		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
+				 state, &reset);
+
+		/* Cleaning up */
+		release_firmware(fw);
+		t4_free_mem(card_fw);
+
+		if (ret < 0)
+			goto bye;
+	}
+
+	/*
+	 * Grab VPD parameters.  This should be done after we establish a
+	 * connection to the firmware since some of the VPD parameters
+	 * (notably the Core Clock frequency) are retrieved via requests to
+	 * the firmware.  On the other hand, we need these fairly early on
+	 * so we do this right after getting ahold of the firmware.
+	 */
+	ret = t4_get_vpd_params(adap, &adap->params.vpd);
+	if (ret < 0)
+		goto bye;
+
+	/*
+	 * Find out what ports are available to us.  Note that we need to do
+	 * this before calling adap_init0_no_config() since it needs nports
+	 * and portvec ...
+	 */
+	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+	if (ret < 0)
+		goto bye;
+
+	adap->params.nports = hweight32(port_vec);
+	adap->params.portvec = port_vec;
+
+	/* If the firmware is initialized already, emit a simple note to that
+	 * effect. Otherwise, it's time to try initializing the adapter.
+	 */
+	if (state == DEV_STATE_INIT) {
+		dev_info(adap->pdev_dev, "Coming up as %s: "\
+			 "Adapter already initialized\n",
+			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+	} else {
+		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
+			 "Initializing adapter\n");
+
+		/* Find out whether we're dealing with a version of the
+		 * firmware which has configuration file support.
+		 */
+		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+				      params, val);
+
+		/* If the firmware doesn't support Configuration Files,
+		 * return an error.
+		 */
+		if (ret < 0) {
+			dev_err(adap->pdev_dev, "firmware doesn't support "
+				"Firmware Configuration Files\n");
+			goto bye;
+		}
+
+		/* The firmware provides us with a memory buffer where we can
+		 * load a Configuration File from the host if we want to
+		 * override the Configuration File in flash.
+		 */
+		ret = adap_init0_config(adap, reset);
+		if (ret == -ENOENT) {
+			dev_err(adap->pdev_dev, "no Configuration File "
+				"present on adapter.\n");
+			goto bye;
+		}
+		if (ret < 0) {
+			dev_err(adap->pdev_dev, "could not initialize "
+				"adapter, error %d\n", -ret);
+			goto bye;
+		}
+	}
+
+	/* Give the SGE code a chance to pull in anything that it needs ...
+	 * Note that this must be called after we retrieve our VPD parameters
+	 * in order to know how to convert core ticks to seconds, etc.
+	 */
+	ret = t4_sge_init(adap);
+	if (ret < 0)
+		goto bye;
+
+	if (is_bypass_device(adap->pdev->device))
+		adap->params.bypass = 1;
+
+	/*
+	 * Grab some of our basic fundamental operating parameters.
+	 */
+#define FW_PARAM_DEV(param) \
+	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+	FW_PARAMS_PARAM_Y_V(0) | \
+	FW_PARAMS_PARAM_Z_V(0))
+
+	params[0] = FW_PARAM_PFVF(EQ_START);
+	params[1] = FW_PARAM_PFVF(L2T_START);
+	params[2] = FW_PARAM_PFVF(L2T_END);
+	params[3] = FW_PARAM_PFVF(FILTER_START);
+	params[4] = FW_PARAM_PFVF(FILTER_END);
+	params[5] = FW_PARAM_PFVF(IQFLINT_START);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
+	if (ret < 0)
+		goto bye;
+	adap->sge.egr_start = val[0];
+	adap->l2t_start = val[1];
+	adap->l2t_end = val[2];
+	adap->tids.ftid_base = val[3];
+	adap->tids.nftids = val[4] - val[3] + 1;
+	adap->sge.ingr_start = val[5];
+
+	/* qids (ingress/egress) returned from firmware can be anywhere
+	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+	 * Hence driver needs to allocate memory for this range to
+	 * store the queue info. Get the highest IQFLINT/EQ index returned
+	 * in FW_EQ_*_CMD.alloc command.
+	 */
+	params[0] = FW_PARAM_PFVF(EQ_END);
+	params[1] = FW_PARAM_PFVF(IQFLINT_END);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+	if (ret < 0)
+		goto bye;
+	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
+	if (!adap->sge.egr_map) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+
+	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+	if (!adap->sge.ingr_map) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+
+	/* Allocate the memory for the various egress queue bitmaps,
+	 * i.e. starving_fl, txq_maperr and blocked_fl.
+	 */
+	adap->sge.starving_fl =	kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+					sizeof(long), GFP_KERNEL);
+	if (!adap->sge.starving_fl) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+
+	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+				       sizeof(long), GFP_KERNEL);
+	if (!adap->sge.txq_maperr) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+				       sizeof(long), GFP_KERNEL);
+	if (!adap->sge.blocked_fl) {
+		ret = -ENOMEM;
+		goto bye;
+	}
+#endif
+
+	params[0] = FW_PARAM_PFVF(CLIP_START);
+	params[1] = FW_PARAM_PFVF(CLIP_END);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+	if (ret < 0)
+		goto bye;
+	adap->clipt_start = val[0];
+	adap->clipt_end = val[1];
+
+	/* query params related to active filter region */
+	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
+	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+	/* If an active filter region is configured, enable establishing
+	 * offload connections through firmware work requests.
+	 */
+	if ((val[0] != val[1]) && (ret >= 0)) {
+		adap->flags |= FW_OFLD_CONN;
+		adap->tids.aftid_base = val[0];
+		adap->tids.aftid_end = val[1];
+	}
+
+	/* If we're running on newer firmware, let it know that we're
+	 * prepared to deal with encapsulated CPL messages.  Older
+	 * firmware won't understand this and we'll just get
+	 * unencapsulated messages ...
+	 */
+	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+	val[0] = 1;
+	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+
+	/*
+	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+	 * capability.  Earlier versions of the firmware didn't have the
+	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+	 * permission to use ULPTX MEMWRITE DSGL.
+	 */
+	if (is_t4(adap->params.chip)) {
+		adap->params.ulptx_memwrite_dsgl = false;
+	} else {
+		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+				      1, params, val);
+		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+	}
+
+	/*
+	 * Get device capabilities so we can determine what resources we need
+	 * to manage.
+	 */
+	memset(&caps_cmd, 0, sizeof(caps_cmd));
+	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
+	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+			 &caps_cmd);
+	if (ret < 0)
+		goto bye;
+
+	if (caps_cmd.ofldcaps) {
+		/* query offload-related parameters */
+		params[0] = FW_PARAM_DEV(NTID);
+		params[1] = FW_PARAM_PFVF(SERVER_START);
+		params[2] = FW_PARAM_PFVF(SERVER_END);
+		params[3] = FW_PARAM_PFVF(TDDP_START);
+		params[4] = FW_PARAM_PFVF(TDDP_END);
+		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
+				      params, val);
+		if (ret < 0)
+			goto bye;
+		adap->tids.ntids = val[0];
+		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+		adap->tids.stid_base = val[1];
+		adap->tids.nstids = val[2] - val[1] + 1;
+		/*
+		 * Set up the server filter region.  Divide the available
+		 * filter region into two parts: regular filters get 1/3rd and
+		 * server filters get the remaining 2/3rds.  This is only done
+		 * when the FW_OFLD_CONN workaround path is enabled.
+		 * 1. Regular filters.
+		 * 2. Server filters: these are special filters used to
+		 *    redirect SYN packets to the offload queue.
+		 */
+		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+			adap->tids.sftid_base = adap->tids.ftid_base +
+					DIV_ROUND_UP(adap->tids.nftids, 3);
+			adap->tids.nsftids = adap->tids.nftids -
+					 DIV_ROUND_UP(adap->tids.nftids, 3);
+			adap->tids.nftids = adap->tids.sftid_base -
+						adap->tids.ftid_base;
+		}
+		adap->vres.ddp.start = val[3];
+		adap->vres.ddp.size = val[4] - val[3] + 1;
+		adap->params.ofldq_wr_cred = val[5];
+
+		adap->params.offload = 1;
+	}
+	if (caps_cmd.rdmacaps) {
+		params[0] = FW_PARAM_PFVF(STAG_START);
+		params[1] = FW_PARAM_PFVF(STAG_END);
+		params[2] = FW_PARAM_PFVF(RQ_START);
+		params[3] = FW_PARAM_PFVF(RQ_END);
+		params[4] = FW_PARAM_PFVF(PBL_START);
+		params[5] = FW_PARAM_PFVF(PBL_END);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
+				      params, val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.stag.start = val[0];
+		adap->vres.stag.size = val[1] - val[0] + 1;
+		adap->vres.rq.start = val[2];
+		adap->vres.rq.size = val[3] - val[2] + 1;
+		adap->vres.pbl.start = val[4];
+		adap->vres.pbl.size = val[5] - val[4] + 1;
+
+		params[0] = FW_PARAM_PFVF(SQRQ_START);
+		params[1] = FW_PARAM_PFVF(SQRQ_END);
+		params[2] = FW_PARAM_PFVF(CQ_START);
+		params[3] = FW_PARAM_PFVF(CQ_END);
+		params[4] = FW_PARAM_PFVF(OCQ_START);
+		params[5] = FW_PARAM_PFVF(OCQ_END);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
+				      val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.qp.start = val[0];
+		adap->vres.qp.size = val[1] - val[0] + 1;
+		adap->vres.cq.start = val[2];
+		adap->vres.cq.size = val[3] - val[2] + 1;
+		adap->vres.ocq.start = val[4];
+		adap->vres.ocq.size = val[5] - val[4] + 1;
+
+		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
+		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
+				      val);
+		if (ret < 0) {
+			adap->params.max_ordird_qp = 8;
+			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
+			ret = 0;
+		} else {
+			adap->params.max_ordird_qp = val[0];
+			adap->params.max_ird_adapter = val[1];
+		}
+		dev_info(adap->pdev_dev,
+			 "max_ordird_qp %d max_ird_adapter %d\n",
+			 adap->params.max_ordird_qp,
+			 adap->params.max_ird_adapter);
+	}
+	if (caps_cmd.iscsicaps) {
+		params[0] = FW_PARAM_PFVF(ISCSI_START);
+		params[1] = FW_PARAM_PFVF(ISCSI_END);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+				      params, val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.iscsi.start = val[0];
+		adap->vres.iscsi.size = val[1] - val[0] + 1;
+	}
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+	/* The MTU/MSS Table is initialized by now, so load their values.  If
+	 * we're initializing the adapter, then we'll make any modifications
+	 * we want to the MTU/MSS Table and also initialize the congestion
+	 * parameters.
+	 */
+	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+	if (state != DEV_STATE_INIT) {
+		int i;
+
+		/* The default MTU Table contains values 1492 and 1500.
+		 * However, for TCP, it's better to have two values which are
+		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+		 * This allows us to have a TCP Data Payload which is a
+		 * multiple of 8 regardless of what combination of TCP Options
+		 * are in use (always a multiple of 4 bytes) which is
+		 * important for performance reasons.  For instance, if no
+		 * options are in use, then we have a 20-byte IP header and a
+		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
+		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+		 * which is not a multiple of 8.  So using an MSS of 1488 in
+		 * this case results in a TCP Data Payload of 1448 bytes which
+		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
+		 * Stamps have been negotiated, then an MTU of 1500 bytes
+		 * results in a TCP Data Payload of 1448 bytes which, as
+		 * above, is a multiple of 8 bytes ...
+		 */
+		for (i = 0; i < NMTUS; i++)
+			if (adap->params.mtus[i] == 1492) {
+				adap->params.mtus[i] = 1488;
+				break;
+			}
+
+		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+			     adap->params.b_wnd);
+	}
+	t4_init_sge_params(adap);
+	adap->flags |= FW_OK;
+	t4_init_tp_params(adap);
+	return 0;
+
+	/*
+	 * Something bad happened.  If a command timed out or failed with EIO,
+	 * the firmware is not operating within its spec or something
+	 * catastrophic happened to the HW/FW, so stop issuing commands.
+	 */
+bye:
+	kfree(adap->sge.egr_map);
+	kfree(adap->sge.ingr_map);
+	kfree(adap->sge.starving_fl);
+	kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+	kfree(adap->sge.blocked_fl);
+#endif
+	if (ret != -ETIMEDOUT && ret != -EIO)
+		t4_fw_bye(adap, adap->mbox);
+	return ret;
+}
+
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+					 pci_channel_state_t state)
+{
+	int i;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap)
+		goto out;
+
+	rtnl_lock();
+	adap->flags &= ~FW_OK;
+	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+	spin_lock(&adap->stats_lock);
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+
+		netif_device_detach(dev);
+		netif_carrier_off(dev);
+	}
+	spin_unlock(&adap->stats_lock);
+	disable_interrupts(adap);
+	if (adap->flags & FULL_INIT_DONE)
+		cxgb_down(adap);
+	rtnl_unlock();
+	if ((adap->flags & DEV_ENABLED)) {
+		pci_disable_device(pdev);
+		adap->flags &= ~DEV_ENABLED;
+	}
+out:	return state == pci_channel_io_perm_failure ?
+		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+	int i, ret;
+	struct fw_caps_config_cmd c;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap) {
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		return PCI_ERS_RESULT_RECOVERED;
+	}
+
+	if (!(adap->flags & DEV_ENABLED)) {
+		if (pci_enable_device(pdev)) {
+			dev_err(&pdev->dev, "Cannot reenable PCI "
+					    "device after reset\n");
+			return PCI_ERS_RESULT_DISCONNECT;
+		}
+		adap->flags |= DEV_ENABLED;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (t4_wait_dev_ready(adap->regs) < 0)
+		return PCI_ERS_RESULT_DISCONNECT;
+	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
+		return PCI_ERS_RESULT_DISCONNECT;
+	adap->flags |= FW_OK;
+	if (adap_init1(adap, &c))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	for_each_port(adap, i) {
+		struct port_info *p = adap2pinfo(adap, i);
+
+		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
+				  NULL, NULL);
+		if (ret < 0)
+			return PCI_ERS_RESULT_DISCONNECT;
+		p->viid = ret;
+		p->xact_addr_filt = -1;
+	}
+
+	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+		     adap->params.b_wnd);
+	setup_memwin(adap);
+	if (cxgb_up(adap))
+		return PCI_ERS_RESULT_DISCONNECT;
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+	int i;
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap)
+		return;
+
+	rtnl_lock();
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+
+		if (netif_running(dev)) {
+			link_start(dev);
+			cxgb_set_rxmode(dev);
+		}
+		netif_device_attach(dev);
+	}
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers cxgb4_eeh = {
+	.error_detected = eeh_err_detected,
+	.slot_reset     = eeh_slot_reset,
+	.resume         = eeh_resume,
+};
+
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+}
+
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+			     unsigned int us, unsigned int cnt,
+			     unsigned int size, unsigned int iqe_size)
+{
+	q->adap = adap;
+	cxgb4_set_rspq_intr_params(q, us, cnt);
+	q->iqe_len = iqe_size;
+	q->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and type
+ * of ports we found and the number of available CPUs.  Most settings can be
+ * modified by the admin prior to actual use.
+ */
+static void cfg_queues(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	int i, n10g = 0, qidx = 0;
+#ifndef CONFIG_CHELSIO_T4_DCB
+	int q10g = 0;
+#endif
+	int ciq_size;
+
+	for_each_port(adap, i)
+		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* For Data Center Bridging support we need to be able to support up
+	 * to 8 Traffic Priorities, each of which will be assigned to its
+	 * own TX Queue in order to prevent Head-Of-Line Blocking.
+	 */
+	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
+		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
+			MAX_ETH_QSETS, adap->params.nports * 8);
+		BUG_ON(1);
+	}
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = qidx;
+		pi->nqsets = 8;
+		qidx += pi->nqsets;
+	}
+#else /* !CONFIG_CHELSIO_T4_DCB */
+	/*
+	 * We default to one queue per non-10G port and up to as many queues
+	 * as there are cores per 10G port.
+	 */
+	if (n10g)
+		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+	if (q10g > netif_get_num_default_rss_queues())
+		q10g = netif_get_num_default_rss_queues();
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = qidx;
+		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+		qidx += pi->nqsets;
+	}
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+
+	s->ethqsets = qidx;
+	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
+
+	if (is_offload(adap)) {
+		/*
+		 * For offload we use one queue per channel if all ports are up
+		 * to 1G; otherwise we divide all available queues amongst the
+		 * channels, capped by the number of available cores.
+		 */
+		if (n10g) {
+			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+				  num_online_cpus());
+			s->ofldqsets = roundup(i, adap->params.nports);
+		} else
+			s->ofldqsets = adap->params.nports;
+		/* For RDMA one Rx queue per channel suffices */
+		s->rdmaqs = adap->params.nports;
+		/* Try and allow at least 1 CIQ per cpu rounding down
+		 * to the number of ports, with a minimum of 1 per port.
+		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
+		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
+		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
+		 */
+		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
+		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
+				adap->params.nports;
+		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+		struct sge_eth_rxq *r = &s->ethrxq[i];
+
+		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
+		r->fl.size = 72;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+		s->ethtxq[i].q.size = 1024;
+
+	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+		s->ctrlq[i].q.size = 512;
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+		s->ofldtxq[i].q.size = 1024;
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+		struct sge_ofld_rxq *r = &s->ofldrxq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
+		r->rspq.uld = CXGB4_ULD_ISCSI;
+		r->fl.size = 72;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+		struct sge_ofld_rxq *r = &s->rdmarxq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
+		r->rspq.uld = CXGB4_ULD_RDMA;
+		r->fl.size = 72;
+	}
+
+	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+	if (ciq_size > SGE_MAX_IQ_SIZE) {
+		CH_WARN(adap, "CIQ size too small for available IQs\n");
+		ciq_size = SGE_MAX_IQ_SIZE;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+		struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+		r->rspq.uld = CXGB4_ULD_RDMA;
+	}
+
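+	/* Finally the firmware event queue and the forwarded-interrupt queue:
+	 * no interrupt holdoff (0 us) and a single-entry count threshold.
+	 */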
+	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * n is assumed to be large enough to provide at least one queue per port.
+ */
+static void reduce_ethqs(struct adapter *adap, int n)
+{
+	int i;
+	struct port_info *pi;
+
+	while (n < adap->sge.ethqsets)
+		for_each_port(adap, i) {
+			pi = adap2pinfo(adap, i);
+			if (pi->nqsets > 1) {
+				pi->nqsets--;
+				adap->sge.ethqsets--;
+				if (adap->sge.ethqsets <= n)
+					break;
+			}
+		}
+
+	n = 0;
+	for_each_port(adap, i) {
+		pi = adap2pinfo(adap, i);
+		pi->first_qset = n;
+		n += pi->nqsets;
+	}
+}
+
+/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+#define EXTRA_VECS 2
+
+static int enable_msix(struct adapter *adap)
+{
+	int ofld_need = 0;
+	int i, want, need, allocated;
+	struct sge *s = &adap->sge;
+	unsigned int nchan = adap->params.nports;
+	struct msix_entry *entries;
+
+	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
+			  GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_INGQ + 1; ++i)
+		entries[i].entry = i;
+
+	want = s->max_ethqsets + EXTRA_VECS;
+	if (is_offload(adap)) {
+		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
+		/* need nchan for each possible ULD */
+		ofld_need = 3 * nchan;
+	}
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
+	 * each port.
+	 */
+	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+#else
+	need = adap->params.nports + EXTRA_VECS + ofld_need;
+#endif
+	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
+	if (allocated < 0) {
+		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
+			 " not using MSI-X\n");
+		kfree(entries);
+		return allocated;
+	}
+
+	/* Distribute available vectors to the various queue groups.
+	 * Every group gets its minimum requirement and NIC gets top
+	 * priority for leftovers.
+	 */
+	i = allocated - EXTRA_VECS - ofld_need;
+	if (i < s->max_ethqsets) {
+		s->max_ethqsets = i;
+		if (i < s->ethqsets)
+			reduce_ethqs(adap, i);
+	}
+	if (is_offload(adap)) {
+		if (allocated < want) {
+			s->rdmaqs = nchan;
+			s->rdmaciqs = nchan;
+		}
+
+		/* leftovers go to OFLD */
+		i = allocated - EXTRA_VECS - s->max_ethqsets -
+		    s->rdmaqs - s->rdmaciqs;
+		s->ofldqsets = (i / nchan) * nchan;  /* round down */
+	}
+	for (i = 0; i < allocated; ++i)
+		adap->msix_info[i].vec = entries[i].vector;
+	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+		 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+		 allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+		 s->rdmaciqs);
+
+	kfree(entries);
+	return 0;
+}
+
+#undef EXTRA_VECS
+
+static int init_rss(struct adapter *adap)
+{
+	unsigned int i;
+	int err;
+
+	err = t4_init_rss_mode(adap, adap->mbox);
+	if (err)
+		return err;
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+		if (!pi->rss)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void print_port_info(const struct net_device *dev)
+{
+	char buf[80];
+	char *bufp = buf;
+	const char *spd = "";
+	const struct port_info *pi = netdev_priv(dev);
+	const struct adapter *adap = pi->adapter;
+
+	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+		spd = " 2.5 GT/s";
+	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+		spd = " 5 GT/s";
+	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
+		spd = " 8 GT/s";
+
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+		bufp += sprintf(bufp, "100/");
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+		bufp += sprintf(bufp, "1000/");
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+		bufp += sprintf(bufp, "10G/");
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+		bufp += sprintf(bufp, "40G/");
+	if (bufp != buf)
+		--bufp;
+	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
+
+	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+		    adap->params.vpd.id,
+		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
+		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+		    (adap->flags & USING_MSIX) ? " MSI-X" :
+		    (adap->flags & USING_MSI) ? " MSI" : "");
+	netdev_info(dev, "S/N: %s, P/N: %s\n",
+		    adap->params.vpd.sn, adap->params.vpd.pn);
+}
+
+static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+}
+
+/*
+ * Free the following resources:
+ * - memory used for tables
+ * - MSI/MSI-X
+ * - net devices
+ * - resources FW is holding for us
+ */
+static void free_some_resources(struct adapter *adapter)
+{
+	unsigned int i;
+
+	t4_free_mem(adapter->l2t);
+	t4_free_mem(adapter->tids.tid_tab);
+	kfree(adapter->sge.egr_map);
+	kfree(adapter->sge.ingr_map);
+	kfree(adapter->sge.starving_fl);
+	kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+	kfree(adapter->sge.blocked_fl);
+#endif
+	disable_msi(adapter);
+
+	for_each_port(adapter, i)
+		if (adapter->port[i]) {
+			struct port_info *pi = adap2pinfo(adapter, i);
+
+			if (pi->viid != 0)
+				t4_free_vi(adapter, adapter->mbox, adapter->pf,
+					   0, pi->viid);
+			kfree(adap2pinfo(adapter, i)->rss);
+			free_netdev(adapter->port[i]);
+		}
+	if (adapter->flags & FW_OK)
+		t4_fw_bye(adapter, adapter->pf);
+}
+
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+#define SEGMENT_SIZE 128
+
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+	u16 device_id;
+
+	/* Retrieve adapter's device ID */
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+
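+	/* The upper nibble of the device ID encodes the chip generation
+	 * (0x4xxx = T4, 0x5xxx = T5, 0x6xxx = T6), which is what the
+	 * switch below keys on.
+	 */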
+	switch (device_id >> 12) {
+	case CHELSIO_T4:
+		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+	case CHELSIO_T5:
+		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+	case CHELSIO_T6:
+		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+	default:
+		dev_err(&pdev->dev, "Device %d is not supported\n",
+			device_id);
+	}
+	return -EINVAL;
+}
+
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int func, i, err, s_qpp, qpp, num_seg;
+	struct port_info *pi;
+	bool highdma = false;
+	struct adapter *adapter = NULL;
+	void __iomem *regs;
+	u32 whoami, pl_rev;
+	enum chip_type chip;
+
+	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		/* Just info, some other driver may have claimed the device. */
+		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+		return err;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto out_release_regions;
+	}
+
+	regs = pci_ioremap_bar(pdev, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	err = t4_wait_dev_ready(regs);
+	if (err < 0)
+		goto out_unmap_bar0;
+
+	/* We control everything through one PF */
+	whoami = readl(regs + PL_WHOAMI_A);
+	pl_rev = REV_G(readl(regs + PL_REV_A));
+	chip = get_chip_type(pdev, pl_rev);
+	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+	if (func != ent->driver_data) {
+		iounmap(regs);
+		pci_disable_device(pdev);
+		pci_save_state(pdev);        /* to restore SR-IOV later */
+		goto sriov;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		highdma = true;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+				"coherent allocations\n");
+			goto out_unmap_bar0;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto out_unmap_bar0;
+		}
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	enable_pcie_relaxed_ordering(pdev);
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_unmap_bar0;
+	}
+
+	adapter->workq = create_singlethread_workqueue("cxgb4");
+	if (!adapter->workq) {
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
+	/* PCI device has been enabled */
+	adapter->flags |= DEV_ENABLED;
+
+	adapter->regs = regs;
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+	adapter->mbox = func;
+	adapter->pf = func;
+	adapter->msg_enable = dflt_msg_enable;
+	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+	spin_lock_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->tid_release_lock);
+	spin_lock_init(&adapter->win0_lock);
+
+	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+	INIT_WORK(&adapter->db_full_task, process_db_full);
+	INIT_WORK(&adapter->db_drop_task, process_db_drop);
+
+	err = t4_prep_adapter(adapter);
+	if (err)
+		goto out_free_adapter;
+
+
+	if (!is_t4(adapter->params.chip)) {
+		s_qpp = (QUEUESPERPAGEPF0_S +
+			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+			adapter->pf);
+		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
+		num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+		/* Each segment is 128 B in size. Write coalescing is enabled
+		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
+		 * for the queue is less than the number of segments that fit
+		 * in a page.
+		 */
+		if (qpp > num_seg) {
+			dev_err(&pdev->dev,
+				"Incorrect number of egress queues per page\n");
+			err = -EINVAL;
+			goto out_free_adapter;
+		}
+		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+		pci_resource_len(pdev, 2));
+		if (!adapter->bar2) {
+			dev_err(&pdev->dev, "cannot map device bar2 region\n");
+			err = -ENOMEM;
+			goto out_free_adapter;
+		}
+	}
+
+	setup_memwin(adapter);
+	err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
+	setup_memwin_rdma(adapter);
+	if (err)
+		goto out_unmap_bar;
+
+	/* configure SGE_STAT_CFG_A to read WC stats */
+	if (!is_t4(adapter->params.chip))
+		t4_write_reg(adapter, SGE_STAT_CFG_A,
+			     STATSOURCE_T5_V(7) | STATMODE_V(0));
+
+	for_each_port(adapter, i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev_mq(sizeof(struct port_info),
+					   MAX_ETH_QSETS);
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adapter->port[i] = netdev;
+		pi = netdev_priv(netdev);
+		pi->adapter = adapter;
+		pi->xact_addr_filt = -1;
+		pi->port_id = i;
+		netdev->irq = pdev->irq;
+
+		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_RXCSUM | NETIF_F_RXHASH |
+			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+		if (highdma)
+			netdev->hw_features |= NETIF_F_HIGHDMA;
+		netdev->features |= netdev->hw_features;
+		netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+		netdev->priv_flags |= IFF_UNICAST_FLT;
+
+		netdev->netdev_ops = &cxgb4_netdev_ops;
+#ifdef CONFIG_CHELSIO_T4_DCB
+		netdev->dcbnl_ops = &cxgb4_dcb_ops;
+		cxgb4_dcb_state_init(netdev);
+#endif
+		cxgb4_set_ethtool_ops(netdev);
+	}
+
+	pci_set_drvdata(pdev, adapter);
+
+	if (adapter->flags & FW_OK) {
+		err = t4_port_init(adapter, func, func, 0);
+		if (err)
+			goto out_free_dev;
+	} else if (adapter->params.nports == 1) {
+		/* If we don't have a connection to the firmware -- possibly
+		 * because of an error -- grab the raw VPD parameters so we
+		 * can set the proper MAC Address on the debug network
+		 * interface that we've created.
+		 */
+		u8 hw_addr[ETH_ALEN];
+		u8 *na = adapter->params.vpd.na;
+
+		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+		if (!err) {
+			for (i = 0; i < ETH_ALEN; i++)
+				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+					      hex2val(na[2 * i + 1]));
+			t4_set_hw_addr(adapter, 0, hw_addr);
+		}
+	}
+
+	/* Configure queues and allocate tables now; they can be needed as
+	 * soon as the first register_netdev completes.
+	 */
+	cfg_queues(adapter);
+
+	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
+	if (!adapter->l2t) {
+		/* We tolerate a lack of L2T, giving up some functionality */
+		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+		adapter->params.offload = 0;
+	}
+
+#if IS_ENABLED(CONFIG_IPV6)
+	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+					  adapter->clipt_end);
+	if (!adapter->clipt) {
+		/* We tolerate a lack of clip_table, giving up
+		 * some functionality
+		 */
+		dev_warn(&pdev->dev,
+			 "could not allocate Clip table, continuing\n");
+		adapter->params.offload = 0;
+	}
+#endif
+	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+		dev_warn(&pdev->dev, "could not allocate TID table, "
+			 "continuing\n");
+		adapter->params.offload = 0;
+	}
+
+	if (is_offload(adapter)) {
+		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+			u32 hash_base, hash_reg;
+
+			if (chip <= CHELSIO_T5) {
+				hash_reg = LE_DB_TID_HASHBASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base / 4;
+			} else {
+				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base;
+			}
+		}
+	}
+
+	/* See what interrupts we'll be using */
+	if (msi > 1 && enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else if (msi > 0 && pci_enable_msi(pdev) == 0)
+		adapter->flags |= USING_MSI;
+
+	err = init_rss(adapter);
+	if (err)
+		goto out_free_dev;
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
+		err = register_netdev(adapter->port[i]);
+		if (err)
+			break;
+		adapter->chan_map[pi->tx_chan] = i;
+		print_port_info(adapter->port[i]);
+	}
+	if (i == 0) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto out_free_dev;
+	}
+	if (err) {
+		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+		err = 0;
+	}
+
+	if (cxgb4_debugfs_root) {
+		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+							   cxgb4_debugfs_root);
+		setup_debugfs(adapter);
+	}
+
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	pdev->needs_freset = 1;
+
+	if (is_offload(adapter))
+		attach_ulds(adapter);
+
+sriov:
+#ifdef CONFIG_PCI_IOV
+	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+			dev_info(&pdev->dev,
+				 "instantiated %u virtual functions\n",
+				 num_vf[func]);
+#endif
+	return 0;
+
+ out_free_dev:
+	free_some_resources(adapter);
+ out_unmap_bar:
+	if (!is_t4(adapter->params.chip))
+		iounmap(adapter->bar2);
+ out_free_adapter:
+	if (adapter->workq)
+		destroy_workqueue(adapter->workq);
+
+	kfree(adapter);
+ out_unmap_bar0:
+	iounmap(regs);
+ out_disable_device:
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+ out_release_regions:
+	pci_release_regions(pdev);
+	return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_PCI_IOV
+	pci_disable_sriov(pdev);
+
+#endif
+
+	if (adapter) {
+		int i;
+
+		/* Tear down per-adapter Work Queue first since it can contain
+		 * references to our adapter data structure.
+		 */
+		destroy_workqueue(adapter->workq);
+
+		if (is_offload(adapter))
+			detach_ulds(adapter);
+
+		disable_interrupts(adapter);
+
+		for_each_port(adapter, i)
+			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+				unregister_netdev(adapter->port[i]);
+
+		debugfs_remove_recursive(adapter->debugfs_root);
+
+		/* If we allocated filters, free up state associated with any
+		 * valid filters ...
+		 */
+		if (adapter->tids.ftid_tab) {
+			struct filter_entry *f = &adapter->tids.ftid_tab[0];
+			for (i = 0; i < (adapter->tids.nftids +
+					adapter->tids.nsftids); i++, f++)
+				if (f->valid)
+					clear_filter(adapter, f);
+		}
+
+		if (adapter->flags & FULL_INIT_DONE)
+			cxgb_down(adapter);
+
+		free_some_resources(adapter);
+#if IS_ENABLED(CONFIG_IPV6)
+		t4_cleanup_clip_tbl(adapter);
+#endif
+		iounmap(adapter->regs);
+		if (!is_t4(adapter->params.chip))
+			iounmap(adapter->bar2);
+		pci_disable_pcie_error_reporting(pdev);
+		if ((adapter->flags & DEV_ENABLED)) {
+			pci_disable_device(pdev);
+			adapter->flags &= ~DEV_ENABLED;
+		}
+		pci_release_regions(pdev);
+		synchronize_rcu();
+		kfree(adapter);
+	} else
+		pci_release_regions(pdev);
+}
+
+static struct pci_driver cxgb4_driver = {
+	.name     = KBUILD_MODNAME,
+	.id_table = cxgb4_pci_tbl,
+	.probe    = init_one,
+	.remove   = remove_one,
+	.shutdown = remove_one,
+	.err_handler = &cxgb4_eeh,
+};
+
+static int __init cxgb4_init_module(void)
+{
+	int ret;
+
+	/* Debugfs support is optional, just warn if this fails */
+	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cxgb4_debugfs_root)
+		pr_warn("could not create debugfs entry, continuing\n");
+
+	ret = pci_register_driver(&cxgb4_driver);
+	if (ret < 0)
+		debugfs_remove(cxgb4_debugfs_root);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (!inet6addr_registered) {
+		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+		inet6addr_registered = true;
+	}
+#endif
+
+	return ret;
+}
+
+static void __exit cxgb4_cleanup_module(void)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	if (inet6addr_registered) {
+		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+		inet6addr_registered = false;
+	}
+#endif
+	pci_unregister_driver(&cxgb4_driver);
+	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
+}
+
+module_init(cxgb4_init_module);
+module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
new file mode 100644
index 0000000..cf711d5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -0,0 +1,322 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_OFLD_H
+#define __CXGB4_OFLD_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/atomic.h>
+#include "cxgb4.h"
+
+/* CPL message priority levels */
+enum {
+	CPL_PRIORITY_DATA     = 0,  /* data messages */
+	CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
+	CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
+	CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
+	CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
+	CPL_PRIORITY_CONTROL  = 1   /* control messages */
+};
+
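+/* Helpers that initialize the common firmware work-request header of a work
+ * request: opcode, immediate length, length in 16-byte units and flow/TID id.
+ */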
+#define INIT_TP_WR(w, tid) do { \
+	(w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \
+			      FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \
+	(w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \
+			       FW_WR_FLOWID_V(tid)); \
+	(w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_CPL(w, cpl, tid) do { \
+	INIT_TP_WR(w, tid); \
+	OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+	(w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \
+			      FW_WR_ATOMIC_V(atomic)); \
+	(w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \
+			       FW_WR_FLOWID_V(tid)); \
+	(w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+/* Special asynchronous notification message */
+#define CXGB4_MSG_AN ((void *)1)
+
+struct serv_entry {
+	void *data;
+};
+
+union aopen_entry {
+	void *data;
+	union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of the TID, server TID,
+ * and active-open TID tables.  The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+	void **tid_tab;
+	unsigned int ntids;
+
+	struct serv_entry *stid_tab;
+	unsigned long *stid_bmap;
+	unsigned int nstids;
+	unsigned int stid_base;
+	unsigned int hash_base;
+
+	union aopen_entry *atid_tab;
+	unsigned int natids;
+	unsigned int atid_base;
+
+	struct filter_entry *ftid_tab;
+	unsigned int nftids;
+	unsigned int ftid_base;
+	unsigned int aftid_base;
+	unsigned int aftid_end;
+	/* Server filter region */
+	unsigned int sftid_base;
+	unsigned int nsftids;
+
+	spinlock_t atid_lock ____cacheline_aligned_in_smp;
+	union aopen_entry *afree;
+	unsigned int atids_in_use;
+
+	spinlock_t stid_lock;
+	unsigned int stids_in_use;
+	unsigned int sftids_in_use;
+
+	/* TIDs in the TCAM */
+	atomic_t tids_in_use;
+	/* TIDs in the HASH */
+	atomic_t hash_tids_in_use;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+	return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+	return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
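+/* Server TIDs and server-filter TIDs share one table: regular stids occupy
+ * the first nstids slots and sftids are mapped in after them.
+ */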
+static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
+{
+	/* Is it a server filter TID? */
+	if (t->nsftids && (stid >= t->sftid_base)) {
+		stid -= t->sftid_base;
+		stid += t->nstids;
+	} else {
+		stid -= t->stid_base;
+	}
+
+	return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
+}
+
+static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
+				    unsigned int tid)
+{
+	t->tid_tab[tid] = data;
+	if (t->hash_base && (tid >= t->hash_base))
+		atomic_inc(&t->hash_tids_in_use);
+	else
+		atomic_inc(&t->tids_in_use);
+}
+
+int cxgb4_alloc_atid(struct tid_info *t, void *data);
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
+
+struct in6_addr;
+
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+			__be32 sip, __be16 sport, __be16 vlan,
+			unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+			 const struct in6_addr *sip, __be16 sport,
+			 unsigned int queue);
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+			unsigned int queue, bool ipv6);
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+			       __be32 sip, __be16 sport, __be16 vlan,
+			       unsigned int queue,
+			       unsigned char port, unsigned char mask);
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+			       unsigned int queue, bool ipv6);
+
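+/* Encode the control queue index and CPL priority in the skb's queue
+ * mapping: the low bit carries the priority, the remaining bits the queue.
+ */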
+static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
+{
+	skb_set_queue_mapping(skb, (queue << 1) | prio);
+}
+
+enum cxgb4_uld {
+	CXGB4_ULD_RDMA,
+	CXGB4_ULD_ISCSI,
+	CXGB4_ULD_MAX
+};
+
+enum cxgb4_state {
+	CXGB4_STATE_UP,
+	CXGB4_STATE_START_RECOVERY,
+	CXGB4_STATE_DOWN,
+	CXGB4_STATE_DETACH
+};
+
+enum cxgb4_control {
+	CXGB4_CONTROL_DB_FULL,
+	CXGB4_CONTROL_DB_EMPTY,
+	CXGB4_CONTROL_DB_DROP,
+};
+
+struct pci_dev;
+struct l2t_data;
+struct net_device;
+struct pkt_gl;
+struct tp_tcp_stats;
+
+struct cxgb4_range {
+	unsigned int start;
+	unsigned int size;
+};
+
+struct cxgb4_virt_res {                      /* virtualized HW resources */
+	struct cxgb4_range ddp;
+	struct cxgb4_range iscsi;
+	struct cxgb4_range stag;
+	struct cxgb4_range rq;
+	struct cxgb4_range pbl;
+	struct cxgb4_range qp;
+	struct cxgb4_range cq;
+	struct cxgb4_range ocq;
+};
+
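+/* Byte offset of the on-chip queue (OCQ) region within BAR2, assumed to
+ * occupy the (power-of-two rounded) tail end of the BAR.
+ */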
+#define OCQ_WIN_OFFSET(pdev, vres) \
+	(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
+
+/*
+ * Block of information the LLD provides to ULDs attaching to a device.
+ */
+struct cxgb4_lld_info {
+	struct pci_dev *pdev;                /* associated PCI device */
+	struct l2t_data *l2t;                /* L2 table */
+	struct tid_info *tids;               /* TID table */
+	struct net_device **ports;           /* device ports */
+	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
+	const unsigned short *mtus;          /* MTU table */
+	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
+	const unsigned short *ciq_ids;       /* the ULD's concentrator IQ ids */
+	unsigned short nrxq;                 /* # of Rx queues */
+	unsigned short ntxq;                 /* # of Tx queues */
+	unsigned short nciq;		     /* # of concentrator IQs */
+	unsigned char nchan:4;               /* # of channels */
+	unsigned char nports:4;              /* # of ports */
+	unsigned char wr_cred;               /* WR 16-byte credits */
+	unsigned char adapter_type;          /* type of adapter */
+	unsigned char fw_api_ver;            /* FW API version */
+	unsigned int fw_vers;                /* FW version */
+	unsigned int iscsi_iolen;            /* iSCSI max I/O length */
+	unsigned int cclk_ps;                /* Core clock period in psec */
+	unsigned short udb_density;          /* # of user DB/page */
+	unsigned short ucq_density;          /* # of user CQs/page */
+	unsigned short filt_mode;            /* filter optional components */
+	unsigned short tx_modq[NCHAN];       /* maps each tx channel to a */
+					     /* scheduler queue */
+	void __iomem *gts_reg;               /* address of GTS register */
+	void __iomem *db_reg;                /* address of kernel doorbell */
+	int dbfifo_int_thresh;		     /* doorbell fifo int threshold */
+	unsigned int sge_ingpadboundary;     /* SGE ingress padding boundary */
+	unsigned int sge_egrstatuspagesize;  /* SGE egress status page size */
+	unsigned int sge_pktshift;           /* Padding between CPL and */
+					     /*	packet data */
+	unsigned int pf;		     /* Physical Function we're using */
+	bool enable_fw_ofld_conn;            /* Enable connection through fw */
+					     /* WR */
+	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
+	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
+	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
+	int nodeid;			     /* device numa node id */
+};
+
+struct cxgb4_uld_info {
+	const char *name;
+	void *(*add)(const struct cxgb4_lld_info *p);
+	int (*rx_handler)(void *handle, const __be64 *rsp,
+			  const struct pkt_gl *gl);
+	int (*state_change)(void *handle, enum cxgb4_state new_state);
+	int (*control)(void *handle, enum cxgb4_control control, ...);
+};
+
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+int cxgb4_unregister_uld(enum cxgb4_uld type);
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
+unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_viid(const struct net_device *dev);
+unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
+unsigned int cxgb4_port_idx(const struct net_device *dev);
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+			    unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp);
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6);
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+		      const unsigned int *pgsz_order);
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+				   unsigned int skb_len, unsigned int pull_len);
+int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
+int cxgb4_flush_eq_cache(struct net_device *dev);
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
+u64 cxgb4_read_sge_timestamp(struct net_device *dev);
+
+enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS };
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+			 unsigned int qid,
+			 enum cxgb4_bar2_qtype qtype,
+			 int user,
+			 u64 *pbar2_qoffset,
+			 unsigned int *pbar2_qid);
+
+#endif  /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
new file mode 100644
index 0000000..ac27898
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -0,0 +1,680 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <net/neighbour.h>
+#include "cxgb4.h"
+#include "l2t.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+
+#define VLAN_NONE 0xfff
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
+
+struct l2t_data {
+	unsigned int l2t_start;     /* start index of our piece of the L2T */
+	unsigned int l2t_size;      /* number of entries in l2tab */
+	rwlock_t lock;
+	atomic_t nfree;             /* number of free entries */
+	struct l2t_entry *rover;    /* starting point for next allocation */
+	struct l2t_entry l2tab[0];  /* MUST BE LAST */
+};
+
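+/* The 802.1p priority lives in the top three bits of the stored VLAN TCI. */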
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+	return e->vlan >> 13;
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
+		atomic_dec(&d->nfree);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain.  We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
+ */
+enum {
+	L2T_MIN_HASH_BUCKETS = 2,
+};
+
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+				    int ifindex)
+{
+	unsigned int l2t_size_half = d->l2t_size / 2;
+
+	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
+}
+
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+				     int ifindex)
+{
+	unsigned int l2t_size_half = d->l2t_size / 2;
+	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+	return (l2t_size_half +
+		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
+}
+
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+			      int addr_len, int ifindex)
+{
+	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+			       ipv6_hash(d, addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above.  Returns 0 if there's a match, non-zero otherwise.
+ */
+static int addreq(const struct l2t_entry *e, const u32 *addr)
+{
+	if (e->v6)
+		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+	return e->addr[0] ^ addr[0];
+}
+
+static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+	neigh_hold(n);
+	if (e->neigh)
+		neigh_release(e->neigh);
+	e->neigh = n;
+}
+
+/*
+ * Write an L2T entry.  Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
+{
+	struct l2t_data *d = adap->l2t;
+	unsigned int l2t_idx = e->idx + d->l2t_start;
+	struct sk_buff *skb;
+	struct cpl_l2t_write_req *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+					l2t_idx | (sync ? SYNC_WR_F : 0) |
+					TID_QID_V(adap->sge.fw_evtq.abs_id)));
+	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
+	req->l2t_idx = htons(l2t_idx);
+	req->vlan = htons(e->vlan);
+	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
+		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+	t4_ofld_send(adap, skb);
+
+	if (sync && e->state != L2T_STATE_SWITCHING)
+		e->state = L2T_STATE_SYNC_WRITE;
+	return 0;
+}
+
+/*
+ * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
+ * entry locked.
+ */
+static void send_pending(struct adapter *adap, struct l2t_entry *e)
+{
+	while (e->arpq_head) {
+		struct sk_buff *skb = e->arpq_head;
+
+		e->arpq_head = skb->next;
+		skb->next = NULL;
+		t4_ofld_send(adap, skb);
+	}
+	e->arpq_tail = NULL;
+}
+
+/*
+ * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
+ * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
+ * index it refers to.
+ */
+void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
+{
+	struct l2t_data *d = adap->l2t;
+	unsigned int tid = GET_TID(rpl);
+	unsigned int l2t_idx = tid % L2T_SIZE;
+
+	if (unlikely(rpl->status != CPL_ERR_NONE)) {
+		dev_err(adap->pdev_dev,
+			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+			rpl->status, l2t_idx);
+		return;
+	}
+
+	if (tid & SYNC_WR_F) {
+		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
+
+		spin_lock(&e->lock);
+		if (e->state != L2T_STATE_SWITCHING) {
+			send_pending(adap, e);
+			e->state = (e->neigh->nud_state & NUD_STALE) ?
+					L2T_STATE_STALE : L2T_STATE_VALID;
+		}
+		spin_unlock(&e->lock);
+	}
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+	skb->next = NULL;
+	if (e->arpq_head)
+		e->arpq_tail->next = skb;
+	else
+		e->arpq_head = skb;
+	e->arpq_tail = skb;
+}
+
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+		   struct l2t_entry *e)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE)
+			e->state = L2T_STATE_VALID;
+		spin_unlock_bh(&e->lock);
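+		/* Fall through: treat the entry as valid and send the packet
+		 * while revalidation proceeds in the background.
+		 */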
+	case L2T_STATE_VALID:     /* fast-path, send the packet on */
+		return t4_ofld_send(adap, skb);
+	case L2T_STATE_RESOLVING:
+	case L2T_STATE_SYNC_WRITE:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_SYNC_WRITE &&
+		    e->state != L2T_STATE_RESOLVING) {
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		arpq_enqueue(e, skb);
+		spin_unlock_bh(&e->lock);
+
+		if (e->state == L2T_STATE_RESOLVING &&
+		    !neigh_event_send(e->neigh, NULL)) {
+			spin_lock_bh(&e->lock);
+			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
+				write_l2e(adap, e, 1);
+			spin_unlock_bh(&e->lock);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_l2t_send);
+
+/*
+ * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+	struct l2t_entry *end, *e, **p;
+
+	if (!atomic_read(&d->nfree))
+		return NULL;
+
+	/* there's definitely a free entry */
+	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
+		if (atomic_read(&e->refcnt) == 0)
+			goto found;
+
+	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
+		;
+found:
+	d->rover = e + 1;
+	atomic_dec(&d->nfree);
+
+	/*
+	 * The entry we found may be an inactive entry that is
+	 * presently in the hash table.  We need to remove it.
+	 */
+	if (e->state < L2T_STATE_SWITCHING)
+		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
+			if (*p == e) {
+				*p = e->next;
+				e->next = NULL;
+				break;
+			}
+
+	e->state = L2T_STATE_UNUSED;
+	return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.
+ */
+static void t4_l2e_free(struct l2t_entry *e)
+{
+	struct l2t_data *d;
+
+	spin_lock_bh(&e->lock);
+	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
+		if (e->neigh) {
+			neigh_release(e->neigh);
+			e->neigh = NULL;
+		}
+		while (e->arpq_head) {
+			struct sk_buff *skb = e->arpq_head;
+
+			e->arpq_head = skb->next;
+			kfree_skb(skb);
+		}
+		e->arpq_tail = NULL;
+	}
+	spin_unlock_bh(&e->lock);
+
+	d = container_of(e, struct l2t_data, l2tab[e->idx]);
+	atomic_inc(&d->nfree);
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e)
+{
+	if (atomic_dec_and_test(&e->refcnt))
+		t4_l2e_free(e);
+}
+EXPORT_SYMBOL(cxgb4_l2t_release);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+	unsigned int nud_state;
+
+	spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+	nud_state = neigh->nud_state;
+	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+	    !(nud_state & NUD_VALID))
+		e->state = L2T_STATE_RESOLVING;
+	else if (nud_state & NUD_CONNECTED)
+		e->state = L2T_STATE_VALID;
+	else
+		e->state = L2T_STATE_STALE;
+	spin_unlock(&e->lock);
+}
+
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+				const struct net_device *physdev,
+				unsigned int priority)
+{
+	u8 lport;
+	u16 vlan;
+	struct l2t_entry *e;
+	int addr_len = neigh->tbl->key_len;
+	u32 *addr = (u32 *)neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = addr_hash(d, addr, addr_len, ifidx);
+
+	if (neigh->dev->flags & IFF_LOOPBACK)
+		lport = netdev2pinfo(physdev)->tx_chan + 4;
+	else
+		lport = netdev2pinfo(physdev)->lport;
+
+	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+		vlan = vlan_dev_vlan_id(neigh->dev);
+	else
+		vlan = VLAN_NONE;
+
+	write_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (!addreq(e, addr) && e->ifindex == ifidx &&
+		    e->vlan == vlan && e->lport == lport) {
+			l2t_hold(d, e);
+			if (atomic_read(&e->refcnt) == 1)
+				reuse_entry(e, neigh);
+			goto done;
+		}
+
+	/* Need to allocate a new entry */
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
+		e->state = L2T_STATE_RESOLVING;
+		if (neigh->dev->flags & IFF_LOOPBACK)
+			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
+		memcpy(e->addr, addr, addr_len);
+		e->ifindex = ifidx;
+		e->hash = hash;
+		e->lport = lport;
+		e->v6 = addr_len == 16;
+		atomic_set(&e->refcnt, 1);
+		neigh_replace(e, neigh);
+		e->vlan = vlan;
+		e->next = d->l2tab[hash].first;
+		d->l2tab[hash].first = e;
+		spin_unlock(&e->lock);
+	}
+done:
+	write_unlock_bh(&d->lock);
+	return e;
+}
+EXPORT_SYMBOL(cxgb4_l2t_get);
+
+u64 cxgb4_select_ntuple(struct net_device *dev,
+			const struct l2t_entry *l2t)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct tp_params *tp = &adap->params.tp;
+	u64 ntuple = 0;
+
+	/* Initialize each of the fields which we care about which are present
+	 * in the Compressed Filter Tuple.
+	 */
+	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
+
+	if (tp->port_shift >= 0)
+		ntuple |= (u64)l2t->lport << tp->port_shift;
+
+	if (tp->protocol_shift >= 0)
+		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+	if (tp->vnic_shift >= 0) {
+		u32 viid = cxgb4_port_viid(dev);
+		u32 vf = FW_VIID_VIN_G(viid);
+		u32 pf = FW_VIID_PFN_G(viid);
+		u32 vld = FW_VIID_VIVLD_G(viid);
+
+		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+				FT_VNID_ID_PF_V(pf) |
+				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+	}
+
+	return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head.  If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
+{
+	while (arpq) {
+		struct sk_buff *skb = arpq;
+		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+		arpq = skb->next;
+		skb->next = NULL;
+		if (cb->arp_err_handler)
+			cb->arp_err_handler(cb->handle, skb);
+		else
+			t4_ofld_send(adap, skb);
+	}
+}
+
+/*
+ * Called when the host's neighbor layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+{
+	struct l2t_entry *e;
+	struct sk_buff *arpq = NULL;
+	struct l2t_data *d = adap->l2t;
+	int addr_len = neigh->tbl->key_len;
+	u32 *addr = (u32 *) neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = addr_hash(d, addr, addr_len, ifidx);
+
+	read_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (!addreq(e, addr) && e->ifindex == ifidx) {
+			spin_lock(&e->lock);
+			if (atomic_read(&e->refcnt))
+				goto found;
+			spin_unlock(&e->lock);
+			break;
+		}
+	read_unlock_bh(&d->lock);
+	return;
+
+ found:
+	read_unlock(&d->lock);
+
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+
+	if (e->state == L2T_STATE_RESOLVING) {
+		if (neigh->nud_state & NUD_FAILED) {
+			arpq = e->arpq_head;
+			e->arpq_head = e->arpq_tail = NULL;
+		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
+			   e->arpq_head) {
+			write_l2e(adap, e, 1);
+		}
+	} else {
+		e->state = neigh->nud_state & NUD_CONNECTED ?
+			L2T_STATE_VALID : L2T_STATE_STALE;
+		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+			write_l2e(adap, e, 0);
+	}
+
+	spin_unlock_bh(&e->lock);
+
+	if (arpq)
+		handle_failed_resolution(adap, arpq);
+}
+
+/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
+ * explicitly freed, and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+	struct l2t_entry *e;
+
+	write_lock_bh(&d->lock);
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
+		e->state = L2T_STATE_SWITCHING;
+		atomic_set(&e->refcnt, 1);
+		spin_unlock(&e->lock);
+	}
+	write_unlock_bh(&d->lock);
+	return e;
+}
+
+/* Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+		u8 port, u8 *eth_addr)
+{
+	e->vlan = vlan;
+	e->lport = port;
+	memcpy(e->dmac, eth_addr, ETH_ALEN);
+	return write_l2e(adap, e, 0);
+}
+
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
+{
+	unsigned int l2t_size;
+	int i;
+	struct l2t_data *d;
+
+	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+		return NULL;
+	l2t_size = l2t_end - l2t_start + 1;
+	if (l2t_size < L2T_MIN_HASH_BUCKETS)
+		return NULL;
+
+	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
+	if (!d)
+		return NULL;
+
+	d->l2t_start = l2t_start;
+	d->l2t_size = l2t_size;
+
+	d->rover = d->l2tab;
+	atomic_set(&d->nfree, l2t_size);
+	rwlock_init(&d->lock);
+
+	for (i = 0; i < d->l2t_size; ++i) {
+		d->l2tab[i].idx = i;
+		d->l2tab[i].state = L2T_STATE_UNUSED;
+		spin_lock_init(&d->l2tab[i].lock);
+		atomic_set(&d->l2tab[i].refcnt, 0);
+	}
+	return d;
+}
+
+static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct l2t_data *d = seq->private;
+
+	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
+}
+
+static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	v = l2t_get_idx(seq, *pos);
+	if (v)
+		++*pos;
+	return v;
+}
+
+static void l2t_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static char l2e_state(const struct l2t_entry *e)
+{
+	switch (e->state) {
+	case L2T_STATE_VALID: return 'V';
+	case L2T_STATE_STALE: return 'S';
+	case L2T_STATE_SYNC_WRITE: return 'W';
+	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+	case L2T_STATE_SWITCHING: return 'X';
+	default:
+		return 'U';
+	}
+}
+
+static int l2t_seq_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN)
+		seq_puts(seq, " Idx IP address                "
+			 "Ethernet address  VLAN/P LP State Users Port\n");
+	else {
+		char ip[60];
+		struct l2t_data *d = seq->private;
+		struct l2t_entry *e = v;
+
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_SWITCHING)
+			ip[0] = '\0';
+		else
+			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
+		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
+			   e->idx + d->l2t_start, ip, e->dmac,
+			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
+			   l2e_state(e), atomic_read(&e->refcnt),
+			   e->neigh ? e->neigh->dev->name : "");
+		spin_unlock_bh(&e->lock);
+	}
+	return 0;
+}
+
+static const struct seq_operations l2t_seq_ops = {
+	.start = l2t_seq_start,
+	.next = l2t_seq_next,
+	.stop = l2t_seq_stop,
+	.show = l2t_seq_show
+};
+
+static int l2t_seq_open(struct inode *inode, struct file *file)
+{
+	int rc = seq_open(file, &l2t_seq_ops);
+
+	if (!rc) {
+		struct adapter *adap = inode->i_private;
+		struct seq_file *seq = file->private_data;
+
+		seq->private = adap->l2t;
+	}
+	return rc;
+}
+
+const struct file_operations t4_l2t_fops = {
+	.owner = THIS_MODULE,
+	.open = l2t_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
new file mode 100644
index 0000000..b38dc52
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -0,0 +1,125 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_L2T_H
+#define __CXGB4_L2T_H
+
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/atomic.h>
+
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+	L2T_STATE_VALID,      /* entry is up to date */
+	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,  /* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+	L2T_STATE_NOARP,      /* Netdev down or removed */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+	L2T_STATE_UNUSED      /* entry not in use */
+};
+
+struct adapter;
+struct l2t_data;
+struct neighbour;
+struct net_device;
+struct file_operations;
+struct cpl_l2t_write_rpl;
+
+/*
+ * Each L2T entry plays multiple roles.  First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution.  Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer.  Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+	u16 state;                  /* entry state */
+	u16 idx;                    /* entry index within in-memory table */
+	u32 addr[4];                /* next hop IP or IPv6 address */
+	int ifindex;                /* neighbor's net_device's ifindex */
+	struct neighbour *neigh;    /* associated neighbour */
+	struct l2t_entry *first;    /* start of hash chain */
+	struct l2t_entry *next;     /* next l2t_entry on chain */
+	struct sk_buff *arpq_head;  /* queue of packets awaiting resolution */
+	struct sk_buff *arpq_tail;
+	spinlock_t lock;
+	atomic_t refcnt;            /* entry reference count */
+	u16 hash;                   /* hash bucket the entry is on */
+	u16 vlan;                   /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+	u8 v6;                      /* whether entry is for IPv6 */
+	u8 lport;                   /* associated offload logical interface */
+	u8 dmac[ETH_ALEN];          /* neighbour's MAC address */
+};
+
+typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	void *handle;
+	arp_err_handler_t arp_err_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
+					  arp_err_handler_t handler)
+{
+	L2T_SKB_CB(skb)->handle = handle;
+	L2T_SKB_CB(skb)->arp_err_handler = handler;
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e);
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+		   struct l2t_entry *e);
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+				const struct net_device *physdev,
+				unsigned int priority);
+u64 cxgb4_select_ntuple(struct net_device *dev,
+			const struct l2t_entry *l2t);
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+			 u8 port, u8 *eth_addr);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+
+extern const struct file_operations t4_l2t_fops;
+#endif  /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
new file mode 100644
index 0000000..b7b93e7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -0,0 +1,3155 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/prefetch.h>
+#include <linux/export.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#include <scsi/fc/fc_fcoe.h>
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+/*
+ * Rx buffer size.  We use largish buffers if possible but settle for single
+ * pages under memory shortage.
+ */
+#if PAGE_SHIFT >= 16
+# define FL_PG_ORDER 0
+#else
+# define FL_PG_ORDER (16 - PAGE_SHIFT)
+#endif
+
+/* RX_PULL_LEN should be <= RX_COPY_THRES */
+#define RX_COPY_THRES    256
+#define RX_PULL_LEN      128
+
+/*
+ * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
+ * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
+ */
+#define RX_PKT_SKB_LEN   512
+
+/*
+ * Max number of Tx descriptors we clean up at a time.  Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks.  As long as we
+ * free packets faster than they arrive, we eventually catch up and keep the
+ * amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
+ */
+#define MAX_TX_RECLAIM 16
+
+/*
+ * Max number of Rx buffers we replenish at a time.  Again keep this modest,
+ * allocating buffers isn't cheap either.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
+ * Period of the Rx queue check timer.  This timer is infrequent as it has
+ * something to do only when the system experiences severe memory shortage.
+ */
+#define RX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Period of the Tx queue check timer.
+ */
+#define TX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
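+
+/*
+ * Worked example of the threshold above, assuming MAX_SKB_FRAGS == 17 (the
+ * usual value on systems with 4KB pages):
+ *	(3 * 17) / 2 + (17 & 1) = 26,  DIV_ROUND_UP(26, 8) = 4,
+ * so ETHTXQ_STOP_THRES comes out to 1 + 4 = 5 descriptors.
+ */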
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues.  We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
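+
+/*
+ * Assuming SGE_MAX_WR_LEN is 512 bytes and struct tx_desc is 64 bytes (eight
+ * flits), TXQ_STOP_THRES works out to 8 descriptors, consistent with
+ * MAX_TX_RECLAIM above being 16 (>= 2 * TXQ_STOP_THRES).
+ */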
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+struct tx_sw_desc {                /* SW state per Tx descriptor */
+	struct sk_buff *skb;
+	struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc {                /* SW state per Rx descriptor */
+	struct page *page;
+	dma_addr_t dma_addr;
+};
+
+/*
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
+ * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+					  unsigned int mtu)
+{
+	struct sge *s = &adapter->sge;
+
+	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
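+
+/*
+ * For example, with a 2-byte ingress packet shift and 64-byte Free List
+ * alignment (common values for s->pktshift and s->fl_align), the small-MTU
+ * buffer is ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes.
+ */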
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array.  We also use bit 4 to mark that a buffer has been
+ * unmapped for DMA; this is never sent to the hardware and is only used
+ * to prevent double unmappings.  All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * aligned to 32 bytes or a larger power of 2.  Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
+ */
+enum {
+	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
+	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
+	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
+
+	/*
+	 * XXX We shouldn't depend on being able to use these indices.
+	 * XXX Especially when some other Master PF has initialized the
+	 * XXX adapter or we use the Firmware Configuration File.  We
+	 * XXX should really search through the Host Buffer Size register
+	 * XXX array for the appropriately sized buffer indices.
+	 */
+	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
+	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
+
+	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
+	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
+};
+
+static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
+#define MIN_NAPI_WORK  1
+
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
+{
+	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
+}
+
+static inline bool is_buf_mapped(const struct rx_sw_desc *d)
+{
+	return !(d->dma_addr & RX_UNMAPPED_BUF);
+}
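+
+/*
+ * The flag bits above round-trip as follows: refill_fl() below ORs a
+ * buffer-size index such as RX_LARGE_PG_BUF into the freshly mapped DMA
+ * address before writing it to the hardware descriptor and the software
+ * state; get_buf_addr() strips the flags to recover the address and
+ * get_buf_size() reads the size index back out.
+ */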
+
+/**
+ *	txq_avail - return the number of available slots in a Tx queue
+ *	@q: the Tx queue
+ *
+ *	Returns the number of descriptors in a Tx queue available to write new
+ *	packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+	return q->size - 1 - q->in_use;
+}
+
+/**
+ *	fl_cap - return the capacity of a free-buffer list
+ *	@fl: the FL
+ *
+ *	Returns the capacity of a free-buffer list.  The capacity is less than
+ *	the size because one descriptor needs to be left unpopulated, otherwise
+ *	HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+	return fl->size - 8;   /* 1 descriptor = 8 buffers */
+}
+
+/**
+ *	fl_starving - return whether a Free List is starving.
+ *	@adapter: pointer to the adapter
+ *	@fl: the Free List
+ *
+ *	Tests the specified Free List to see whether the number of buffers
+ *	available to the hardware has fallen below our "starvation"
+ *	threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
+{
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
+}
+
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+		   dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, *addr))
+		goto out_err;
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+
+	for (fp = si->frags; fp < end; fp++) {
+		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, *addr))
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	while (fp-- > si->frags)
+		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+out_err:
+	return -ENOMEM;
+}
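+
+/*
+ * Note that map_skb() fills @addr with the linear-data mapping at addr[0]
+ * followed by one entry per page fragment; write_sgl() below relies on this
+ * layout when the linear part contributes nothing to the SGL and it starts
+ * from addr[1] instead.
+ */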
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+static void unmap_skb(struct device *dev, const struct sk_buff *skb,
+		      const dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+	for (fp = si->frags; fp < end; fp++)
+		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
+}
+
+/**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
+}
+#endif
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
+{
+	const struct ulptx_sge_pair *p;
+	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+	if (likely(skb_headlen(skb)))
+		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+				 DMA_TO_DEVICE);
+	else {
+		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+			       DMA_TO_DEVICE);
+		nfrags--;
+	}
+
+	/*
+	 * the complexity below is because of the possibility of a wrap-around
+	 * in the middle of an SGL
+	 */
+	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
+unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p++;
+		} else if ((u8 *)p == (u8 *)q->stat) {
+			p = (const struct ulptx_sge_pair *)q->desc;
+			goto unmap;
+		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
+			const __be64 *addr = (const __be64 *)q->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[1]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[2];
+		} else {
+			const __be64 *addr = (const __be64 *)q->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[1];
+		}
+	}
+	if (nfrags) {
+		__be64 addr;
+
+		if ((u8 *)p == (u8 *)q->stat)
+			p = (const struct ulptx_sge_pair *)q->desc;
+		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
+						       *(const __be64 *)q->desc;
+		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
+			       DMA_TO_DEVICE);
+	}
+}
+
+/**
+ *	free_tx_desc - reclaims Tx descriptors and their buffers
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ *	Tx buffers.  Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+			 unsigned int n, bool unmap)
+{
+	struct tx_sw_desc *d;
+	unsigned int cidx = q->cidx;
+	struct device *dev = adap->pdev_dev;
+
+	d = &q->sdesc[cidx];
+	while (n--) {
+		if (d->skb) {                       /* an SGL is present */
+			if (unmap)
+				unmap_sgl(dev, d->skb, d->sgl, q);
+			dev_consume_skb_any(d->skb);
+			d->skb = NULL;
+		}
+		++d;
+		if (++cidx == q->size) {
+			cidx = 0;
+			d = q->sdesc;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+	int hw_cidx = ntohs(q->stat->cidx);
+	hw_cidx -= q->cidx;
+	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed Tx descriptors
+ *	@adap: the adapter
+ *	@q: the Tx queue to reclaim completed descriptors from
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims Tx descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the Tx
+ *	queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+					bool unmap)
+{
+	int avail = reclaimable(q);
+
+	if (avail) {
+		/*
+		 * Limit the amount of clean up work we do at a time to keep
+		 * the Tx lock hold time O(1).
+		 */
+		if (avail > MAX_TX_RECLAIM)
+			avail = MAX_TX_RECLAIM;
+
+		free_tx_desc(adap, q, avail, unmap);
+		q->in_use -= avail;
+	}
+}
+
+static inline int get_buf_size(struct adapter *adapter,
+			       const struct rx_sw_desc *d)
+{
+	struct sge *s = &adapter->sge;
+	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+	int buf_size;
+
+	switch (rx_buf_size_idx) {
+	case RX_SMALL_PG_BUF:
+		buf_size = PAGE_SIZE;
+		break;
+
+	case RX_LARGE_PG_BUF:
+		buf_size = PAGE_SIZE << s->fl_pg_order;
+		break;
+
+	case RX_SMALL_MTU_BUF:
+		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+		break;
+
+	case RX_LARGE_MTU_BUF:
+		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+		break;
+
+	default:
+		BUG_ON(1);
+	}
+
+	return buf_size;
+}
+
+/**
+ *	free_rx_bufs - free the Rx buffers on an SGE free list
+ *	@adap: the adapter
+ *	@q: the SGE free list to free buffers from
+ *	@n: how many buffers to free
+ *
+ *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
+ *	buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
+{
+	while (n--) {
+		struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+		if (is_buf_mapped(d))
+			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+				       get_buf_size(adap, d),
+				       PCI_DMA_FROMDEVICE);
+		put_page(d->page);
+		d->page = NULL;
+		if (++q->cidx == q->size)
+			q->cidx = 0;
+		q->avail--;
+	}
+}
+
+/**
+ *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ *	@adap: the adapter
+ *	@q: the SGE free list
+ *
+ *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
+ *	buffer must be made inaccessible to HW before calling this function.
+ *
+ *	This is similar to @free_rx_bufs above but does not free the buffer.
+ *	Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
+{
+	struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+	if (is_buf_mapped(d))
+		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
+	d->page = NULL;
+	if (++q->cidx == q->size)
+		q->cidx = 0;
+	q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+	if (q->pend_cred >= 8) {
+		u32 val = adap->params.arch.sge_fl_db;
+
+		if (is_t4(adap->params.chip))
+			val |= PIDX_V(q->pend_cred / 8);
+		else
+			val |= PIDX_T5_V(q->pend_cred / 8);
+
+		/* Make sure all memory writes to the Free List queue are
+		 * committed before we tell the hardware about them.
+		 */
+		wmb();
+
+		/* If we don't have access to the new User Doorbell (T5+), use
+		 * the old doorbell mechanism; otherwise use the new BAR2
+		 * mechanism.
+		 */
+		if (unlikely(q->bar2_addr == NULL)) {
+			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+				     val | QID_V(q->cntxt_id));
+		} else {
+			writel(val | QID_V(q->bar2_qid),
+			       q->bar2_addr + SGE_UDB_KDOORBELL);
+
+			/* This Write Memory Barrier will force the write to
+			 * the User Doorbell area to be flushed.
+			 */
+			wmb();
+		}
+		q->pend_cred &= 7;
+	}
+}
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
+				  dma_addr_t mapping)
+{
+	sd->page = pg;
+	sd->dma_addr = mapping;      /* includes size low bits */
+}
+
+/**
+ *	refill_fl - refill an SGE Rx buffer ring
+ *	@adap: the adapter
+ *	@q: the ring to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for the allocations
+ *
+ *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must assure that
+ *	@n does not exceed the queue's capacity.  If afterwards the queue is
+ *	found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ *	Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
+			      gfp_t gfp)
+{
+	struct sge *s = &adap->sge;
+	struct page *pg;
+	dma_addr_t mapping;
+	unsigned int cred = q->avail;
+	__be64 *d = &q->desc[q->pidx];
+	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+	int node;
+
+#ifdef CONFIG_DEBUG_FS
+	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+		goto out;
+#endif
+
+	gfp |= __GFP_NOWARN;
+	node = dev_to_node(adap->pdev_dev);
+
+	if (s->fl_pg_order == 0)
+		goto alloc_small_pages;
+
+	/*
+	 * Prefer large buffers
+	 */
+	while (n) {
+		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
+		if (unlikely(!pg)) {
+			q->large_alloc_failed++;
+			break;       /* fall back to single pages */
+		}
+
+		mapping = dma_map_page(adap->pdev_dev, pg, 0,
+				       PAGE_SIZE << s->fl_pg_order,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+			__free_pages(pg, s->fl_pg_order);
+			goto out;   /* do not try small pages for this error */
+		}
+		mapping |= RX_LARGE_PG_BUF;
+		*d++ = cpu_to_be64(mapping);
+
+		set_rx_sw_desc(sd, pg, mapping);
+		sd++;
+
+		q->avail++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+		n--;
+	}
+
+alloc_small_pages:
+	while (n--) {
+		pg = alloc_pages_node(node, gfp, 0);
+		if (unlikely(!pg)) {
+			q->alloc_failed++;
+			break;
+		}
+
+		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+			put_page(pg);
+			goto out;
+		}
+		*d++ = cpu_to_be64(mapping);
+
+		set_rx_sw_desc(sd, pg, mapping);
+		sd++;
+
+		q->avail++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+	}
+
+out:	cred = q->avail - cred;
+	q->pend_cred += cred;
+	ring_fl_db(adap, q);
+
+	if (unlikely(fl_starving(adap, q))) {
+		smp_wmb();
+		set_bit(q->cntxt_id - adap->sge.egr_start,
+			adap->sge.starving_fl);
+	}
+
+	return cred;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+		  GFP_ATOMIC);
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@dev: the PCI device's core device
+ *	@nelem: the number of descriptors
+ *	@elem_size: the size of each descriptor
+ *	@sw_size: the size of the SW state associated with each ring element
+ *	@phys: the physical address of the allocated ring
+ *	@metadata: address of the array holding the SW state for the ring
+ *	@stat_size: extra space in HW ring for status information
+ *	@node: preferred node for memory allocations
+ *
+ *	Allocates resources for an SGE descriptor ring, such as Tx queues,
+ *	free buffer lists, or response queues.  Each SGE ring requires
+ *	space for its HW descriptors plus, optionally, space for the SW state
+ *	associated with each HW entry (the metadata).  The function returns
+ *	three values: the virtual address for the HW ring (the return value
+ *	of the function), the bus address of the HW ring, and the address
+ *	of the SW ring.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
+			size_t sw_size, dma_addr_t *phys, void *metadata,
+			size_t stat_size, int node)
+{
+	size_t len = nelem * elem_size + stat_size;
+	void *s = NULL;
+	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+
+	if (!p)
+		return NULL;
+	if (sw_size) {
+		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
+
+		if (!s) {
+			dma_free_coherent(dev, len, p, *phys);
+			return NULL;
+		}
+	}
+	if (metadata)
+		*(void **)metadata = s;
+	memset(p, 0, len);
+	return p;
+}
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits needed for a scatter/gather list that
+ *	can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
+	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+	 * repeated sequences of { Length[i], Length[i+1], Address[i],
+	 * Address[i+1] } (this ensures that all addresses are on 64-bit
+	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
+	 * Address[N+1] is omitted.
+	 *
+	 * The following calculation incorporates all of the above.  It's
+	 * somewhat hard to follow but, briefly: the "+2" accounts for the
+	 * first two flits which include the DSGL header, Length0 and
+	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+	 * flits for every pair of the remaining n-1 entries); and
+	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
+	 * (n-1) is odd ...
+	 */
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
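+
+/*
+ * A couple of data points for the formula above: sgl_len(1) = 2 (one flit for
+ * the DSGL header plus Length0, one for Address0), sgl_len(2) = 4 (add a
+ * Length[1]/Length[2] flit with Length[2] = 0 and an Address[1] flit), and
+ * sgl_len(3) = 5 (Address[2] occupies the extra flit).
+ */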
+
+/**
+ *	flits_to_desc - returns the num of Tx descriptors for the given flits
+ *	@n: the number of flits
+ *
+ *	Returns the number of Tx descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+	BUG_ON(n > SGE_MAX_WR_LEN / 8);
+	return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ *	is_eth_imm - can an Ethernet packet be sent as immediate data?
+ *	@skb: the packet
+ *
+ *	Returns the length of the CPL header(s) needed if the packet is small
+ *	enough to be sent as immediate data, or 0 otherwise.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+	int hdrlen = skb_shinfo(skb)->gso_size ?
+			sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+	hdrlen += sizeof(struct cpl_tx_pkt);
+	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+		return hdrlen;
+	return 0;
+}
+
+/**
+ *	calc_tx_flits - calculate the number of flits for a packet Tx WR
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for a Tx WR for the given Ethernet
+ *	packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+	unsigned int flits;
+	int hdrlen = is_eth_imm(skb);
+
+	/* If the skb is small enough, we can pump it out as a work request
+	 * with only immediate data.  In that case we just have to have the
+	 * TX Packet header plus the skb data in the Work Request.
+	 */
+
+	if (hdrlen)
+		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+	/* Otherwise, we're going to have to construct a Scatter gather list
+	 * of the skb body and fragments.  We also include the flits necessary
+	 * for the TX Packet Work Request and CPL.  We always have a firmware
+	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+	 * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+	 */
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+	if (skb_shinfo(skb)->gso_size)
+		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+			  sizeof(struct cpl_tx_pkt_lso_core) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	else
+		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	return flits;
+}
+
+/**
+ *	calc_tx_descs - calculate the number of Tx descriptors for a packet
+ *	@skb: the packet
+ *
+ *	Returns the number of Tx descriptors needed for the given Ethernet
+ *	packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+	return flits_to_desc(calc_tx_flits(skb));
+}
+
+/**
+ *	write_sgl - populate a scatter/gather list for a packet
+ *	@skb: the packet
+ *	@q: the Tx queue we are writing into
+ *	@sgl: starting location for writing the SGL
+ *	@end: points right after the end of the SGL
+ *	@start: start offset into skb main-body data to include in the SGL
+ *	@addr: the list of bus addresses for the SGL elements
+ *
+ *	Generates a gather list for the buffers that make up a packet.
+ *	The caller must provide adequate space for the SGL that will be written.
+ *	The SGL includes all of the packet's page fragments and the data in its
+ *	main body except for the first @start bytes.  @sgl must be 16-byte
+ *	aligned and within a Tx descriptor with available space.  @end points
+ *	right after the end of the SGL but does not account for any potential
+ *	wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+		      const dma_addr_t *addr)
+{
+	unsigned int i, len;
+	struct ulptx_sge_pair *to;
+	const struct skb_shared_info *si = skb_shinfo(skb);
+	unsigned int nfrags = si->nr_frags;
+	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+	len = skb_headlen(skb) - start;
+	if (likely(len)) {
+		sgl->len0 = htonl(len);
+		sgl->addr0 = cpu_to_be64(addr[0] + start);
+		nfrags++;
+	} else {
+		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
+		sgl->addr0 = cpu_to_be64(addr[1]);
+	}
+
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(nfrags));
+	if (likely(--nfrags == 0))
+		return;
+	/*
+	 * Most of the complexity below deals with the possibility we hit the
+	 * end of the queue in the middle of writing the SGL.  For this case
+	 * only we create the SGL in a temporary buffer and then copy it.
+	 */
+	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
+		to->addr[0] = cpu_to_be64(addr[i]);
+		to->addr[1] = cpu_to_be64(addr[++i]);
+	}
+	if (nfrags) {
+		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+		to->len[1] = cpu_to_be32(0);
+		to->addr[0] = cpu_to_be64(addr[i + 1]);
+	}
+	if (unlikely((u8 *)end > (u8 *)q->stat)) {
+		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+		if (likely(part0))
+			memcpy(sgl->sge, buf, part0);
+		part1 = (u8 *)end - (u8 *)q->stat;
+		memcpy(q->desc, (u8 *)buf + part0, part1);
+		end = (void *)q->desc + part1;
+	}
+	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
+		*end = 0;
+}
+
+/* This function copies a 64-byte coalesced work request to memory-mapped
+ * BAR2 space.  For coalesced WRs the SGE fetches data from its FIFO instead
+ * of from host memory.
+ */
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+{
+	int count = 8;
+
+	while (count) {
+		writeq(*src, dst);
+		src++;
+		dst++;
+		count--;
+	}
+}
+
+/**
+ *	ring_tx_db - check and potentially ring a Tx queue's doorbell
+ *	@adap: the adapter
+ *	@q: the Tx queue
+ *	@n: number of new descriptors to give to HW
+ *
+ *	Ring the doorbell for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+{
+	/* Make sure that all writes to the TX Descriptors are committed
+	 * before we tell the hardware about them.
+	 */
+	wmb();
+
+	/* If we don't have access to the new User Doorbell (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(q->bar2_addr == NULL)) {
+		u32 val = PIDX_V(n);
+		unsigned long flags;
+
+		/* For T4 we need to participate in the Doorbell Recovery
+		 * mechanism.
+		 */
+		spin_lock_irqsave(&q->db_lock, flags);
+		if (!q->db_disabled)
+			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+				     QID_V(q->cntxt_id) | val);
+		else
+			q->db_pidx_inc += n;
+		q->db_pidx = q->pidx;
+		spin_unlock_irqrestore(&q->db_lock, flags);
+	} else {
+		u32 val = PIDX_T5_V(n);
+
+		/* T4 and later chips share the same PIDX field offset within
+		 * the doorbell, but T5 and later shrank the field in order to
+		 * gain a bit for Doorbell Priority.  The field was absurdly
+		 * large in the first place (14 bits) so we just use the T5
+		 * and later limits and warn if a Queue ID is too large.
+		 */
+		WARN_ON(val & DBPRIO_F);
+
+		/* If we're only writing a single TX Descriptor and we can use
+		 * Inferred QID registers, we can use the Write Combining
+		 * Gather Buffer; otherwise we use the simple doorbell.
+		 */
+		if (n == 1 && q->bar2_qid == 0) {
+			int index = (q->pidx
+				     ? (q->pidx - 1)
+				     : (q->size - 1));
+			u64 *wr = (u64 *)&q->desc[index];
+
+			cxgb_pio_copy((u64 __iomem *)
+				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
+				      wr);
+		} else {
+			writel(val | QID_V(q->bar2_qid),
+			       q->bar2_addr + SGE_UDB_KDOORBELL);
+		}
+
+		/* This Write Memory Barrier will force the write to the User
+		 * Doorbell area to be flushed.  This is needed to prevent
+		 * writes on different CPUs for the same queue from hitting
+		 * the adapter out of order.  This is required when some Work
+		 * Requests take the Write Combine Gather Buffer path (user
+		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+		 * take the traditional path where we simply increment the
+		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+		 * hardware DMA read the actual Work Request.
+		 */
+		wmb();
+	}
+}
+
+/**
+ *	inline_tx_skb - inline a packet's data into Tx descriptors
+ *	@skb: the packet
+ *	@q: the Tx queue where the packet will be inlined
+ *	@pos: starting position in the Tx queue where to inline the packet
+ *
+ *	Inline a packet's contents directly into Tx descriptors, starting at
+ *	the given position within the Tx DMA ring.
+ *	Most of the complexity of this operation is dealing with wrap arounds
+ *	in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+			  void *pos)
+{
+	u64 *p;
+	int left = (void *)q->stat - pos;
+
+	if (likely(skb->len <= left)) {
+		if (likely(!skb->data_len))
+			skb_copy_from_linear_data(skb, pos, skb->len);
+		else
+			skb_copy_bits(skb, 0, pos, skb->len);
+		pos += skb->len;
+	} else {
+		skb_copy_bits(skb, 0, pos, left);
+		skb_copy_bits(skb, left, q->desc, skb->len - left);
+		pos = (void *)q->desc + (skb->len - left);
+	}
+
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8)
+		*p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
+{
+	int csum_type;
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (iph->version == 4) {
+		if (iph->protocol == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP;
+		else if (iph->protocol == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP;
+		else {
+nocsum:			/*
+			 * unknown protocol, disable HW csum
+			 * and hope a bad packet is detected
+			 */
+			return TXPKT_L4CSUM_DIS_F;
+		}
+	} else {
+		/*
+		 * this doesn't work with extension headers
+		 */
+		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+		if (ip6h->nexthdr == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP6;
+		else if (ip6h->nexthdr == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP6;
+		else
+			goto nocsum;
+	}
+
+	if (likely(csum_type >= TX_CSUM_TCPIP)) {
+		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		else
+			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+	} else {
+		int start = skb_transport_offset(skb);
+
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+			TXPKT_CSUM_START_V(start) |
+			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
+	}
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+	netif_tx_stop_queue(q->txq);
+	q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+	q->in_use += n;
+	q->pidx += n;
+	if (q->pidx >= q->size)
+		q->pidx -= q->size;
+}
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+static inline int
+cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
+		  const struct port_info *pi, u64 *cntrl)
+{
+	const struct cxgb_fcoe *fcoe = &pi->fcoe;
+
+	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
+		return 0;
+
+	if (skb->protocol != htons(ETH_P_FCOE))
+		return 0;
+
+	skb_reset_mac_header(skb);
+	skb->mac_len = sizeof(struct ethhdr);
+
+	skb_set_network_header(skb, skb->mac_len);
+	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
+
+	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
+		return -ENOTSUPP;
+
+	/* FC CRC offload */
+	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
+	return 0;
+}
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
+/**
+ *	t4_eth_xmit - add a packet to an Ethernet Tx queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
+ */
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 wr_mid, ctrl0;
+	u64 cntrl, *end;
+	int qidx, credits;
+	unsigned int flits, ndesc;
+	struct adapter *adap;
+	struct sge_eth_txq *q;
+	const struct port_info *pi;
+	struct fw_eth_tx_pkt_wr *wr;
+	struct cpl_tx_pkt_core *cpl;
+	const struct skb_shared_info *ssi;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	bool immediate = false;
+	int len, max_pkt_len;
+#ifdef CONFIG_CHELSIO_T4_FCOE
+	int err;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
+	/*
+	 * The chip min packet length is 10 octets but play safe and reject
+	 * anything shorter than an Ethernet header.
+	 */
+	if (unlikely(skb->len < ETH_HLEN)) {
+out_free:	dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* Discard the packet if the length is greater than mtu */
+	max_pkt_len = ETH_HLEN + dev->mtu;
+	if (skb_vlan_tag_present(skb))
+		max_pkt_len += VLAN_HLEN;
+	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+		goto out_free;
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb_get_queue_mapping(skb);
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+	reclaim_completed_tx(adap, &q->q, true);
+	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+
+#ifdef CONFIG_CHELSIO_T4_FCOE
+	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+	if (unlikely(err == -ENOTSUPP))
+		goto out_free;
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+
+	flits = calc_tx_flits(skb);
+	ndesc = flits_to_desc(flits);
+	credits = txq_avail(&q->q) - ndesc;
+
+	if (unlikely(credits < 0)) {
+		eth_txq_stop(q);
+		dev_err(adap->pdev_dev,
+			"%s: Tx ring %u full while queue awake!\n",
+			dev->name, qidx);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (is_eth_imm(skb))
+		immediate = true;
+
+	if (!immediate &&
+	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+		q->mapping_err++;
+		goto out_free;
+	}
+
+	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		eth_txq_stop(q);
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+
+	wr = (void *)&q->q.desc[q->q.pidx];
+	wr->equiq_to_len16 = htonl(wr_mid);
+	wr->r3 = cpu_to_be64(0);
+	end = (u64 *)wr + flits;
+
+	len = immediate ? skb->len : 0;
+	ssi = skb_shinfo(skb);
+	if (ssi->gso_size) {
+		struct cpl_tx_pkt_lso *lso = (void *)wr;
+		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+		int l3hdr_len = skb_network_header_len(skb);
+		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+		len += sizeof(*lso);
+		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+				       FW_WR_IMMDLEN_V(len));
+		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+					LSO_IPV6_V(v6) |
+					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+		lso->c.ipid_ofst = htons(0);
+		lso->c.mss = htons(ssi->gso_size);
+		lso->c.seqno_offset = htonl(0);
+		if (is_t4(adap->params.chip))
+			lso->c.len = htonl(skb->len);
+		else
+			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
+		cpl = (void *)(lso + 1);
+
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+			cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+		else
+			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			 TXPKT_IPHDR_LEN_V(l3hdr_len);
+		q->tso++;
+		q->tx_cso += ssi->gso_segs;
+	} else {
+		len += sizeof(*cpl);
+		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+				       FW_WR_IMMDLEN_V(len));
+		cpl = (void *)(wr + 1);
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			cntrl = hwcsum(adap->params.chip, skb) |
+				TXPKT_IPCSUM_DIS_F;
+			q->tx_cso++;
+		}
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		q->vlan_ins++;
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+#ifdef CONFIG_CHELSIO_T4_FCOE
+		if (skb->protocol == htons(ETH_P_FCOE))
+			cntrl |= TXPKT_VLAN_V(
+				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+	}
+
+	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+		TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	if (is_t4(adap->params.chip))
+		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+	else
+		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+	cpl->ctrl0 = htonl(ctrl0);
+	cpl->pack = htons(0);
+	cpl->len = htons(skb->len);
+	cpl->ctrl1 = cpu_to_be64(cntrl);
+
+	if (immediate) {
+		inline_tx_skb(skb, &q->q, cpl + 1);
+		dev_consume_skb_any(skb);
+	} else {
+		int last_desc;
+
+		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+			  addr);
+		skb_orphan(skb);
+
+		last_desc = q->q.pidx + ndesc - 1;
+		if (last_desc >= q->q.size)
+			last_desc -= q->q.size;
+		q->q.sdesc[last_desc].skb = skb;
+		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+	}
+
+	txq_advance(&q->q, ndesc);
+
+	ring_tx_db(adap, &q->q, ndesc);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ *	@q: the SGE control Tx queue
+ *
+ *	This is a variant of reclaim_completed_tx() that is used for Tx queues
+ *	that send only immediate data (presently just the control queues) and
+ *	thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+	int hw_cidx = ntohs(q->stat->cidx);
+	int reclaim = hw_cidx - q->cidx;
+
+	if (reclaim < 0)
+		reclaim += q->size;
+
+	q->in_use -= reclaim;
+	q->cidx = hw_cidx;
+}
+
+/**
+ *	is_imm - check whether a packet can be sent as immediate data
+ *	@skb: the packet
+ *
+ *	Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct sk_buff *skb)
+{
+	return skb->len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ *	ctrlq_check_stop - check if a control queue is full and should stop
+ *	@q: the queue
+ *	@wr: most recent WR written to the queue
+ *
+ *	Check if a control queue has become full and should be stopped.
+ *	We clean up control queue descriptors very lazily, only when we are out.
+ *	If the queue is still full after reclaiming any completed descriptors
+ *	we suspend it and have the last WR wake it up.
+ */
+static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
+{
+	reclaim_completed_tx_imm(&q->q);
+	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
+		q->q.stops++;
+		q->full = 1;
+	}
+}
+
+/**
+ *	ctrl_xmit - send a packet through an SGE control Tx queue
+ *	@q: the control queue
+ *	@skb: the packet
+ *
+ *	Send a packet through an SGE control Tx queue.  Packets sent through
+ *	a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
+{
+	unsigned int ndesc;
+	struct fw_wr_hdr *wr;
+
+	if (unlikely(!is_imm(skb))) {
+		WARN_ON(1);
+		dev_kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
+	spin_lock(&q->sendq.lock);
+
+	if (unlikely(q->full)) {
+		skb->priority = ndesc;                  /* save for restart */
+		__skb_queue_tail(&q->sendq, skb);
+		spin_unlock(&q->sendq.lock);
+		return NET_XMIT_CN;
+	}
+
+	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+	inline_tx_skb(skb, &q->q, wr);
+
+	txq_advance(&q->q, ndesc);
+	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
+		ctrlq_check_stop(q, wr);
+
+	ring_tx_db(q->adap, &q->q, ndesc);
+	spin_unlock(&q->sendq.lock);
+
+	kfree_skb(skb);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ctrlq - restart a suspended control queue
+ *	@data: the control queue to restart
+ *
+ *	Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+	struct sk_buff *skb;
+	unsigned int written = 0;
+	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+
+	spin_lock(&q->sendq.lock);
+	reclaim_completed_tx_imm(&q->q);
+	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
+
+	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
+		struct fw_wr_hdr *wr;
+		unsigned int ndesc = skb->priority;     /* previously saved */
+
+		written += ndesc;
+		/* Write descriptors and free skbs outside the lock to limit
+		 * wait times.  q->full is still set so new skbs will be queued.
+		 */
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		txq_advance(&q->q, ndesc);
+		spin_unlock(&q->sendq.lock);
+
+		inline_tx_skb(skb, &q->q, wr);
+		kfree_skb(skb);
+
+		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+			unsigned long old = q->q.stops;
+
+			ctrlq_check_stop(q, wr);
+			if (q->q.stops != old) {          /* suspended anew */
+				spin_lock(&q->sendq.lock);
+				goto ringdb;
+			}
+		}
+		if (written > 16) {
+			ring_tx_db(q->adap, &q->q, written);
+			written = 0;
+		}
+		spin_lock(&q->sendq.lock);
+	}
+	q->full = 0;
+ringdb: if (written)
+		ring_tx_db(q->adap, &q->q, written);
+	spin_unlock(&q->sendq.lock);
+}
+
+/**
+ *	t4_mgmt_tx - send a management message
+ *	@adap: the adapter
+ *	@skb: the packet containing the management message
+ *
+ *	Send a management message through control queue 0.
+ */
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	is_ofld_imm - check whether a packet can be sent as immediate data
+ *	@skb: the packet
+ *
+ *	Returns true if a packet can be sent as an offload WR with immediate
+ *	data.  We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+	return skb->len <= MAX_IMM_TX_PKT_LEN;
+}
+
+/**
+ *	calc_tx_flits_ofld - calculate # of flits for an offload packet
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for the given offload packet.
+ *	These packets are already fully constructed and no additional headers
+ *	will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (is_ofld_imm(skb))
+		return DIV_ROUND_UP(skb->len, 8);
+
+	flits = skb_transport_offset(skb) / 8U;   /* headers */
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb_tail_pointer(skb) != skb_transport_header(skb))
+		cnt++;
+	return flits + sgl_len(cnt);
+}
+
+/**
+ *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
+ *	@q: the queue to stop
+ *
+ *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
+ *	inability to map packets.  A periodic timer attempts to restart
+ *	queues so marked.
+ */
+static void txq_stop_maperr(struct sge_ofld_txq *q)
+{
+	q->mapping_err++;
+	q->q.stops++;
+	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+		q->adap->sge.txq_maperr);
+}
+
+/**
+ *	ofldtxq_stop - stop an offload Tx queue that has become full
+ *	@q: the queue to stop
+ *	@skb: the packet causing the queue to become full
+ *
+ *	Stops an offload Tx queue that has become full and modifies the packet
+ *	being written to request a wakeup.
+ */
+static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
+
+	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
+	q->q.stops++;
+	q->full = 1;
+}
+
+/**
+ *	service_ofldq - restart a suspended offload queue
+ *	@q: the offload queue
+ *
+ *	Services an offload Tx queue by moving packets from its packet queue
+ *	to the HW Tx ring.  The function starts and ends with the queue locked.
+ */
+static void service_ofldq(struct sge_ofld_txq *q)
+{
+	u64 *pos;
+	int credits;
+	struct sk_buff *skb;
+	unsigned int written = 0;
+	unsigned int flits, ndesc;
+
+	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
+		/*
+		 * We drop the lock but leave skb on sendq, thus retaining
+		 * exclusive access to the state of the queue.
+		 */
+		spin_unlock(&q->sendq.lock);
+
+		reclaim_completed_tx(q->adap, &q->q, false);
+
+		flits = skb->priority;                /* previously saved */
+		ndesc = flits_to_desc(flits);
+		credits = txq_avail(&q->q) - ndesc;
+		BUG_ON(credits < 0);
+		if (unlikely(credits < TXQ_STOP_THRES))
+			ofldtxq_stop(q, skb);
+
+		pos = (u64 *)&q->q.desc[q->q.pidx];
+		if (is_ofld_imm(skb))
+			inline_tx_skb(skb, &q->q, pos);
+		else if (map_skb(q->adap->pdev_dev, skb,
+				 (dma_addr_t *)skb->head)) {
+			txq_stop_maperr(q);
+			spin_lock(&q->sendq.lock);
+			break;
+		} else {
+			int last_desc, hdr_len = skb_transport_offset(skb);
+
+			memcpy(pos, skb->data, hdr_len);
+			write_sgl(skb, &q->q, (void *)pos + hdr_len,
+				  pos + flits, hdr_len,
+				  (dma_addr_t *)skb->head);
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+			skb->dev = q->adap->port[0];
+			skb->destructor = deferred_unmap_destructor;
+#endif
+			last_desc = q->q.pidx + ndesc - 1;
+			if (last_desc >= q->q.size)
+				last_desc -= q->q.size;
+			q->q.sdesc[last_desc].skb = skb;
+		}
+
+		txq_advance(&q->q, ndesc);
+		written += ndesc;
+		if (unlikely(written > 32)) {
+			ring_tx_db(q->adap, &q->q, written);
+			written = 0;
+		}
+
+		spin_lock(&q->sendq.lock);
+		__skb_unlink(skb, &q->sendq);
+		if (is_ofld_imm(skb))
+			kfree_skb(skb);
+	}
+	if (likely(written))
+		ring_tx_db(q->adap, &q->q, written);
+}
+
+/**
+ *	ofld_xmit - send a packet through an offload queue
+ *	@q: the Tx offload queue
+ *	@skb: the packet
+ *
+ *	Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
+	spin_lock(&q->sendq.lock);
+	__skb_queue_tail(&q->sendq, skb);
+	if (q->sendq.qlen == 1)
+		service_ofldq(q);
+	spin_unlock(&q->sendq.lock);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ofldq - restart a suspended offload queue
+ *	@data: the offload queue to restart
+ *
+ *	Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_ofldq(unsigned long data)
+{
+	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+
+	spin_lock(&q->sendq.lock);
+	q->full = 0;            /* the queue actually is completely empty now */
+	service_ofldq(q);
+	spin_unlock(&q->sendq.lock);
+}
+
+/**
+ *	skb_txq - return the Tx queue an offload packet should use
+ *	@skb: the packet
+ *
+ *	Returns the Tx queue an offload packet should use as indicated by bits
+ *	1-15 in the packet's queue_mapping.
+ */
+static inline unsigned int skb_txq(const struct sk_buff *skb)
+{
+	return skb->queue_mapping >> 1;
+}
+
+/**
+ *	is_ctrl_pkt - return whether an offload packet is a control packet
+ *	@skb: the packet
+ *
+ *	Returns whether an offload packet should use an OFLD or a CTRL
+ *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
+ */
+static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
+{
+	return skb->queue_mapping & 1;
+}
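+
+/*
+ * The queue_mapping encoding consumed by skb_txq() and is_ctrl_pkt() is
+ * produced on the ULD side; cxgb4's set_wr_txq() helper, for instance, packs
+ * it as (queue << 1) | prio.
+ */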
+
+static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+	unsigned int idx = skb_txq(skb);
+
+	if (unlikely(is_ctrl_pkt(skb))) {
+		/* Single ctrl queue is a requirement for LE workaround path */
+		if (adap->tids.nsftids)
+			idx = 0;
+		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+	}
+	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+}
+
+/**
+ *	t4_ofld_send - send an offload packet
+ *	@adap: the adapter
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  We use the packet queue_mapping to select the
+ *	appropriate Tx queue as follows: bit 0 indicates whether the packet
+ *	should be sent as regular or control, bits 1-15 select the queue.
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = ofld_send(adap, skb);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	cxgb4_ofld_send - send an offload packet
+ *	@dev: the net device
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
+ *	intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+	return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
+static inline void copy_frags(struct sk_buff *skb,
+			      const struct pkt_gl *gl, unsigned int offset)
+{
+	int i;
+
+	/* usually there's just one frag */
+	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
+			     gl->frags[0].offset + offset,
+			     gl->frags[0].size - offset);
+	skb_shinfo(skb)->nr_frags = gl->nfrags;
+	for (i = 1; i < gl->nfrags; i++)
+		__skb_fill_page_desc(skb, i, gl->frags[i].page,
+				     gl->frags[i].offset,
+				     gl->frags[i].size);
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
+ *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+				   unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
+	 * size, which is expected since buffers are at least PAGE_SIZEd.
+	 * In this case packets up to RX_COPY_THRES have only one fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		skb = dev_alloc_skb(gl->tot_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = dev_alloc_skb(skb_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		copy_frags(skb, gl, pull_len);
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+	}
+out:	return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ *	t4_pktgl_free - free a packet gather list
+ *	@gl: the gather list
+ *
+ *	Releases the pages of a packet gather list.  We do not own the last
+ *	page on the list and do not free it.
+ */
+static void t4_pktgl_free(const struct pkt_gl *gl)
+{
+	int n;
+	const struct page_frag *p;
+
+	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+		put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet.  Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */
+static noinline int handle_trace_pkt(struct adapter *adap,
+				     const struct pkt_gl *gl)
+{
+	struct sk_buff *skb;
+
+	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(gl);
+		return 0;
+	}
+
+	if (is_t4(adap->params.chip))
+		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
+	else
+		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
+
+	skb_reset_mac_header(skb);
+	skb->protocol = htons(0xffff);
+	skb->dev = adap->port[0];
+	netif_receive_skb(skb);
+	return 0;
+}
+
+/**
+ * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
+ * @adap: the adapter
+ * @hwtstamps: time stamp structure to update
+ * @sgetstamp: 60-bit IQE timestamp
+ *
+ * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
+ * convert it to ktime_t and store it in @hwtstamps.
+ **/
+static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
+				     struct skb_shared_hwtstamps *hwtstamps,
+				     u64 sgetstamp)
+{
+	u64 ns;
+	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
+
+	ns = div_u64(tmp, adap->params.vpd.cclk);
+
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
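+
+/* A worked example of the conversion above: if vpd.cclk holds the core clock
+ * in kHz (which is what the 1000 * 1000 scale factor implies), a 250 MHz core
+ * clock gives cclk == 250000 and thus 1000000 / 250000 = 4 ns per SGE
+ * timestamp tick; the cclk / 2 term rounds to the nearest nanosecond.
+ */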
+
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+		   const struct cpl_rx_pkt *pkt)
+{
+	struct adapter *adapter = rxq->rspq.adap;
+	struct sge *s = &adapter->sge;
+	struct port_info *pi;
+	int ret;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(&rxq->rspq.napi);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return;
+	}
+
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
+	skb->data_len = skb->len;
+	skb->truesize += skb->data_len;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxq->rspq.idx);
+	skb_mark_napi_id(skb, &rxq->rspq.napi);
+	pi = netdev_priv(skb->dev);
+	if (pi->rxtstamp)
+		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+					 gl->sgetstamp);
+	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+			     PKT_HASH_TYPE_L3);
+
+	if (unlikely(pkt->vlan_ex)) {
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
+		rxq->stats.vlan_ex++;
+	}
+	ret = napi_gro_frags(&rxq->rspq.napi);
+	if (ret == GRO_HELD)
+		rxq->stats.lro_pkts++;
+	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+		rxq->stats.lro_merged++;
+	rxq->stats.pkts++;
+	rxq->stats.rx_cso++;
+}
+
+/**
+ *	t4_ethrx_handler - process an ingress ethernet packet
+ *	@q: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the RX_PKT message
+ *	@si: the gather list of packet fragments
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+		     const struct pkt_gl *si)
+{
+	bool csum_ok;
+	struct sk_buff *skb;
+	const struct cpl_rx_pkt *pkt;
+	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+	struct sge *s = &q->adap->sge;
+	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
+			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+	struct port_info *pi;
+
+	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
+		return handle_trace_pkt(q->adap, si);
+
+	pkt = (const struct cpl_rx_pkt *)rsp;
+	csum_ok = pkt->csum_calc && !pkt->err_vec &&
+		  (q->netdev->features & NETIF_F_RXCSUM);
+	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+	    !(cxgb_poll_busy_polling(q)) &&
+	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
+		do_gro(rxq, si, pkt);
+		return 0;
+	}
+
+	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(si);
+		rxq->stats.rx_drops++;
+		return 0;
+	}
+
+	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
+	skb->protocol = eth_type_trans(skb, q->netdev);
+	skb_record_rx_queue(skb, q->idx);
+	if (skb->dev->features & NETIF_F_RXHASH)
+		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+			     PKT_HASH_TYPE_L3);
+
+	rxq->stats.pkts++;
+
+	pi = netdev_priv(skb->dev);
+	if (pi->rxtstamp)
+		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
+					 si->sgetstamp);
+	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
+		if (!pkt->ip_frag) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			rxq->stats.rx_cso++;
+		} else if (pkt->l2info & htonl(RXF_IP_F)) {
+			__sum16 c = (__force __sum16)pkt->csum;
+			skb->csum = csum_unfold(c);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+			rxq->stats.rx_cso++;
+		}
+	} else {
+		skb_checksum_none_assert(skb);
+#ifdef CONFIG_CHELSIO_T4_FCOE
+#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
+			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
+
+		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
+			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
+			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
+				if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+			}
+		}
+
+#undef CPL_RX_PKT_FLAGS
+#endif /* CONFIG_CHELSIO_T4_FCOE */
+	}
+
+	if (unlikely(pkt->vlan_ex)) {
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
+		rxq->stats.vlan_ex++;
+	}
+	skb_mark_napi_id(skb, &q->napi);
+	netif_receive_skb(skb);
+	return 0;
+}
+
+/**
+ *	restore_rx_bufs - put back a packet's Rx buffers
+ *	@si: the packet gather list
+ *	@q: the SGE free list
+ *	@frags: number of FL buffers to restore
+ *
+ *	Puts back on an FL the Rx buffers associated with @si.  The buffers
+ *	have already been unmapped and are left unmapped; we mark them as such
+ *	to prevent further unmapping attempts.
+ *
+ *	This function undoes a series of @unmap_rx_buf calls when we find out
+ *	that the current packet can't be processed right away after all and we
+ *	need to come back to it later.  This is a very rare event and there's
+ *	no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+			    int frags)
+{
+	struct rx_sw_desc *d;
+
+	while (frags--) {
+		if (q->cidx == 0)
+			q->cidx = q->size - 1;
+		else
+			q->cidx--;
+		d = &q->sdesc[q->cidx];
+		d->page = si->frags[frags].page;
+		d->dma_addr |= RX_UNMAPPED_BUF;
+		q->avail++;
+	}
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@r: the response descriptor
+ *	@q: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+				   const struct sge_rspq *q)
+{
+	return (r->type_gen >> RSPD_GEN_S) == q->gen;
+}
+
+/**
+ *	rspq_next - advance to the next entry in a response queue
+ *	@q: the queue
+ *
+ *	Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+	if (unlikely(++q->cidx == q->size)) {
+		q->cidx = 0;
+		q->gen ^= 1;
+		q->cur_desc = q->desc;
+	}
+}
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@q: the ingress queue to process
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from an SGE response queue up to the supplied budget.
+ *	Responses include received packets as well as control messages from FW
+ *	or HW.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+	int ret, rsp_type;
+	int budget_left = budget;
+	const struct rsp_ctrl *rc;
+	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = q->adap;
+	struct sge *s = &adapter->sge;
+
+	while (likely(budget_left)) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		dma_rmb();
+		rsp_type = RSPD_TYPE_G(rc->type_gen);
+		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
+			struct page_frag *fp;
+			struct pkt_gl si;
+			const struct rx_sw_desc *rsd;
+			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+			if (len & RSPD_NEWBUF_F) {
+				if (likely(q->offset > 0)) {
+					free_rx_bufs(q->adap, &rxq->fl, 1);
+					q->offset = 0;
+				}
+				len = RSPD_LEN_G(len);
+			}
+			si.tot_len = len;
+
+			/* gather packet fragments */
+			for (frags = 0, fp = si.frags; ; frags++, fp++) {
+				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+				bufsz = get_buf_size(adapter, rsd);
+				fp->page = rsd->page;
+				fp->offset = q->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
+				if (!len)
+					break;
+				unmap_rx_buf(q->adap, &rxq->fl);
+			}
+
+			si.sgetstamp = SGE_TIMESTAMP_G(
+					be64_to_cpu(rc->last_flit));
+			/*
+			 * Last buffer remains mapped so explicitly make it
+			 * coherent for CPU access.
+			 */
+			dma_sync_single_for_cpu(q->adap->pdev_dev,
+						get_buf_addr(rsd),
+						fp->size, DMA_FROM_DEVICE);
+
+			si.va = page_address(si.frags[0].page) +
+				si.frags[0].offset;
+			prefetch(si.va);
+
+			si.nfrags = frags + 1;
+			ret = q->handler(q, q->cur_desc, &si);
+			if (likely(ret == 0))
+				q->offset += ALIGN(fp->size, s->fl_align);
+			else
+				restore_rx_bufs(&si, &rxq->fl, frags);
+		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
+			ret = q->handler(q, q->cur_desc, NULL);
+		} else {
+			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+		}
+
+		if (unlikely(ret)) {
+			/* couldn't process descriptor, back off for recovery */
+			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
+			break;
+		}
+
+		rspq_next(q);
+		budget_left--;
+	}
+
+	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+		__refill_fl(q->adap, &rxq->fl);
+	return budget - budget_left;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int cxgb_busy_poll(struct napi_struct *napi)
+{
+	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+	unsigned int params, work_done;
+	u32 val;
+
+	if (!cxgb_poll_lock_poll(q))
+		return LL_FLUSH_BUSY;
+
+	work_done = process_responses(q, 4);
+	params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
+	q->next_intr_params = params;
+	val = CIDXINC_V(work_done) | SEINTARM_V(params);
+
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!q->bar2_addr))
+		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+			     val | INGRESSQID_V((u32)q->cntxt_id));
+	else {
+		writel(val | INGRESSQID_V(q->bar2_qid),
+		       q->bar2_addr + SGE_UDB_GTS);
+		wmb();
+	}
+
+	cxgb_poll_unlock_poll(q);
+	return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/**
+ *	napi_rx_handler - the NAPI handler for Rx processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.  This does not need any
+ *	locking or protection from interrupts as data interrupts are off at
+ *	this point and other adapter interrupts do not interfere (the latter
+ *	is not a concern at all with MSI-X as non-data interrupts then have
+ *	a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	unsigned int params;
+	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+	int work_done;
+	u32 val;
+
+	if (!cxgb_poll_lock_napi(q))
+		return budget;
+
+	work_done = process_responses(q, budget);
+	if (likely(work_done < budget)) {
+		int timer_index;
+
+		napi_complete(napi);
+		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
+
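+		/* Adaptive holdoff: if this poll handled more packets than
+		 * the quota for the current holdoff timer index, step the
+		 * index up; otherwise step it down, clamping it to the valid
+		 * SGE timer register range.
+		 */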
+		if (q->adaptive_rx) {
+			if (work_done > max(timer_pkt_quota[timer_index],
+					    MIN_NAPI_WORK))
+				timer_index = (timer_index + 1);
+			else
+				timer_index = timer_index - 1;
+
+			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
+			q->next_intr_params =
+					QINTR_TIMER_IDX_V(timer_index) |
+					QINTR_CNT_EN_V(0);
+			params = q->next_intr_params;
+		} else {
+			params = q->next_intr_params;
+			q->next_intr_params = q->intr_params;
+		}
+	} else
+		params = QINTR_TIMER_IDX_V(7);
+
+	val = CIDXINC_V(work_done) | SEINTARM_V(params);
+
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(q->bar2_addr == NULL)) {
+		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+			     val | INGRESSQID_V((u32)q->cntxt_id));
+	} else {
+		writel(val | INGRESSQID_V(q->bar2_qid),
+		       q->bar2_addr + SGE_UDB_GTS);
+		wmb();
+	}
+	cxgb_poll_unlock_napi(q);
+	return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_rspq *q = cookie;
+
+	napi_schedule(&q->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+	unsigned int credits;
+	const struct rsp_ctrl *rc;
+	struct sge_rspq *q = &adap->sge.intrq;
+	u32 val;
+
+	spin_lock(&adap->sge.intrq_lock);
+	for (credits = 0; ; credits++) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		dma_rmb();
+		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
+			unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+			qid -= adap->sge.ingr_start;
+			napi_schedule(&adap->sge.ingr_map[qid]->napi);
+		}
+
+		rspq_next(q);
+	}
+
+	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
+
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(q->bar2_addr == NULL)) {
+		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+			     val | INGRESSQID_V(q->cntxt_id));
+	} else {
+		writel(val | INGRESSQID_V(q->bar2_qid),
+		       q->bar2_addr + SGE_UDB_GTS);
+		wmb();
+	}
+	spin_unlock(&adap->sge.intrq_lock);
+	return credits;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events, as they all use the same MSI vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+
+	if (adap->flags & MASTER_PF)
+		t4_slow_intr_handler(adap);
+	process_intrq(adap);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt line.
+ */
+static irqreturn_t t4_intr_intx(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+
+	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
+	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
+	    process_intrq(adap))
+		return IRQ_HANDLED;
+	return IRQ_NONE;             /* probably shared interrupt */
+}
+
+/**
+ *	t4_intr_handler - select the top-level interrupt handler
+ *	@adap: the adapter
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X, MSI, or INTx).
+ */
+irq_handler_t t4_intr_handler(struct adapter *adap)
+{
+	if (adap->flags & USING_MSIX)
+		return t4_sge_intr_msix;
+	if (adap->flags & USING_MSI)
+		return t4_intr_msi;
+	return t4_intr_intx;
+}
+
+static void sge_rx_timer_cb(unsigned long data)
+{
+	unsigned long m;
+	unsigned int i;
+	struct adapter *adap = (struct adapter *)data;
+	struct sge *s = &adap->sge;
+
+	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
+		for (m = s->starving_fl[i]; m; m &= m - 1) {
+			struct sge_eth_rxq *rxq;
+			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_fl *fl = s->egr_map[id];
+
+			clear_bit(id, s->starving_fl);
+			smp_mb__after_atomic();
+
+			if (fl_starving(adap, fl)) {
+				rxq = container_of(fl, struct sge_eth_rxq, fl);
+				if (napi_reschedule(&rxq->rspq.napi))
+					fl->starving++;
+				else
+					set_bit(id, s->starving_fl);
+			}
+		}
+	/* The remainder of the SGE RX Timer Callback routine is dedicated to
+	 * global Master PF activities like checking for chip ingress stalls,
+	 * etc.
+	 */
+	if (!(adap->flags & MASTER_PF))
+		goto done;
+
+	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
+
+done:
+	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+static void sge_tx_timer_cb(unsigned long data)
+{
+	unsigned long m;
+	unsigned int i, budget;
+	struct adapter *adap = (struct adapter *)data;
+	struct sge *s = &adap->sge;
+
+	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
+		for (m = s->txq_maperr[i]; m; m &= m - 1) {
+			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_ofld_txq *txq = s->egr_map[id];
+
+			clear_bit(id, s->txq_maperr);
+			tasklet_schedule(&txq->qresume_tsk);
+		}
+
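+	/* Reclaim completed Tx descriptors from the Ethernet Tx queues in
+	 * round-robin order, starting where the previous run left off, until
+	 * the per-run reclaim budget is exhausted.
+	 */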
+	budget = MAX_TIMER_TX_RECLAIM;
+	i = s->ethtxq_rover;
+	do {
+		struct sge_eth_txq *q = &s->ethtxq[i];
+
+		if (q->q.in_use &&
+		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+		    __netif_tx_trylock(q->txq)) {
+			int avail = reclaimable(&q->q);
+
+			if (avail) {
+				if (avail > budget)
+					avail = budget;
+
+				free_tx_desc(adap, &q->q, avail, true);
+				q->q.in_use -= avail;
+				budget -= avail;
+			}
+			__netif_tx_unlock(q->txq);
+		}
+
+		if (++i >= s->ethqsets)
+			i = 0;
+	} while (budget && i != s->ethtxq_rover);
+	s->ethtxq_rover = i;
+	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ *	bar2_address - return the BAR2 address for an SGE Queue's Registers
+ *	@adapter: the adapter
+ *	@qid: the SGE Queue ID
+ *	@qtype: the SGE Queue Type (Egress or Ingress)
+ *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ *	Returns the BAR2 address for the SGE Queue Registers associated with
+ *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
+ *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+				  unsigned int qid,
+				  enum t4_bar2_qtype qtype,
+				  unsigned int *pbar2_qid)
+{
+	u64 bar2_qoffset;
+	int ret;
+
+	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
+				&bar2_qoffset, pbar2_qid);
+	if (ret)
+		return NULL;
+
+	return adapter->bar2 + bar2_qoffset;
+}
+
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
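+ * (e.g. an intr_idx of -5 encodes absolute ingress queue id 4, recovered
+ * below as -intr_idx - 1)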
+ */
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+		     struct net_device *dev, int intr_idx,
+		     struct sge_fl *fl, rspq_handler_t hnd, int cong)
+{
+	int ret, flsz = 0;
+	struct fw_iq_cmd c;
+	struct sge *s = &adap->sge;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Size needs to be a multiple of 16, including the status entry. */
+	iq->size = roundup(iq->size, 16);
+
+	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
+			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
+	if (!iq->desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
+				 FW_LEN16(c));
+	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
+		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
+		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
+							-intr_idx - 1));
+	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
+		FW_IQ_CMD_IQGTSMODE_F |
+		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
+		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
+	c.iqsize = htons(iq->size);
+	c.iqaddr = cpu_to_be64(iq->phys_addr);
+	if (cong >= 0)
+		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+
+	if (fl) {
+		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+		/* Allocate the ring for the hardware free list (with space
+		 * for its status page) along with the associated software
+		 * descriptor ring.  The free list size needs to be a multiple
+		 * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+		 * (fl_starve_thres - 1).
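+		 * (For example, with a hypothetical fl_starve_thres of 833,
+		 * the minimum is 832 + 2 * 8 = 848 entries, which is already
+		 * a multiple of 8.)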
+		 */
+		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+			fl->size = s->fl_starve_thres - 1 + 2 * 8;
+		fl->size = roundup(fl->size, 8);
+		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+				      sizeof(struct rx_sw_desc), &fl->addr,
+				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
+		if (!fl->desc)
+			goto fl_nomem;
+
+		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
+		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+					     FW_IQ_CMD_FL0FETCHRO_F |
+					     FW_IQ_CMD_FL0DATARO_F |
+					     FW_IQ_CMD_FL0PADEN_F);
+		if (cong >= 0)
+			c.iqns_to_fl0congen |=
+				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+				      FW_IQ_CMD_FL0CONGCIF_F |
+				      FW_IQ_CMD_FL0CONGEN_F);
+		c.fl0dcaen_to_fl0cidxfthresh =
+			htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+						   FETCHBURSTMAX_512B_X :
+						   FETCHBURSTMAX_256B_X));
+		c.fl0size = htons(flsz);
+		c.fl0addr = cpu_to_be64(fl->addr);
+	}
+
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+	if (ret)
+		goto err;
+
+	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+	napi_hash_add(&iq->napi);
+	iq->cur_desc = iq->desc;
+	iq->cidx = 0;
+	iq->gen = 1;
+	iq->next_intr_params = iq->intr_params;
+	iq->cntxt_id = ntohs(c.iqid);
+	iq->abs_id = ntohs(c.physiqid);
+	iq->bar2_addr = bar2_address(adap,
+				     iq->cntxt_id,
+				     T4_BAR2_QTYPE_INGRESS,
+				     &iq->bar2_qid);
+	iq->size--;                           /* subtract status entry */
+	iq->netdev = dev;
+	iq->handler = hnd;
+
+	/* set offset to -1 to distinguish ingress queues without FL */
+	iq->offset = fl ? 0 : -1;
+
+	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
+
+	if (fl) {
+		fl->cntxt_id = ntohs(c.fl0id);
+		fl->avail = fl->pend_cred = 0;
+		fl->pidx = fl->cidx = 0;
+		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
+		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
+
+		/* Note, we must initialize the BAR2 Free List User Doorbell
+		 * information before refilling the Free List!
+		 */
+		fl->bar2_addr = bar2_address(adap,
+					     fl->cntxt_id,
+					     T4_BAR2_QTYPE_EGRESS,
+					     &fl->bar2_qid);
+		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
+	}
+
+	/* For T5 and later we attempt to set up the Congestion Manager values
+	 * of the new RX Ethernet Queue.  This should really be handled by
+	 * firmware because it's more complex than any host driver wants to
+	 * get involved with; it differs per chip and what we do here is almost
+	 * certainly wrong.  Firmware would be wrong as well, but it would be
+	 * a lot easier to fix in one place ...  For now we do something very
+	 * simple (and hopefully less wrong).
+	 */
+	if (!is_t4(adap->params.chip) && cong >= 0) {
+		u32 param, val;
+		int i;
+
+		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+		if (cong == 0) {
+			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+		} else {
+			val =
+			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+			for (i = 0; i < 4; i++) {
+				if (cong & (1 << i))
+					val |=
+					     CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+			}
+		}
+		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+				    &param, &val);
+		if (ret)
+			dev_warn(adap->pdev_dev,
+				 "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
+				 iq->cntxt_id, -ret);
+	}
+
+	return 0;
+
+fl_nomem:
+	ret = -ENOMEM;
+err:
+	if (iq->desc) {
+		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
+				  iq->desc, iq->phys_addr);
+		iq->desc = NULL;
+	}
+	if (fl && fl->desc) {
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
+				  fl->desc, fl->addr);
+		fl->desc = NULL;
+	}
+	return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+	q->cntxt_id = id;
+	q->bar2_addr = bar2_address(adap,
+				    q->cntxt_id,
+				    T4_BAR2_QTYPE_EGRESS,
+				    &q->bar2_qid);
+	q->in_use = 0;
+	q->cidx = q->pidx = 0;
+	q->stops = q->restarts = 0;
+	q->stat = (void *)&q->desc[q->size];
+	spin_lock_init(&q->db_lock);
+	adap->sge.egr_map[id - adap->sge.egr_start] = q;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+			 struct net_device *dev, struct netdev_queue *netdevq,
+			 unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_eth_cmd c;
+	struct sge *s = &adap->sge;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
+			netdev_queue_numa_node_read(netdevq));
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
+			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
+			    FW_EQ_ETH_CMD_VFN_V(0));
+	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
+				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
+	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+	if (ret) {
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
+	txq->txq = netdevq;
+	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct net_device *dev, unsigned int iqid,
+			  unsigned int cmplqid)
+{
+	int ret, nentries;
+	struct fw_eq_ctrl_cmd c;
+	struct sge *s = &adap->sge;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
+				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
+				 NULL, 0, dev_to_node(adap->pdev_dev));
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
+			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
+			    FW_EQ_CTRL_CMD_VFN_V(0));
+	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
+				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
+	c.physeqid_pkd = htonl(0);
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+	if (ret) {
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
+	txq->adap = adap;
+	skb_queue_head_init(&txq->sendq);
+	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+	txq->full = 0;
+	return 0;
+}
+
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+			  struct net_device *dev, unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_ofld_cmd c;
+	struct sge *s = &adap->sge;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
+			NUMA_NO_NODE);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
+			    FW_EQ_OFLD_CMD_VFN_V(0));
+	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
+	c.fetchszm_to_iqid =
+		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+	c.dcaen_to_eqsize =
+		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+	if (ret) {
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
+	txq->adap = adap;
+	skb_queue_head_init(&txq->sendq);
+	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+	txq->full = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+static void free_txq(struct adapter *adap, struct sge_txq *q)
+{
+	struct sge *s = &adap->sge;
+
+	dma_free_coherent(adap->pdev_dev,
+			  q->size * sizeof(struct tx_desc) + s->stat_len,
+			  q->desc, q->phys_addr);
+	q->cntxt_id = 0;
+	q->sdesc = NULL;
+	q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+			 struct sge_fl *fl)
+{
+	struct sge *s = &adap->sge;
+	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
+	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+		   rq->cntxt_id, fl_id, 0xffff);
+	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
+			  rq->desc, rq->phys_addr);
+	napi_hash_del(&rq->napi);
+	netif_napi_del(&rq->napi);
+	rq->netdev = NULL;
+	rq->cntxt_id = rq->abs_id = 0;
+	rq->desc = NULL;
+
+	if (fl) {
+		free_rx_bufs(adap, fl, fl->avail);
+		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
+				  fl->desc, fl->addr);
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		fl->cntxt_id = 0;
+		fl->desc = NULL;
+	}
+}
+
+/**
+ *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
+ *      @adap: the adapter
+ *      @n: number of queues
+ *      @q: pointer to first queue
+ *
+ *      Release the resources of a consecutive block of offload Rx queues.
+ */
+void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
+{
+	for ( ; n; n--, q++)
+		if (q->rspq.desc)
+			free_rspq_fl(adap, &q->rspq,
+				     q->fl.size ? &q->fl : NULL);
+}
+
+/**
+ *	t4_free_sge_resources - free SGE resources
+ *	@adap: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+	int i;
+	struct sge_eth_rxq *eq = adap->sge.ethrxq;
+	struct sge_eth_txq *etq = adap->sge.ethtxq;
+
+	/* clean up Ethernet Tx/Rx queues */
+	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+		if (eq->rspq.desc)
+			free_rspq_fl(adap, &eq->rspq,
+				     eq->fl.size ? &eq->fl : NULL);
+		if (etq->q.desc) {
+			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
+				       etq->q.cntxt_id);
+			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+			kfree(etq->q.sdesc);
+			free_txq(adap, &etq->q);
+		}
+	}
+
+	/* clean up RDMA and iSCSI Rx queues */
+	t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
+	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
+	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
+
+	/* clean up offload Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+		if (q->q.desc) {
+			tasklet_kill(&q->qresume_tsk);
+			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+					q->q.cntxt_id);
+			free_tx_desc(adap, &q->q, q->q.in_use, false);
+			kfree(q->q.sdesc);
+			__skb_queue_purge(&q->sendq);
+			free_txq(adap, &q->q);
+		}
+	}
+
+	/* clean up control Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+		if (cq->q.desc) {
+			tasklet_kill(&cq->qresume_tsk);
+			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+					cq->q.cntxt_id);
+			__skb_queue_purge(&cq->sendq);
+			free_txq(adap, &cq->q);
+		}
+	}
+
+	if (adap->sge.fw_evtq.desc)
+		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+	if (adap->sge.intrq.desc)
+		free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+	/* clear the reverse egress queue map */
+	memset(adap->sge.egr_map, 0,
+	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+	adap->sge.ethtxq_rover = 0;
+	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ *	t4_sge_stop - disable SGE operation
+ *	@adap: the adapter
+ *
+ *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	this is effective only if measures have been taken to disable any HW
+ *	events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+	int i;
+	struct sge *s = &adap->sge;
+
+	if (in_interrupt())  /* actions below require waiting */
+		return;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+		struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+		if (q->q.desc)
+			tasklet_kill(&q->qresume_tsk);
+	}
+	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+		if (cq->q.desc)
+			tasklet_kill(&cq->qresume_tsk);
+	}
+}
+
+/**
+ *	t4_sge_init_soft - grab core SGE values needed by SGE code
+ *	@adap: the adapter
+ *
+ *	Grab the SGE operating parameters that we need to do our job and
+ *	make sure we can live with them.
+ */
+static int t4_sge_init_soft(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+	u32 ingress_rx_threshold;
+
+	/*
+	 * Verify that CPL messages are going to the Ingress Queue for
+	 * process_responses() and that only packet data is going to the
+	 * Free Lists.
+	 */
+	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
+		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Validate the Host Buffer Register Array indices that we want to
+	 * use ...
+	 *
+	 * XXX Note that we should really read through the Host Buffer Size
+	 * XXX register array and find the indices of the Buffer Sizes which
+	 * XXX meet our needs!
+	 */
+	#define READ_FL_BUF(x) \
+		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
+
+	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	#undef READ_FL_BUF
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg-1)) != 0) {
+		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
+			fl_small_pg, fl_large_pg);
+		return -EINVAL;
+	}
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
+	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
+			fl_small_mtu, fl_large_mtu);
+		return -EINVAL;
+	}
+
+	/*
+	 * Retrieve our RX interrupt holdoff timer values and counter
+	 * threshold values from the SGE parameters.
+	 */
+	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
+	s->timer_val[0] = core_ticks_to_us(adap,
+		TIMERVALUE0_G(timer_value_0_and_1));
+	s->timer_val[1] = core_ticks_to_us(adap,
+		TIMERVALUE1_G(timer_value_0_and_1));
+	s->timer_val[2] = core_ticks_to_us(adap,
+		TIMERVALUE2_G(timer_value_2_and_3));
+	s->timer_val[3] = core_ticks_to_us(adap,
+		TIMERVALUE3_G(timer_value_2_and_3));
+	s->timer_val[4] = core_ticks_to_us(adap,
+		TIMERVALUE4_G(timer_value_4_and_5));
+	s->timer_val[5] = core_ticks_to_us(adap,
+		TIMERVALUE5_G(timer_value_4_and_5));
+
+	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
+
+	return 0;
+}
+
+/**
+ *     t4_sge_init - initialize SGE
+ *     @adap: the adapter
+ *
+ *     Perform low-level SGE code initialization needed every time after a
+ *     chip reset.
+ */
+int t4_sge_init(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	u32 sge_control, sge_control2, sge_conm_ctrl;
+	unsigned int ingpadboundary, ingpackboundary;
+	int ret, egress_threshold;
+
+	/*
+	 * Ingress Padding Boundary and Egress Status Page Size are set up by
+	 * t4_fixup_host_params().
+	 */
+	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+	s->pktshift = PKTSHIFT_G(sge_control);
+	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.  (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end up doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+			       INGPADBOUNDARY_SHIFT_X);
+	if (is_t4(adap->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
+
+	ret = t4_sge_init_soft(adap);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it.  This needs to be larger than the
+	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries.  (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+	 * there was only a single field to control this.  For T5 there's the
+	 * original field which now only applies to Unpacked Mode Free List
+	 * buffers and a new field which only applies to Packed Mode Free List
+	 * buffers.
+	 */
+	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
+	if (is_t4(adap->params.chip))
+		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
+	else
+		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
+	s->fl_starve_thres = 2*egress_threshold + 1;
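+	/* E.g. a (hypothetical) egress threshold of 64 -- i.e. 128 Free List
+	 * pointers -- would yield an fl_starve_thres of 129 entries.
+	 */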
+
+	t4_idma_monitor_init(adap, &s->idma_monitor);
+
+	/* Set up timers used for recurring callbacks to process RX and TX
+	 * administrative tasks.
+	 */
+	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
+	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+
+	spin_lock_init(&s->intrq_lock);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h
new file mode 100644
index 0000000..54b7181
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h
@@ -0,0 +1,85 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2015 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_CHIP_TYPE_H__
+#define __T4_CHIP_TYPE_H__
+
+#define CHELSIO_T4		0x4
+#define CHELSIO_T5		0x5
+#define CHELSIO_T6		0x6
+
+/* We code the Chelsio T4 Family "Chip Code" as a tuple:
+ *
+ *     (Chip Version, Chip Revision)
+ *
+ * where:
+ *
+ *     Chip Version: is T4, T5, etc.
+ *     Chip Revision: is the FAB "spin" of the Chip Version.
+ */
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
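+
+/* For example, CHELSIO_CHIP_CODE(CHELSIO_T5, 1) is (0x5 << 4) | 1 = 0x51,
+ * i.e. the T5_A1 value below.
+ */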
+
+enum chip_type {
+	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+	T4_FIRST_REV	= T4_A1,
+	T4_LAST_REV	= T4_A2,
+
+	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+	T5_FIRST_REV	= T5_A0,
+	T5_LAST_REV	= T5_A1,
+
+	T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+	T6_FIRST_REV	= T6_A0,
+	T6_LAST_REV	= T6_A0,
+};
+
+static inline int is_t4(enum chip_type chip)
+{
+	return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
+}
+
+static inline int is_t5(enum chip_type chip)
+{
+	return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+	return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6);
+}
+
+#endif /* __T4_CHIP_TYPE_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
new file mode 100644
index 0000000..cf61a58
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -0,0 +1,7883 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+#include "t4fw_api.h"
+#include "t4fw_version.h"
+
+/**
+ *	t4_wait_op_done_val - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *	@valp: where to store the value of the register at completion time
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  If @valp is not NULL the value of the register
+ *	at the time it indicated completion is stored there.  Returns 0 if the
+ *	operation completes and	-EAGAIN	otherwise.
+ *	operation completes and -EAGAIN otherwise.
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			       int polarity, int attempts, int delay, u32 *valp)
+{
+	while (1) {
+		u32 val = t4_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp)
+				*valp = val;
+			return 0;
+		}
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
+
+/**
+ *	t4_set_reg_field - set a register field to a value
+ *	@adapter: the adapter to program
+ *	@addr: the register address
+ *	@mask: specifies the portion of the register to modify
+ *	@val: the new value for the register field
+ *
+ *	Sets a register field specified by the supplied mask to the
+ *	given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+		      u32 val)
+{
+	u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+	t4_write_reg(adapter, addr, v | val);
+	(void) t4_read_reg(adapter, addr);      /* flush */
+}
+
+/**
+ *	t4_read_indirect - read indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect address
+ *	@data_reg: register holding the value of the indirect register
+ *	@vals: where the read register values are stored
+ *	@nregs: how many indirect registers to read
+ *	@start_idx: index of first indirect register to read
+ *
+ *	Reads registers that are accessed indirectly through an address/data
+ *	register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+			     unsigned int data_reg, u32 *vals,
+			     unsigned int nregs, unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t4_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/**
+ *	t4_write_indirect - write indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect addresses
+ *	@data_reg: register holding the value for the indirect registers
+ *	@vals: values to write
+ *	@nregs: how many indirect registers to write
+ *	@start_idx: address of first indirect register to write
+ *
+ *	Writes a sequential block of registers that are accessed indirectly
+ *	through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+		       unsigned int data_reg, const u32 *vals,
+		       unsigned int nregs, unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx++);
+		t4_write_reg(adap, data_reg, *vals++);
+	}
+}
+
+/*
+ * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
+ * mechanism.  This guarantees that we get the real value even if we're
+ * operating within a Virtual Machine and the Hypervisor is trapping our
+ * Configuration Space accesses.
+ */
+void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
+{
+	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		req |= ENABLE_F;
+	else
+		req |= T6_ENABLE_F;
+
+	if (is_t4(adap->params.chip))
+		req |= LOCALCFG_F;
+
+	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
+	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
+
+	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
+	 * Configuration Space read.  (None of the other fields matter when
+	 * ENABLE is 0 so a simple register write is easier than a
+	 * read-modify-write via t4_set_reg_field().)
+	 */
+	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
+}
+
+/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+	static const char *const reason[] = {
+		"Crash",                        /* PCIE_FW_EVAL_CRASH */
+		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
+		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
+		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
+		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
+		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+		"Reserved",                     /* reserved */
+	};
+	u32 pcie_fw;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	if (pcie_fw & PCIE_FW_ERR_F)
+		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+			reason[PCIE_FW_EVAL_G(pcie_fw)]);
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+			 u32 mbox_addr)
+{
+	for ( ; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+	struct fw_debug_cmd asrt;
+
+	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+	dev_alert(adap->pdev_dev,
+		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
+}
+
+static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+{
+	dev_err(adap->pdev_dev,
+		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+		(unsigned long long)t4_read_reg64(adap, data_reg),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
+}
+
+/**
+ *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+ *	@adap: the adapter
+ *	@mbox: index of the mailbox to use
+ *	@cmd: the command to write
+ *	@size: command length in bytes
+ *	@rpl: where to optionally store the reply
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *	@timeout: time to wait for command to finish before timing out
+ *
+ *	Sends the given command to FW through the selected mailbox and waits
+ *	for the FW to execute the command.  If @rpl is not %NULL it is used to
+ *	store the FW's reply to the command.  The command and its optional
+ *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
+ *	to respond.  @sleep_ok determines whether we may sleep while awaiting
+ *	the response.  If sleeping is allowed we use progressive backoff
+ *	otherwise we spin.
+ *
+ *	The return value is 0 on success or a negative errno on failure.  A
+ *	failure can happen either because we are not able to execute the
+ *	command or FW executes it but signals an error.  In the latter case
+ *	the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout)
+{
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+	};
+
+	u32 v;
+	u64 res;
+	int i, ms, delay_idx;
+	const __be64 *p = cmd;
+	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+
+	if ((size & 15) || size > MBOX_LEN)
+		return -EINVAL;
+
+	/*
+	 * If the device is off-line, as in EEH, commands will time out.
+	 * Fail them early so we don't waste time waiting.
+	 */
+	if (adap->pdev->error_state != pci_channel_io_normal)
+		return -EIO;
+
+	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
+	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
+
+	if (v != MBOX_OWNER_DRV)
+		return v ? -EBUSY : -ETIMEDOUT;
+
+	for (i = 0; i < size; i += 8)
+		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
+	t4_read_reg(adap, ctl_reg);          /* flush write */
+
+	delay_idx = 0;
+	ms = delay[0];
+
+	for (i = 0; i < timeout; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];  /* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		v = t4_read_reg(adap, ctl_reg);
+		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+			if (!(v & MBMSGVALID_F)) {
+				t4_write_reg(adap, ctl_reg, 0);
+				continue;
+			}
+
+			res = t4_read_reg64(adap, data_reg);
+			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
+				fw_asrt(adap, data_reg);
+				res = FW_CMD_RETVAL_V(EIO);
+			} else if (rpl) {
+				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+			}
+
+			if (FW_CMD_RETVAL_G((int)res))
+				dump_mbox(adap, mbox, data_reg);
+			t4_write_reg(adap, ctl_reg, 0);
+			return -FW_CMD_RETVAL_G((int)res);
+		}
+	}
+
+	dump_mbox(adap, mbox, data_reg);
+	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+		*(const u8 *)cmd, mbox);
+	t4_report_fw_error(adap);
+	return -ETIMEDOUT;
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok)
+{
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+				       FW_CMD_MAX_TIMEOUT);
+}
+
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+	u32 edc_ecc_err_addr_reg;
+	u32 rdata_reg;
+
+	if (is_t4(adap->params.chip)) {
+		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+		return 0;
+	}
+	if (idx != 0 && idx != 1) {
+		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+		return 0;
+	}
+
+	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+	CH_WARN(adap,
+		"edc%d err addr 0x%x: 0x%x.\n",
+		idx, edc_ecc_err_addr_reg,
+		t4_read_reg(adap, edc_ecc_err_addr_reg));
+	CH_WARN(adap,
+		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+		rdata_reg,
+		(unsigned long long)t4_read_reg64(adap, rdata_reg),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+	return 0;
+}
+
+/**
+ *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ *	@adap: the adapter
+ *	@win: PCI-E Memory Window to use
+ *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ *	@addr: address within indicated memory type
+ *	@len: amount of memory to transfer
+ *	@hbuf: host memory buffer
+ *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ *	Reads/writes an [almost] arbitrary memory region in the firmware: the
+ *	firmware memory address and host buffer must be aligned on 32-bit
+ *	boundaries; the length may be arbitrary.  The memory is transferred as
+ *	a raw byte sequence from/to the firmware's memory.  If this memory
+ *	contains data structures which contain multi-byte integers, it's the
+ *	caller's responsibility to perform appropriate byte order conversions.
+ */
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
+		 u32 len, void *hbuf, int dir)
+{
+	u32 pos, offset, resid, memoffset;
+	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+	u32 *buf;
+
+	/* Argument sanity checks ...
+	 */
+	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+		return -EINVAL;
+	buf = (u32 *)hbuf;
+
+	/* It's convenient to be able to handle lengths which aren't a
+	 * multiple of 32-bits because we often end up transferring files to
+	 * the firmware.  So we'll handle that by normalizing the length here
+	 * and then handling any residual transfer at the end.
+	 */
+	resid = len & 0x3;
+	len -= resid;
+
+	/* Offset into the region of memory which is being accessed
+	 * MEM_EDC0 = 0
+	 * MEM_EDC1 = 1
+	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
+	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
+	 */
+	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
+	if (mtype != MEM_MC1)
+		memoffset = (mtype * (edc_size * 1024 * 1024));
+	else {
+		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+						      MA_EXT_MEMORY0_BAR_A));
+		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+	}
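+	/* For example, with a hypothetical 128 MB per EDC, MEM_EDC1 would
+	 * begin 128 MB into the flat address map and MEM_MC 256 MB in;
+	 * MEM_MC1 additionally skips past the first external memory region.
+	 */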
+
+	/* Determine the PCIE_MEM_ACCESS_OFFSET */
+	addr = addr + memoffset;
+
+	/* Each PCI-E Memory Window is programmed with a window size -- or
+	 * "aperture" -- which controls the granularity of its mapping onto
+	 * adapter memory.  We need to grab that aperture in order to know
+	 * how to use the specified window.  The window is also programmed
+	 * with the base address of the Memory Window in BAR0's address
+	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
+	 * the address is relative to BAR0.
+	 */
+	mem_reg = t4_read_reg(adap,
+			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
+						  win));
+	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
+	if (is_t4(adap->params.chip))
+		mem_base -= adap->t4_bar0;
+	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
+
+	/* Calculate our initial PCI-E Memory Window Position and Offset into
+	 * that Window.
+	 */
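+	/* For example, with a 64 KB aperture an addr of 0x12345 yields
+	 * pos = 0x10000 and offset = 0x2345.
+	 */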
+	pos = addr & ~(mem_aperture-1);
+	offset = addr - pos;
+
+	/* Set up initial PCI-E Memory Window to cover the start of our
+	 * transfer.  (Read it back to ensure that changes propagate before we
+	 * attempt to use the new value.)
+	 */
+	t4_write_reg(adap,
+		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
+		     pos | win_pf);
+	t4_read_reg(adap,
+		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
+
+	/* Transfer data to/from the adapter as long as there's an integral
+	 * number of 32-bit transfers to complete.
+	 *
+	 * A note on Endianness issues:
+	 *
+	 * The "register" reads and writes below from/to the PCI-E Memory
+	 * Window invoke the standard adapter Big-Endian to PCI-E Link
+	 * Little-Endian "swizzle."  As a result, if we have the following
+	 * data in adapter memory:
+	 *
+	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+	 *     Address:      i+0  i+1  i+2  i+3
+	 *
+	 * Then a read of the adapter memory via the PCI-E Memory Window
+	 * will yield:
+	 *
+	 *     x = readl(i)
+	 *         31                  0
+	 *         [ b3 | b2 | b1 | b0 ]
+	 *
+	 * If this value is stored into local memory on a Little-Endian system
+	 * it will show up correctly in local memory as:
+	 *
+	 *     ( ..., b0, b1, b2, b3, ... )
+	 *
+	 * But on a Big-Endian system, the store will show up in memory
+	 * incorrectly swizzled as:
+	 *
+	 *     ( ..., b3, b2, b1, b0, ... )
+	 *
+	 * So we need to account for this in the reads and writes to the
+	 * PCI-E Memory Window below by undoing the register read/write
+	 * swizzles.
+	 */
+	while (len > 0) {
+		if (dir == T4_MEMORY_READ)
+			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+						mem_base + offset));
+		else
+			t4_write_reg(adap, mem_base + offset,
+				     (__force u32)cpu_to_le32(*buf++));
+		offset += sizeof(__be32);
+		len -= sizeof(__be32);
+
+		/* If we've reached the end of our current window aperture,
+		 * move the PCI-E Memory Window on to the next.  Note that
+		 * doing this here even when "len" has dropped to 0 allows us
+		 * to set up the PCI-E Memory Window for a possible final
+		 * residual transfer below ...
+		 */
+		if (offset == mem_aperture) {
+			pos += mem_aperture;
+			offset = 0;
+			t4_write_reg(adap,
+				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+						    win), pos | win_pf);
+			t4_read_reg(adap,
+				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+						    win));
+		}
+	}
+
+	/* If the original transfer had a length which wasn't a multiple of
+	 * 32-bits, now's where we need to finish off the transfer of the
+	 * residual amount.  The PCI-E Memory Window has already been moved
+	 * above (if necessary) to cover this final transfer.
+	 */
+	if (resid) {
+		union {
+			u32 word;
+			char byte[4];
+		} last;
+		unsigned char *bp;
+		int i;
+
+		if (dir == T4_MEMORY_READ) {
+			last.word = le32_to_cpu(
+					(__force __le32)t4_read_reg(adap,
+						mem_base + offset));
+			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+				bp[i] = last.byte[i];
+		} else {
+			last.word = *buf;
+			for (i = resid; i < 4; i++)
+				last.byte[i] = 0;
+			t4_write_reg(adap, mem_base + offset,
+				     (__force u32)cpu_to_le32(last.word));
+		}
+	}
+
+	return 0;
+}
+
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+	u32 val, ldst_addrspace;
+
+	/* Construct and send a Firmware LDST Command to retrieve the
+	 * specified PCI-E Configuration Space register.
+	 */
+	struct fw_ldst_cmd ldst_cmd;
+	int ret;
+
+	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					       FW_CMD_REQUEST_F |
+					       FW_CMD_READ_F |
+					       ldst_addrspace);
+	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+	ldst_cmd.u.pcie.ctrl_to_fn =
+		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+	ldst_cmd.u.pcie.r = reg;
+
+	/* If the LDST Command succeeds, return the result, otherwise
+	 * fall through to reading it directly ourselves ...
+	 */
+	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+			 &ldst_cmd);
+	if (ret == 0)
+		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+	else
+		/* Read the desired Configuration Space register via the PCI-E
+		 * Backdoor mechanism.
+		 */
+		t4_hw_pci_read_cfg4(adap, reg, &val);
+	return val;
+}
+
+/* Get the window based on the base passed to it.
+ * The window aperture is currently unhandled, but there is no use case for
+ * it right now.
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+			 u32 memwin_base)
+{
+	u32 ret;
+
+	if (is_t4(adap->params.chip)) {
+		u32 bar0;
+
+		/* Truncation intentional: we only read the bottom 32-bits of
+		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
+		 * mechanism to read BAR0 instead of using
+		 * pci_resource_start() because we could be operating from
+		 * within a Virtual Machine which is trapping our accesses to
+		 * our Configuration Space and we need to set up the PCI-E
+		 * Memory Window decoders with the actual addresses which will
+		 * be coming across the PCI-E link.
+		 */
+		bar0 = t4_read_pcie_cfg4(adap, pci_base);
+		bar0 &= pci_mask;
+		adap->t4_bar0 = bar0;
+
+		ret = bar0 + memwin_base;
+	} else {
+		/* For T5, only relative offset inside the PCIe BAR is passed */
+		ret = memwin_base;
+	}
+	return ret;
+}
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges.  (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
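+	/* The WINDOW field holds log2(aperture) - WINDOW_SHIFT_X, the inverse
+	 * of the aperture decode performed in t4_memory_rw() above.
+	 */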
+	t4_write_reg(adap,
+		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+		     memwin_base | BIR_V(0) |
+		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+	t4_read_reg(adap,
+		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
+
+/**
+ *	t4_get_regs_len - return the size of the chip's register set
+ *	@adapter: the adapter
+ *
+ *	Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		return T4_REGMAP_SIZE;
+
+	case CHELSIO_T5:
+	case CHELSIO_T6:
+		return T5_REGMAP_SIZE;
+	}
+
+	dev_err(adapter->pdev_dev,
+		"Unsupported chip version %d\n", chip_version);
+	return 0;
+}
+
+/**
+ *	t4_get_regs - read chip registers into provided buffer
+ *	@adap: the adapter
+ *	@buf: register buffer
+ *	@buf_size: size (in bytes) of register buffer
+ *
+ *	If the provided register buffer isn't large enough for the chip's
+ *	full register range, the register dump will be truncated to the
+ *	register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+	static const unsigned int t4_reg_ranges[] = {
+		0x1008, 0x1108,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x123c,
+		0x1300, 0x173c,
+		0x1800, 0x18fc,
+		0x3000, 0x30d8,
+		0x30e0, 0x30e4,
+		0x30ec, 0x5910,
+		0x5920, 0x5924,
+		0x5960, 0x5960,
+		0x5968, 0x5968,
+		0x5970, 0x5970,
+		0x5978, 0x5978,
+		0x5980, 0x5980,
+		0x5988, 0x5988,
+		0x5990, 0x5990,
+		0x5998, 0x5998,
+		0x59a0, 0x59d4,
+		0x5a00, 0x5ae0,
+		0x5ae8, 0x5ae8,
+		0x5af0, 0x5af0,
+		0x5af8, 0x5af8,
+		0x6000, 0x6098,
+		0x6100, 0x6150,
+		0x6200, 0x6208,
+		0x6240, 0x6248,
+		0x6280, 0x62b0,
+		0x62c0, 0x6338,
+		0x6370, 0x638c,
+		0x6400, 0x643c,
+		0x6500, 0x6524,
+		0x6a00, 0x6a04,
+		0x6a14, 0x6a38,
+		0x6a60, 0x6a70,
+		0x6a78, 0x6a78,
+		0x6b00, 0x6b0c,
+		0x6b1c, 0x6b84,
+		0x6bf0, 0x6bf8,
+		0x6c00, 0x6c0c,
+		0x6c1c, 0x6c84,
+		0x6cf0, 0x6cf8,
+		0x6d00, 0x6d0c,
+		0x6d1c, 0x6d84,
+		0x6df0, 0x6df8,
+		0x6e00, 0x6e0c,
+		0x6e1c, 0x6e84,
+		0x6ef0, 0x6ef8,
+		0x6f00, 0x6f0c,
+		0x6f1c, 0x6f84,
+		0x6ff0, 0x6ff8,
+		0x7000, 0x700c,
+		0x701c, 0x7084,
+		0x70f0, 0x70f8,
+		0x7100, 0x710c,
+		0x711c, 0x7184,
+		0x71f0, 0x71f8,
+		0x7200, 0x720c,
+		0x721c, 0x7284,
+		0x72f0, 0x72f8,
+		0x7300, 0x730c,
+		0x731c, 0x7384,
+		0x73f0, 0x73f8,
+		0x7400, 0x7450,
+		0x7500, 0x7530,
+		0x7600, 0x760c,
+		0x7614, 0x761c,
+		0x7680, 0x76cc,
+		0x7700, 0x7798,
+		0x77c0, 0x77fc,
+		0x7900, 0x79fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c38,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7ea4,
+		0x7eac, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8e04,
+		0x8e10, 0x8e1c,
+		0x8e30, 0x8e78,
+		0x8ea0, 0x8eb8,
+		0x8ec0, 0x8f6c,
+		0x8fc0, 0x9008,
+		0x9010, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x9074,
+		0x90fc, 0x90fc,
+		0x9400, 0x9408,
+		0x9410, 0x9458,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96bc,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0x9fec,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xea7c,
+		0xf000, 0x11190,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e4,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x1924c,
+		0x193f8, 0x1943c,
+		0x1944c, 0x19474,
+		0x19490, 0x194e0,
+		0x194f0, 0x194f8,
+		0x19800, 0x19c08,
+		0x19c10, 0x19c90,
+		0x19ca0, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e40,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f4c,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f4,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e040, 0x1e04c,
+		0x1e284, 0x1e28c,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e440, 0x1e44c,
+		0x1e684, 0x1e68c,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e840, 0x1e84c,
+		0x1ea84, 0x1ea8c,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec40, 0x1ec4c,
+		0x1ee84, 0x1ee8c,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f040, 0x1f04c,
+		0x1f284, 0x1f28c,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f440, 0x1f44c,
+		0x1f684, 0x1f68c,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f840, 0x1f84c,
+		0x1fa84, 0x1fa8c,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc40, 0x1fc4c,
+		0x1fe84, 0x1fe8c,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x20000, 0x2002c,
+		0x20100, 0x2013c,
+		0x20190, 0x201a0,
+		0x201a8, 0x201b8,
+		0x201c4, 0x201c8,
+		0x20200, 0x20318,
+		0x20400, 0x204b4,
+		0x204c0, 0x20528,
+		0x20540, 0x20614,
+		0x21000, 0x21040,
+		0x2104c, 0x21060,
+		0x210c0, 0x210ec,
+		0x21200, 0x21268,
+		0x21270, 0x21284,
+		0x212fc, 0x21388,
+		0x21400, 0x21404,
+		0x21500, 0x21500,
+		0x21510, 0x21518,
+		0x2152c, 0x21530,
+		0x2153c, 0x2153c,
+		0x21550, 0x21554,
+		0x21600, 0x21600,
+		0x21608, 0x2161c,
+		0x21624, 0x21628,
+		0x21630, 0x21634,
+		0x2163c, 0x2163c,
+		0x21700, 0x2171c,
+		0x21780, 0x2178c,
+		0x21800, 0x21818,
+		0x21820, 0x21828,
+		0x21830, 0x21848,
+		0x21850, 0x21854,
+		0x21860, 0x21868,
+		0x21870, 0x21870,
+		0x21878, 0x21898,
+		0x218a0, 0x218a8,
+		0x218b0, 0x218c8,
+		0x218d0, 0x218d4,
+		0x218e0, 0x218e8,
+		0x218f0, 0x218f0,
+		0x218f8, 0x21a18,
+		0x21a20, 0x21a28,
+		0x21a30, 0x21a48,
+		0x21a50, 0x21a54,
+		0x21a60, 0x21a68,
+		0x21a70, 0x21a70,
+		0x21a78, 0x21a98,
+		0x21aa0, 0x21aa8,
+		0x21ab0, 0x21ac8,
+		0x21ad0, 0x21ad4,
+		0x21ae0, 0x21ae8,
+		0x21af0, 0x21af0,
+		0x21af8, 0x21c18,
+		0x21c20, 0x21c20,
+		0x21c28, 0x21c30,
+		0x21c38, 0x21c38,
+		0x21c80, 0x21c98,
+		0x21ca0, 0x21ca8,
+		0x21cb0, 0x21cc8,
+		0x21cd0, 0x21cd4,
+		0x21ce0, 0x21ce8,
+		0x21cf0, 0x21cf0,
+		0x21cf8, 0x21d7c,
+		0x21e00, 0x21e04,
+		0x22000, 0x2202c,
+		0x22100, 0x2213c,
+		0x22190, 0x221a0,
+		0x221a8, 0x221b8,
+		0x221c4, 0x221c8,
+		0x22200, 0x22318,
+		0x22400, 0x224b4,
+		0x224c0, 0x22528,
+		0x22540, 0x22614,
+		0x23000, 0x23040,
+		0x2304c, 0x23060,
+		0x230c0, 0x230ec,
+		0x23200, 0x23268,
+		0x23270, 0x23284,
+		0x232fc, 0x23388,
+		0x23400, 0x23404,
+		0x23500, 0x23500,
+		0x23510, 0x23518,
+		0x2352c, 0x23530,
+		0x2353c, 0x2353c,
+		0x23550, 0x23554,
+		0x23600, 0x23600,
+		0x23608, 0x2361c,
+		0x23624, 0x23628,
+		0x23630, 0x23634,
+		0x2363c, 0x2363c,
+		0x23700, 0x2371c,
+		0x23780, 0x2378c,
+		0x23800, 0x23818,
+		0x23820, 0x23828,
+		0x23830, 0x23848,
+		0x23850, 0x23854,
+		0x23860, 0x23868,
+		0x23870, 0x23870,
+		0x23878, 0x23898,
+		0x238a0, 0x238a8,
+		0x238b0, 0x238c8,
+		0x238d0, 0x238d4,
+		0x238e0, 0x238e8,
+		0x238f0, 0x238f0,
+		0x238f8, 0x23a18,
+		0x23a20, 0x23a28,
+		0x23a30, 0x23a48,
+		0x23a50, 0x23a54,
+		0x23a60, 0x23a68,
+		0x23a70, 0x23a70,
+		0x23a78, 0x23a98,
+		0x23aa0, 0x23aa8,
+		0x23ab0, 0x23ac8,
+		0x23ad0, 0x23ad4,
+		0x23ae0, 0x23ae8,
+		0x23af0, 0x23af0,
+		0x23af8, 0x23c18,
+		0x23c20, 0x23c20,
+		0x23c28, 0x23c30,
+		0x23c38, 0x23c38,
+		0x23c80, 0x23c98,
+		0x23ca0, 0x23ca8,
+		0x23cb0, 0x23cc8,
+		0x23cd0, 0x23cd4,
+		0x23ce0, 0x23ce8,
+		0x23cf0, 0x23cf0,
+		0x23cf8, 0x23d7c,
+		0x23e00, 0x23e04,
+		0x24000, 0x2402c,
+		0x24100, 0x2413c,
+		0x24190, 0x241a0,
+		0x241a8, 0x241b8,
+		0x241c4, 0x241c8,
+		0x24200, 0x24318,
+		0x24400, 0x244b4,
+		0x244c0, 0x24528,
+		0x24540, 0x24614,
+		0x25000, 0x25040,
+		0x2504c, 0x25060,
+		0x250c0, 0x250ec,
+		0x25200, 0x25268,
+		0x25270, 0x25284,
+		0x252fc, 0x25388,
+		0x25400, 0x25404,
+		0x25500, 0x25500,
+		0x25510, 0x25518,
+		0x2552c, 0x25530,
+		0x2553c, 0x2553c,
+		0x25550, 0x25554,
+		0x25600, 0x25600,
+		0x25608, 0x2561c,
+		0x25624, 0x25628,
+		0x25630, 0x25634,
+		0x2563c, 0x2563c,
+		0x25700, 0x2571c,
+		0x25780, 0x2578c,
+		0x25800, 0x25818,
+		0x25820, 0x25828,
+		0x25830, 0x25848,
+		0x25850, 0x25854,
+		0x25860, 0x25868,
+		0x25870, 0x25870,
+		0x25878, 0x25898,
+		0x258a0, 0x258a8,
+		0x258b0, 0x258c8,
+		0x258d0, 0x258d4,
+		0x258e0, 0x258e8,
+		0x258f0, 0x258f0,
+		0x258f8, 0x25a18,
+		0x25a20, 0x25a28,
+		0x25a30, 0x25a48,
+		0x25a50, 0x25a54,
+		0x25a60, 0x25a68,
+		0x25a70, 0x25a70,
+		0x25a78, 0x25a98,
+		0x25aa0, 0x25aa8,
+		0x25ab0, 0x25ac8,
+		0x25ad0, 0x25ad4,
+		0x25ae0, 0x25ae8,
+		0x25af0, 0x25af0,
+		0x25af8, 0x25c18,
+		0x25c20, 0x25c20,
+		0x25c28, 0x25c30,
+		0x25c38, 0x25c38,
+		0x25c80, 0x25c98,
+		0x25ca0, 0x25ca8,
+		0x25cb0, 0x25cc8,
+		0x25cd0, 0x25cd4,
+		0x25ce0, 0x25ce8,
+		0x25cf0, 0x25cf0,
+		0x25cf8, 0x25d7c,
+		0x25e00, 0x25e04,
+		0x26000, 0x2602c,
+		0x26100, 0x2613c,
+		0x26190, 0x261a0,
+		0x261a8, 0x261b8,
+		0x261c4, 0x261c8,
+		0x26200, 0x26318,
+		0x26400, 0x264b4,
+		0x264c0, 0x26528,
+		0x26540, 0x26614,
+		0x27000, 0x27040,
+		0x2704c, 0x27060,
+		0x270c0, 0x270ec,
+		0x27200, 0x27268,
+		0x27270, 0x27284,
+		0x272fc, 0x27388,
+		0x27400, 0x27404,
+		0x27500, 0x27500,
+		0x27510, 0x27518,
+		0x2752c, 0x27530,
+		0x2753c, 0x2753c,
+		0x27550, 0x27554,
+		0x27600, 0x27600,
+		0x27608, 0x2761c,
+		0x27624, 0x27628,
+		0x27630, 0x27634,
+		0x2763c, 0x2763c,
+		0x27700, 0x2771c,
+		0x27780, 0x2778c,
+		0x27800, 0x27818,
+		0x27820, 0x27828,
+		0x27830, 0x27848,
+		0x27850, 0x27854,
+		0x27860, 0x27868,
+		0x27870, 0x27870,
+		0x27878, 0x27898,
+		0x278a0, 0x278a8,
+		0x278b0, 0x278c8,
+		0x278d0, 0x278d4,
+		0x278e0, 0x278e8,
+		0x278f0, 0x278f0,
+		0x278f8, 0x27a18,
+		0x27a20, 0x27a28,
+		0x27a30, 0x27a48,
+		0x27a50, 0x27a54,
+		0x27a60, 0x27a68,
+		0x27a70, 0x27a70,
+		0x27a78, 0x27a98,
+		0x27aa0, 0x27aa8,
+		0x27ab0, 0x27ac8,
+		0x27ad0, 0x27ad4,
+		0x27ae0, 0x27ae8,
+		0x27af0, 0x27af0,
+		0x27af8, 0x27c18,
+		0x27c20, 0x27c20,
+		0x27c28, 0x27c30,
+		0x27c38, 0x27c38,
+		0x27c80, 0x27c98,
+		0x27ca0, 0x27ca8,
+		0x27cb0, 0x27cc8,
+		0x27cd0, 0x27cd4,
+		0x27ce0, 0x27ce8,
+		0x27cf0, 0x27cf0,
+		0x27cf8, 0x27d7c,
+		0x27e00, 0x27e04,
+	};
+
+	static const unsigned int t5_reg_ranges[] = {
+		0x1008, 0x10c0,
+		0x10cc, 0x10f8,
+		0x1100, 0x1100,
+		0x110c, 0x1148,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x123c,
+		0x1280, 0x173c,
+		0x1800, 0x18fc,
+		0x3000, 0x3028,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
+		0x30e0, 0x30fc,
+		0x3140, 0x357c,
+		0x35a8, 0x35cc,
+		0x35ec, 0x35ec,
+		0x3600, 0x5624,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
+		0x580c, 0x5814,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x59c8,
+		0x59d0, 0x59dc,
+		0x59fc, 0x5a18,
+		0x5a60, 0x5a70,
+		0x5a80, 0x5a9c,
+		0x5b94, 0x5bfc,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x614c,
+		0x7700, 0x7798,
+		0x77c0, 0x78fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8de0,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
+		0x8ea0, 0x8f84,
+		0x8fc0, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9400, 0x9408,
+		0x9410, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96f4,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0xa020,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0x1106c,
+		0x11074, 0x11088,
+		0x1109c, 0x1117c,
+		0x11190, 0x11204,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x19290,
+		0x193f8, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
+		0x19490, 0x194cc,
+		0x194f0, 0x194f8,
+		0x19c00, 0x19c08,
+		0x19c10, 0x19c60,
+		0x19c94, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f24,
+		0x19f34, 0x19f34,
+		0x19f40, 0x19f50,
+		0x19f90, 0x19fb4,
+		0x19fc4, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e008, 0x1e00c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
+		0x1e284, 0x1e290,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e408, 0x1e40c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
+		0x1e684, 0x1e690,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e808, 0x1e80c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
+		0x1ea84, 0x1ea90,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec08, 0x1ec0c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
+		0x1ee84, 0x1ee90,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f008, 0x1f00c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
+		0x1f284, 0x1f290,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f408, 0x1f40c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
+		0x1f684, 0x1f690,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f808, 0x1f80c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
+		0x1fa84, 0x1fa90,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc08, 0x1fc0c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
+		0x1fe84, 0x1fe90,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30100, 0x30144,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
+		0x30200, 0x30318,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
+		0x30540, 0x3061c,
+		0x30800, 0x30828,
+		0x30834, 0x30834,
+		0x308c0, 0x30908,
+		0x30910, 0x309ac,
+		0x30a00, 0x30a14,
+		0x30a1c, 0x30a2c,
+		0x30a44, 0x30a50,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d00,
+		0x30d08, 0x30d14,
+		0x30d1c, 0x30d20,
+		0x30d3c, 0x30d3c,
+		0x30d48, 0x30d50,
+		0x31200, 0x3120c,
+		0x31220, 0x31220,
+		0x31240, 0x31240,
+		0x31600, 0x3160c,
+		0x31a00, 0x31a1c,
+		0x31e00, 0x31e20,
+		0x31e38, 0x31e3c,
+		0x31e80, 0x31e80,
+		0x31e88, 0x31ea8,
+		0x31eb0, 0x31eb4,
+		0x31ec8, 0x31ed4,
+		0x31fb8, 0x32004,
+		0x32200, 0x32200,
+		0x32208, 0x32240,
+		0x32248, 0x32280,
+		0x32288, 0x322c0,
+		0x322c8, 0x322fc,
+		0x32600, 0x32630,
+		0x32a00, 0x32abc,
+		0x32b00, 0x32b10,
+		0x32b20, 0x32b30,
+		0x32b40, 0x32b50,
+		0x32b60, 0x32b70,
+		0x33000, 0x33028,
+		0x33030, 0x33048,
+		0x33060, 0x33068,
+		0x33070, 0x3309c,
+		0x330f0, 0x33128,
+		0x33130, 0x33148,
+		0x33160, 0x33168,
+		0x33170, 0x3319c,
+		0x331f0, 0x33238,
+		0x33240, 0x33240,
+		0x33248, 0x33250,
+		0x3325c, 0x33264,
+		0x33270, 0x332b8,
+		0x332c0, 0x332e4,
+		0x332f8, 0x33338,
+		0x33340, 0x33340,
+		0x33348, 0x33350,
+		0x3335c, 0x33364,
+		0x33370, 0x333b8,
+		0x333c0, 0x333e4,
+		0x333f8, 0x33428,
+		0x33430, 0x33448,
+		0x33460, 0x33468,
+		0x33470, 0x3349c,
+		0x334f0, 0x33528,
+		0x33530, 0x33548,
+		0x33560, 0x33568,
+		0x33570, 0x3359c,
+		0x335f0, 0x33638,
+		0x33640, 0x33640,
+		0x33648, 0x33650,
+		0x3365c, 0x33664,
+		0x33670, 0x336b8,
+		0x336c0, 0x336e4,
+		0x336f8, 0x33738,
+		0x33740, 0x33740,
+		0x33748, 0x33750,
+		0x3375c, 0x33764,
+		0x33770, 0x337b8,
+		0x337c0, 0x337e4,
+		0x337f8, 0x337fc,
+		0x33814, 0x33814,
+		0x3382c, 0x3382c,
+		0x33880, 0x3388c,
+		0x338e8, 0x338ec,
+		0x33900, 0x33928,
+		0x33930, 0x33948,
+		0x33960, 0x33968,
+		0x33970, 0x3399c,
+		0x339f0, 0x33a38,
+		0x33a40, 0x33a40,
+		0x33a48, 0x33a50,
+		0x33a5c, 0x33a64,
+		0x33a70, 0x33ab8,
+		0x33ac0, 0x33ae4,
+		0x33af8, 0x33b10,
+		0x33b28, 0x33b28,
+		0x33b3c, 0x33b50,
+		0x33bf0, 0x33c10,
+		0x33c28, 0x33c28,
+		0x33c3c, 0x33c50,
+		0x33cf0, 0x33cfc,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34100, 0x34144,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
+		0x34200, 0x34318,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
+		0x34540, 0x3461c,
+		0x34800, 0x34828,
+		0x34834, 0x34834,
+		0x348c0, 0x34908,
+		0x34910, 0x349ac,
+		0x34a00, 0x34a14,
+		0x34a1c, 0x34a2c,
+		0x34a44, 0x34a50,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d00,
+		0x34d08, 0x34d14,
+		0x34d1c, 0x34d20,
+		0x34d3c, 0x34d3c,
+		0x34d48, 0x34d50,
+		0x35200, 0x3520c,
+		0x35220, 0x35220,
+		0x35240, 0x35240,
+		0x35600, 0x3560c,
+		0x35a00, 0x35a1c,
+		0x35e00, 0x35e20,
+		0x35e38, 0x35e3c,
+		0x35e80, 0x35e80,
+		0x35e88, 0x35ea8,
+		0x35eb0, 0x35eb4,
+		0x35ec8, 0x35ed4,
+		0x35fb8, 0x36004,
+		0x36200, 0x36200,
+		0x36208, 0x36240,
+		0x36248, 0x36280,
+		0x36288, 0x362c0,
+		0x362c8, 0x362fc,
+		0x36600, 0x36630,
+		0x36a00, 0x36abc,
+		0x36b00, 0x36b10,
+		0x36b20, 0x36b30,
+		0x36b40, 0x36b50,
+		0x36b60, 0x36b70,
+		0x37000, 0x37028,
+		0x37030, 0x37048,
+		0x37060, 0x37068,
+		0x37070, 0x3709c,
+		0x370f0, 0x37128,
+		0x37130, 0x37148,
+		0x37160, 0x37168,
+		0x37170, 0x3719c,
+		0x371f0, 0x37238,
+		0x37240, 0x37240,
+		0x37248, 0x37250,
+		0x3725c, 0x37264,
+		0x37270, 0x372b8,
+		0x372c0, 0x372e4,
+		0x372f8, 0x37338,
+		0x37340, 0x37340,
+		0x37348, 0x37350,
+		0x3735c, 0x37364,
+		0x37370, 0x373b8,
+		0x373c0, 0x373e4,
+		0x373f8, 0x37428,
+		0x37430, 0x37448,
+		0x37460, 0x37468,
+		0x37470, 0x3749c,
+		0x374f0, 0x37528,
+		0x37530, 0x37548,
+		0x37560, 0x37568,
+		0x37570, 0x3759c,
+		0x375f0, 0x37638,
+		0x37640, 0x37640,
+		0x37648, 0x37650,
+		0x3765c, 0x37664,
+		0x37670, 0x376b8,
+		0x376c0, 0x376e4,
+		0x376f8, 0x37738,
+		0x37740, 0x37740,
+		0x37748, 0x37750,
+		0x3775c, 0x37764,
+		0x37770, 0x377b8,
+		0x377c0, 0x377e4,
+		0x377f8, 0x377fc,
+		0x37814, 0x37814,
+		0x3782c, 0x3782c,
+		0x37880, 0x3788c,
+		0x378e8, 0x378ec,
+		0x37900, 0x37928,
+		0x37930, 0x37948,
+		0x37960, 0x37968,
+		0x37970, 0x3799c,
+		0x379f0, 0x37a38,
+		0x37a40, 0x37a40,
+		0x37a48, 0x37a50,
+		0x37a5c, 0x37a64,
+		0x37a70, 0x37ab8,
+		0x37ac0, 0x37ae4,
+		0x37af8, 0x37b10,
+		0x37b28, 0x37b28,
+		0x37b3c, 0x37b50,
+		0x37bf0, 0x37c10,
+		0x37c28, 0x37c28,
+		0x37c3c, 0x37c50,
+		0x37cf0, 0x37cfc,
+		0x38000, 0x38030,
+		0x38038, 0x38038,
+		0x38040, 0x38040,
+		0x38100, 0x38144,
+		0x38190, 0x381a0,
+		0x381a8, 0x381b8,
+		0x381c4, 0x381c8,
+		0x381d0, 0x381d0,
+		0x38200, 0x38318,
+		0x38400, 0x384b4,
+		0x384c0, 0x3852c,
+		0x38540, 0x3861c,
+		0x38800, 0x38828,
+		0x38834, 0x38834,
+		0x388c0, 0x38908,
+		0x38910, 0x389ac,
+		0x38a00, 0x38a14,
+		0x38a1c, 0x38a2c,
+		0x38a44, 0x38a50,
+		0x38a74, 0x38a74,
+		0x38a7c, 0x38afc,
+		0x38b08, 0x38c24,
+		0x38d00, 0x38d00,
+		0x38d08, 0x38d14,
+		0x38d1c, 0x38d20,
+		0x38d3c, 0x38d3c,
+		0x38d48, 0x38d50,
+		0x39200, 0x3920c,
+		0x39220, 0x39220,
+		0x39240, 0x39240,
+		0x39600, 0x3960c,
+		0x39a00, 0x39a1c,
+		0x39e00, 0x39e20,
+		0x39e38, 0x39e3c,
+		0x39e80, 0x39e80,
+		0x39e88, 0x39ea8,
+		0x39eb0, 0x39eb4,
+		0x39ec8, 0x39ed4,
+		0x39fb8, 0x3a004,
+		0x3a200, 0x3a200,
+		0x3a208, 0x3a240,
+		0x3a248, 0x3a280,
+		0x3a288, 0x3a2c0,
+		0x3a2c8, 0x3a2fc,
+		0x3a600, 0x3a630,
+		0x3aa00, 0x3aabc,
+		0x3ab00, 0x3ab10,
+		0x3ab20, 0x3ab30,
+		0x3ab40, 0x3ab50,
+		0x3ab60, 0x3ab70,
+		0x3b000, 0x3b028,
+		0x3b030, 0x3b048,
+		0x3b060, 0x3b068,
+		0x3b070, 0x3b09c,
+		0x3b0f0, 0x3b128,
+		0x3b130, 0x3b148,
+		0x3b160, 0x3b168,
+		0x3b170, 0x3b19c,
+		0x3b1f0, 0x3b238,
+		0x3b240, 0x3b240,
+		0x3b248, 0x3b250,
+		0x3b25c, 0x3b264,
+		0x3b270, 0x3b2b8,
+		0x3b2c0, 0x3b2e4,
+		0x3b2f8, 0x3b338,
+		0x3b340, 0x3b340,
+		0x3b348, 0x3b350,
+		0x3b35c, 0x3b364,
+		0x3b370, 0x3b3b8,
+		0x3b3c0, 0x3b3e4,
+		0x3b3f8, 0x3b428,
+		0x3b430, 0x3b448,
+		0x3b460, 0x3b468,
+		0x3b470, 0x3b49c,
+		0x3b4f0, 0x3b528,
+		0x3b530, 0x3b548,
+		0x3b560, 0x3b568,
+		0x3b570, 0x3b59c,
+		0x3b5f0, 0x3b638,
+		0x3b640, 0x3b640,
+		0x3b648, 0x3b650,
+		0x3b65c, 0x3b664,
+		0x3b670, 0x3b6b8,
+		0x3b6c0, 0x3b6e4,
+		0x3b6f8, 0x3b738,
+		0x3b740, 0x3b740,
+		0x3b748, 0x3b750,
+		0x3b75c, 0x3b764,
+		0x3b770, 0x3b7b8,
+		0x3b7c0, 0x3b7e4,
+		0x3b7f8, 0x3b7fc,
+		0x3b814, 0x3b814,
+		0x3b82c, 0x3b82c,
+		0x3b880, 0x3b88c,
+		0x3b8e8, 0x3b8ec,
+		0x3b900, 0x3b928,
+		0x3b930, 0x3b948,
+		0x3b960, 0x3b968,
+		0x3b970, 0x3b99c,
+		0x3b9f0, 0x3ba38,
+		0x3ba40, 0x3ba40,
+		0x3ba48, 0x3ba50,
+		0x3ba5c, 0x3ba64,
+		0x3ba70, 0x3bab8,
+		0x3bac0, 0x3bae4,
+		0x3baf8, 0x3bb10,
+		0x3bb28, 0x3bb28,
+		0x3bb3c, 0x3bb50,
+		0x3bbf0, 0x3bc10,
+		0x3bc28, 0x3bc28,
+		0x3bc3c, 0x3bc50,
+		0x3bcf0, 0x3bcfc,
+		0x3c000, 0x3c030,
+		0x3c038, 0x3c038,
+		0x3c040, 0x3c040,
+		0x3c100, 0x3c144,
+		0x3c190, 0x3c1a0,
+		0x3c1a8, 0x3c1b8,
+		0x3c1c4, 0x3c1c8,
+		0x3c1d0, 0x3c1d0,
+		0x3c200, 0x3c318,
+		0x3c400, 0x3c4b4,
+		0x3c4c0, 0x3c52c,
+		0x3c540, 0x3c61c,
+		0x3c800, 0x3c828,
+		0x3c834, 0x3c834,
+		0x3c8c0, 0x3c908,
+		0x3c910, 0x3c9ac,
+		0x3ca00, 0x3ca14,
+		0x3ca1c, 0x3ca2c,
+		0x3ca44, 0x3ca50,
+		0x3ca74, 0x3ca74,
+		0x3ca7c, 0x3cafc,
+		0x3cb08, 0x3cc24,
+		0x3cd00, 0x3cd00,
+		0x3cd08, 0x3cd14,
+		0x3cd1c, 0x3cd20,
+		0x3cd3c, 0x3cd3c,
+		0x3cd48, 0x3cd50,
+		0x3d200, 0x3d20c,
+		0x3d220, 0x3d220,
+		0x3d240, 0x3d240,
+		0x3d600, 0x3d60c,
+		0x3da00, 0x3da1c,
+		0x3de00, 0x3de20,
+		0x3de38, 0x3de3c,
+		0x3de80, 0x3de80,
+		0x3de88, 0x3dea8,
+		0x3deb0, 0x3deb4,
+		0x3dec8, 0x3ded4,
+		0x3dfb8, 0x3e004,
+		0x3e200, 0x3e200,
+		0x3e208, 0x3e240,
+		0x3e248, 0x3e280,
+		0x3e288, 0x3e2c0,
+		0x3e2c8, 0x3e2fc,
+		0x3e600, 0x3e630,
+		0x3ea00, 0x3eabc,
+		0x3eb00, 0x3eb10,
+		0x3eb20, 0x3eb30,
+		0x3eb40, 0x3eb50,
+		0x3eb60, 0x3eb70,
+		0x3f000, 0x3f028,
+		0x3f030, 0x3f048,
+		0x3f060, 0x3f068,
+		0x3f070, 0x3f09c,
+		0x3f0f0, 0x3f128,
+		0x3f130, 0x3f148,
+		0x3f160, 0x3f168,
+		0x3f170, 0x3f19c,
+		0x3f1f0, 0x3f238,
+		0x3f240, 0x3f240,
+		0x3f248, 0x3f250,
+		0x3f25c, 0x3f264,
+		0x3f270, 0x3f2b8,
+		0x3f2c0, 0x3f2e4,
+		0x3f2f8, 0x3f338,
+		0x3f340, 0x3f340,
+		0x3f348, 0x3f350,
+		0x3f35c, 0x3f364,
+		0x3f370, 0x3f3b8,
+		0x3f3c0, 0x3f3e4,
+		0x3f3f8, 0x3f428,
+		0x3f430, 0x3f448,
+		0x3f460, 0x3f468,
+		0x3f470, 0x3f49c,
+		0x3f4f0, 0x3f528,
+		0x3f530, 0x3f548,
+		0x3f560, 0x3f568,
+		0x3f570, 0x3f59c,
+		0x3f5f0, 0x3f638,
+		0x3f640, 0x3f640,
+		0x3f648, 0x3f650,
+		0x3f65c, 0x3f664,
+		0x3f670, 0x3f6b8,
+		0x3f6c0, 0x3f6e4,
+		0x3f6f8, 0x3f738,
+		0x3f740, 0x3f740,
+		0x3f748, 0x3f750,
+		0x3f75c, 0x3f764,
+		0x3f770, 0x3f7b8,
+		0x3f7c0, 0x3f7e4,
+		0x3f7f8, 0x3f7fc,
+		0x3f814, 0x3f814,
+		0x3f82c, 0x3f82c,
+		0x3f880, 0x3f88c,
+		0x3f8e8, 0x3f8ec,
+		0x3f900, 0x3f928,
+		0x3f930, 0x3f948,
+		0x3f960, 0x3f968,
+		0x3f970, 0x3f99c,
+		0x3f9f0, 0x3fa38,
+		0x3fa40, 0x3fa40,
+		0x3fa48, 0x3fa50,
+		0x3fa5c, 0x3fa64,
+		0x3fa70, 0x3fab8,
+		0x3fac0, 0x3fae4,
+		0x3faf8, 0x3fb10,
+		0x3fb28, 0x3fb28,
+		0x3fb3c, 0x3fb50,
+		0x3fbf0, 0x3fc10,
+		0x3fc28, 0x3fc28,
+		0x3fc3c, 0x3fc50,
+		0x3fcf0, 0x3fcfc,
+		0x40000, 0x4000c,
+		0x40040, 0x40050,
+		0x40060, 0x40068,
+		0x4007c, 0x4008c,
+		0x40094, 0x400b0,
+		0x400c0, 0x40144,
+		0x40180, 0x4018c,
+		0x40200, 0x40254,
+		0x40260, 0x40264,
+		0x40270, 0x40288,
+		0x40290, 0x40298,
+		0x402ac, 0x402c8,
+		0x402d0, 0x402e0,
+		0x402f0, 0x402f0,
+		0x40300, 0x4033c,
+		0x403f8, 0x403fc,
+		0x41304, 0x413c4,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
+		0x41480, 0x414d0,
+		0x44000, 0x44054,
+		0x4405c, 0x44078,
+		0x440c0, 0x44174,
+		0x44180, 0x441ac,
+		0x441b4, 0x441b8,
+		0x441c0, 0x44254,
+		0x4425c, 0x44278,
+		0x442c0, 0x44374,
+		0x44380, 0x443ac,
+		0x443b4, 0x443b8,
+		0x443c0, 0x44454,
+		0x4445c, 0x44478,
+		0x444c0, 0x44574,
+		0x44580, 0x445ac,
+		0x445b4, 0x445b8,
+		0x445c0, 0x44654,
+		0x4465c, 0x44678,
+		0x446c0, 0x44774,
+		0x44780, 0x447ac,
+		0x447b4, 0x447b8,
+		0x447c0, 0x44854,
+		0x4485c, 0x44878,
+		0x448c0, 0x44974,
+		0x44980, 0x449ac,
+		0x449b4, 0x449b8,
+		0x449c0, 0x449fc,
+		0x45000, 0x45004,
+		0x45010, 0x45030,
+		0x45040, 0x45060,
+		0x45068, 0x45068,
+		0x45080, 0x45084,
+		0x450a0, 0x450b0,
+		0x45200, 0x45204,
+		0x45210, 0x45230,
+		0x45240, 0x45260,
+		0x45268, 0x45268,
+		0x45280, 0x45284,
+		0x452a0, 0x452b0,
+		0x460c0, 0x460e4,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
+		0x47200, 0x47250,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
+		0x47600, 0x47618,
+		0x47800, 0x47814,
+		0x48000, 0x4800c,
+		0x48040, 0x48050,
+		0x48060, 0x48068,
+		0x4807c, 0x4808c,
+		0x48094, 0x480b0,
+		0x480c0, 0x48144,
+		0x48180, 0x4818c,
+		0x48200, 0x48254,
+		0x48260, 0x48264,
+		0x48270, 0x48288,
+		0x48290, 0x48298,
+		0x482ac, 0x482c8,
+		0x482d0, 0x482e0,
+		0x482f0, 0x482f0,
+		0x48300, 0x4833c,
+		0x483f8, 0x483fc,
+		0x49304, 0x493c4,
+		0x49400, 0x4940c,
+		0x49414, 0x4941c,
+		0x49480, 0x494d0,
+		0x4c000, 0x4c054,
+		0x4c05c, 0x4c078,
+		0x4c0c0, 0x4c174,
+		0x4c180, 0x4c1ac,
+		0x4c1b4, 0x4c1b8,
+		0x4c1c0, 0x4c254,
+		0x4c25c, 0x4c278,
+		0x4c2c0, 0x4c374,
+		0x4c380, 0x4c3ac,
+		0x4c3b4, 0x4c3b8,
+		0x4c3c0, 0x4c454,
+		0x4c45c, 0x4c478,
+		0x4c4c0, 0x4c574,
+		0x4c580, 0x4c5ac,
+		0x4c5b4, 0x4c5b8,
+		0x4c5c0, 0x4c654,
+		0x4c65c, 0x4c678,
+		0x4c6c0, 0x4c774,
+		0x4c780, 0x4c7ac,
+		0x4c7b4, 0x4c7b8,
+		0x4c7c0, 0x4c854,
+		0x4c85c, 0x4c878,
+		0x4c8c0, 0x4c974,
+		0x4c980, 0x4c9ac,
+		0x4c9b4, 0x4c9b8,
+		0x4c9c0, 0x4c9fc,
+		0x4d000, 0x4d004,
+		0x4d010, 0x4d030,
+		0x4d040, 0x4d060,
+		0x4d068, 0x4d068,
+		0x4d080, 0x4d084,
+		0x4d0a0, 0x4d0b0,
+		0x4d200, 0x4d204,
+		0x4d210, 0x4d230,
+		0x4d240, 0x4d260,
+		0x4d268, 0x4d268,
+		0x4d280, 0x4d284,
+		0x4d2a0, 0x4d2b0,
+		0x4e0c0, 0x4e0e4,
+		0x4f000, 0x4f03c,
+		0x4f044, 0x4f08c,
+		0x4f200, 0x4f250,
+		0x4f400, 0x4f408,
+		0x4f414, 0x4f420,
+		0x4f600, 0x4f618,
+		0x4f800, 0x4f814,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50400, 0x50400,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50c00, 0x50c00,
+		0x51000, 0x5101c,
+		0x51300, 0x51308,
+	};
+
+	static const unsigned int t6_reg_ranges[] = {
+		0x1008, 0x101c,
+		0x1024, 0x10a8,
+		0x10b4, 0x10f8,
+		0x1100, 0x1114,
+		0x111c, 0x112c,
+		0x1138, 0x113c,
+		0x1144, 0x114c,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x1254,
+		0x1280, 0x133c,
+		0x1800, 0x18fc,
+		0x3000, 0x302c,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
+		0x30e0, 0x30fc,
+		0x3140, 0x357c,
+		0x35a8, 0x35cc,
+		0x35ec, 0x35ec,
+		0x3600, 0x5624,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
+		0x580c, 0x5814,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x595c,
+		0x5980, 0x598c,
+		0x59b0, 0x59c8,
+		0x59d0, 0x59dc,
+		0x59fc, 0x5a18,
+		0x5a60, 0x5a6c,
+		0x5a80, 0x5a8c,
+		0x5a94, 0x5a9c,
+		0x5b94, 0x5bfc,
+		0x5c10, 0x5e48,
+		0x5e50, 0x5e94,
+		0x5ea0, 0x5eb0,
+		0x5ec0, 0x5ec0,
+		0x5ec8, 0x5ecc,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x619c,
+		0x7700, 0x7798,
+		0x77c0, 0x7880,
+		0x78cc, 0x78fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d84,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8de4,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
+		0x8ea0, 0x8f88,
+		0x8fb8, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9100, 0x9124,
+		0x9400, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x9704,
+		0x9710, 0x971c,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0xa020,
+		0xd004, 0xd03c,
+		0xd100, 0xd118,
+		0xd200, 0xd214,
+		0xd220, 0xd234,
+		0xd240, 0xd254,
+		0xd260, 0xd274,
+		0xd280, 0xd294,
+		0xd2a0, 0xd2b4,
+		0xd2c0, 0xd2d4,
+		0xd2e0, 0xd2f4,
+		0xd300, 0xd31c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xf008,
+		0x11000, 0x11014,
+		0x11048, 0x1106c,
+		0x11074, 0x11088,
+		0x11098, 0x11120,
+		0x1112c, 0x1117c,
+		0x11190, 0x112e0,
+		0x11300, 0x1130c,
+		0x12000, 0x1206c,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x192b0,
+		0x192bc, 0x192bc,
+		0x19348, 0x1934c,
+		0x193f8, 0x19418,
+		0x19420, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
+		0x19490, 0x194cc,
+		0x194f0, 0x194f8,
+		0x19c00, 0x19c48,
+		0x19c50, 0x19c80,
+		0x19c94, 0x19c98,
+		0x19ca0, 0x19cbc,
+		0x19ce4, 0x19ce4,
+		0x19cf0, 0x19cf8,
+		0x19d00, 0x19d28,
+		0x19d50, 0x19d78,
+		0x19d94, 0x19d98,
+		0x19da0, 0x19dc8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e6c,
+		0x19ea0, 0x19ebc,
+		0x19ec4, 0x19ef4,
+		0x19f04, 0x19f2c,
+		0x19f34, 0x19f34,
+		0x19f40, 0x19f50,
+		0x19f90, 0x19fac,
+		0x19fc4, 0x19fc8,
+		0x19fd0, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e008, 0x1e00c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
+		0x1e284, 0x1e290,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e408, 0x1e40c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
+		0x1e684, 0x1e690,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e808, 0x1e80c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
+		0x1ea84, 0x1ea90,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec08, 0x1ec0c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
+		0x1ee84, 0x1ee90,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f008, 0x1f00c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
+		0x1f284, 0x1f290,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f408, 0x1f40c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
+		0x1f684, 0x1f690,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f808, 0x1f80c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
+		0x1fa84, 0x1fa90,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc08, 0x1fc0c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
+		0x1fe84, 0x1fe90,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30048, 0x30048,
+		0x30050, 0x30050,
+		0x3005c, 0x30060,
+		0x30068, 0x30068,
+		0x30070, 0x30070,
+		0x30100, 0x30168,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
+		0x30200, 0x30320,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
+		0x30540, 0x3061c,
+		0x30800, 0x308a0,
+		0x308c0, 0x30908,
+		0x30910, 0x309b8,
+		0x30a00, 0x30a04,
+		0x30a0c, 0x30a14,
+		0x30a1c, 0x30a2c,
+		0x30a44, 0x30a50,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d14,
+		0x30d1c, 0x30d3c,
+		0x30d44, 0x30d4c,
+		0x30d54, 0x30d74,
+		0x30d7c, 0x30d7c,
+		0x30de0, 0x30de0,
+		0x30e00, 0x30ed4,
+		0x30f00, 0x30fa4,
+		0x30fc0, 0x30fc4,
+		0x31000, 0x31004,
+		0x31080, 0x310fc,
+		0x31208, 0x31220,
+		0x3123c, 0x31254,
+		0x31300, 0x31300,
+		0x31308, 0x3131c,
+		0x31338, 0x3133c,
+		0x31380, 0x31380,
+		0x31388, 0x313a8,
+		0x313b4, 0x313b4,
+		0x31400, 0x31420,
+		0x31438, 0x3143c,
+		0x31480, 0x31480,
+		0x314a8, 0x314a8,
+		0x314b0, 0x314b4,
+		0x314c8, 0x314d4,
+		0x31a40, 0x31a4c,
+		0x31af0, 0x31b20,
+		0x31b38, 0x31b3c,
+		0x31b80, 0x31b80,
+		0x31ba8, 0x31ba8,
+		0x31bb0, 0x31bb4,
+		0x31bc8, 0x31bd4,
+		0x32140, 0x3218c,
+		0x321f0, 0x321f4,
+		0x32200, 0x32200,
+		0x32218, 0x32218,
+		0x32400, 0x32400,
+		0x32408, 0x3241c,
+		0x32618, 0x32620,
+		0x32664, 0x32664,
+		0x326a8, 0x326a8,
+		0x326ec, 0x326ec,
+		0x32a00, 0x32abc,
+		0x32b00, 0x32b38,
+		0x32b40, 0x32b58,
+		0x32b60, 0x32b78,
+		0x32c00, 0x32c00,
+		0x32c08, 0x32c3c,
+		0x32e00, 0x32e2c,
+		0x32f00, 0x32f2c,
+		0x33000, 0x3302c,
+		0x33034, 0x33050,
+		0x33058, 0x33058,
+		0x33060, 0x3308c,
+		0x3309c, 0x330ac,
+		0x330c0, 0x330c0,
+		0x330c8, 0x330d0,
+		0x330d8, 0x330e0,
+		0x330ec, 0x3312c,
+		0x33134, 0x33150,
+		0x33158, 0x33158,
+		0x33160, 0x3318c,
+		0x3319c, 0x331ac,
+		0x331c0, 0x331c0,
+		0x331c8, 0x331d0,
+		0x331d8, 0x331e0,
+		0x331ec, 0x33290,
+		0x33298, 0x332c4,
+		0x332e4, 0x33390,
+		0x33398, 0x333c4,
+		0x333e4, 0x3342c,
+		0x33434, 0x33450,
+		0x33458, 0x33458,
+		0x33460, 0x3348c,
+		0x3349c, 0x334ac,
+		0x334c0, 0x334c0,
+		0x334c8, 0x334d0,
+		0x334d8, 0x334e0,
+		0x334ec, 0x3352c,
+		0x33534, 0x33550,
+		0x33558, 0x33558,
+		0x33560, 0x3358c,
+		0x3359c, 0x335ac,
+		0x335c0, 0x335c0,
+		0x335c8, 0x335d0,
+		0x335d8, 0x335e0,
+		0x335ec, 0x33690,
+		0x33698, 0x336c4,
+		0x336e4, 0x33790,
+		0x33798, 0x337c4,
+		0x337e4, 0x337fc,
+		0x33814, 0x33814,
+		0x33854, 0x33868,
+		0x33880, 0x3388c,
+		0x338c0, 0x338d0,
+		0x338e8, 0x338ec,
+		0x33900, 0x3392c,
+		0x33934, 0x33950,
+		0x33958, 0x33958,
+		0x33960, 0x3398c,
+		0x3399c, 0x339ac,
+		0x339c0, 0x339c0,
+		0x339c8, 0x339d0,
+		0x339d8, 0x339e0,
+		0x339ec, 0x33a90,
+		0x33a98, 0x33ac4,
+		0x33ae4, 0x33b10,
+		0x33b24, 0x33b28,
+		0x33b38, 0x33b50,
+		0x33bf0, 0x33c10,
+		0x33c24, 0x33c28,
+		0x33c38, 0x33c50,
+		0x33cf0, 0x33cfc,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34048, 0x34048,
+		0x34050, 0x34050,
+		0x3405c, 0x34060,
+		0x34068, 0x34068,
+		0x34070, 0x34070,
+		0x34100, 0x34168,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
+		0x34200, 0x34320,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
+		0x34540, 0x3461c,
+		0x34800, 0x348a0,
+		0x348c0, 0x34908,
+		0x34910, 0x349b8,
+		0x34a00, 0x34a04,
+		0x34a0c, 0x34a14,
+		0x34a1c, 0x34a2c,
+		0x34a44, 0x34a50,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d14,
+		0x34d1c, 0x34d3c,
+		0x34d44, 0x34d4c,
+		0x34d54, 0x34d74,
+		0x34d7c, 0x34d7c,
+		0x34de0, 0x34de0,
+		0x34e00, 0x34ed4,
+		0x34f00, 0x34fa4,
+		0x34fc0, 0x34fc4,
+		0x35000, 0x35004,
+		0x35080, 0x350fc,
+		0x35208, 0x35220,
+		0x3523c, 0x35254,
+		0x35300, 0x35300,
+		0x35308, 0x3531c,
+		0x35338, 0x3533c,
+		0x35380, 0x35380,
+		0x35388, 0x353a8,
+		0x353b4, 0x353b4,
+		0x35400, 0x35420,
+		0x35438, 0x3543c,
+		0x35480, 0x35480,
+		0x354a8, 0x354a8,
+		0x354b0, 0x354b4,
+		0x354c8, 0x354d4,
+		0x35a40, 0x35a4c,
+		0x35af0, 0x35b20,
+		0x35b38, 0x35b3c,
+		0x35b80, 0x35b80,
+		0x35ba8, 0x35ba8,
+		0x35bb0, 0x35bb4,
+		0x35bc8, 0x35bd4,
+		0x36140, 0x3618c,
+		0x361f0, 0x361f4,
+		0x36200, 0x36200,
+		0x36218, 0x36218,
+		0x36400, 0x36400,
+		0x36408, 0x3641c,
+		0x36618, 0x36620,
+		0x36664, 0x36664,
+		0x366a8, 0x366a8,
+		0x366ec, 0x366ec,
+		0x36a00, 0x36abc,
+		0x36b00, 0x36b38,
+		0x36b40, 0x36b58,
+		0x36b60, 0x36b78,
+		0x36c00, 0x36c00,
+		0x36c08, 0x36c3c,
+		0x36e00, 0x36e2c,
+		0x36f00, 0x36f2c,
+		0x37000, 0x3702c,
+		0x37034, 0x37050,
+		0x37058, 0x37058,
+		0x37060, 0x3708c,
+		0x3709c, 0x370ac,
+		0x370c0, 0x370c0,
+		0x370c8, 0x370d0,
+		0x370d8, 0x370e0,
+		0x370ec, 0x3712c,
+		0x37134, 0x37150,
+		0x37158, 0x37158,
+		0x37160, 0x3718c,
+		0x3719c, 0x371ac,
+		0x371c0, 0x371c0,
+		0x371c8, 0x371d0,
+		0x371d8, 0x371e0,
+		0x371ec, 0x37290,
+		0x37298, 0x372c4,
+		0x372e4, 0x37390,
+		0x37398, 0x373c4,
+		0x373e4, 0x3742c,
+		0x37434, 0x37450,
+		0x37458, 0x37458,
+		0x37460, 0x3748c,
+		0x3749c, 0x374ac,
+		0x374c0, 0x374c0,
+		0x374c8, 0x374d0,
+		0x374d8, 0x374e0,
+		0x374ec, 0x3752c,
+		0x37534, 0x37550,
+		0x37558, 0x37558,
+		0x37560, 0x3758c,
+		0x3759c, 0x375ac,
+		0x375c0, 0x375c0,
+		0x375c8, 0x375d0,
+		0x375d8, 0x375e0,
+		0x375ec, 0x37690,
+		0x37698, 0x376c4,
+		0x376e4, 0x37790,
+		0x37798, 0x377c4,
+		0x377e4, 0x377fc,
+		0x37814, 0x37814,
+		0x37854, 0x37868,
+		0x37880, 0x3788c,
+		0x378c0, 0x378d0,
+		0x378e8, 0x378ec,
+		0x37900, 0x3792c,
+		0x37934, 0x37950,
+		0x37958, 0x37958,
+		0x37960, 0x3798c,
+		0x3799c, 0x379ac,
+		0x379c0, 0x379c0,
+		0x379c8, 0x379d0,
+		0x379d8, 0x379e0,
+		0x379ec, 0x37a90,
+		0x37a98, 0x37ac4,
+		0x37ae4, 0x37b10,
+		0x37b24, 0x37b28,
+		0x37b38, 0x37b50,
+		0x37bf0, 0x37c10,
+		0x37c24, 0x37c28,
+		0x37c38, 0x37c50,
+		0x37cf0, 0x37cfc,
+		0x40040, 0x40040,
+		0x40080, 0x40084,
+		0x40100, 0x40100,
+		0x40140, 0x401bc,
+		0x40200, 0x40214,
+		0x40228, 0x40228,
+		0x40240, 0x40258,
+		0x40280, 0x40280,
+		0x40304, 0x40304,
+		0x40330, 0x4033c,
+		0x41304, 0x413c8,
+		0x413d0, 0x413dc,
+		0x413f0, 0x413f0,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
+		0x41480, 0x414d0,
+		0x44000, 0x4407c,
+		0x440c0, 0x441ac,
+		0x441b4, 0x4427c,
+		0x442c0, 0x443ac,
+		0x443b4, 0x4447c,
+		0x444c0, 0x445ac,
+		0x445b4, 0x4467c,
+		0x446c0, 0x447ac,
+		0x447b4, 0x4487c,
+		0x448c0, 0x449ac,
+		0x449b4, 0x44a7c,
+		0x44ac0, 0x44bac,
+		0x44bb4, 0x44c7c,
+		0x44cc0, 0x44dac,
+		0x44db4, 0x44e7c,
+		0x44ec0, 0x44fac,
+		0x44fb4, 0x4507c,
+		0x450c0, 0x451ac,
+		0x451b4, 0x451fc,
+		0x45800, 0x45804,
+		0x45810, 0x45830,
+		0x45840, 0x45860,
+		0x45868, 0x45868,
+		0x45880, 0x45884,
+		0x458a0, 0x458b0,
+		0x45a00, 0x45a04,
+		0x45a10, 0x45a30,
+		0x45a40, 0x45a60,
+		0x45a68, 0x45a68,
+		0x45a80, 0x45a84,
+		0x45aa0, 0x45ab0,
+		0x460c0, 0x460e4,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
+		0x47200, 0x47250,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
+		0x47600, 0x47618,
+		0x47800, 0x47814,
+		0x47820, 0x4782c,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50300, 0x50384,
+		0x50400, 0x50400,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50b00, 0x50b84,
+		0x50c00, 0x50c00,
+		0x51000, 0x51020,
+		0x51028, 0x510b0,
+		0x51300, 0x51324,
+	};
+
+	u32 *buf_end = (u32 *)((char *)buf + buf_size);
+	const unsigned int *reg_ranges;
+	int reg_ranges_size, range;
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	/* Select the right set of register ranges to dump depending on the
+	 * adapter chip type.
+	 */
+	switch (chip_version) {
+	case CHELSIO_T4:
+		reg_ranges = t4_reg_ranges;
+		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+		break;
+
+	case CHELSIO_T5:
+		reg_ranges = t5_reg_ranges;
+		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+		break;
+
+	case CHELSIO_T6:
+		reg_ranges = t6_reg_ranges;
+		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+		break;
+
+	default:
+		dev_err(adap->pdev_dev,
+			"Unsupported chip version %d\n", chip_version);
+		return;
+	}
+
+	/* Clear the register buffer and insert the appropriate register
+	 * values selected by the above register ranges.
+	 */
+	memset(buf, 0, buf_size);
+	for (range = 0; range < reg_ranges_size; range += 2) {
+		unsigned int reg = reg_ranges[range];
+		unsigned int last_reg = reg_ranges[range + 1];
+		u32 *bufp = (u32 *)((char *)buf + reg);
+
+		/* Iterate across the register range filling in the register
+		 * buffer but don't write past the end of the register buffer.
+		 */
+		while (reg <= last_reg && bufp < buf_end) {
+			*bufp++ = t4_read_reg(adap, reg);
+			reg += sizeof(u32);
+		}
+	}
+}
+
+#define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_BASE           0x400
+#define VPD_BASE_OLD       0
+#define VPD_LEN            1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
+
+/**
+ *	t4_seeprom_wp - enable/disable EEPROM write protection
+ *	@adapter: the adapter
+ *	@enable: whether to enable or disable write protection
+ *
+ *	Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, bool enable)
+{
+	unsigned int v = enable ? 0xc : 0;
+	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM.
+ */
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	int i, ret = 0, addr;
+	int ec, sn, pn, na;
+	u8 *vpd, csum;
+	unsigned int vpdr_len, kw_offset, id_len;
+
+	vpd = vmalloc(VPD_LEN);
+	if (!vpd)
+		return -ENOMEM;
+
+	/* Card information normally starts at VPD_BASE but early cards had
+	 * it at 0.
+	 */
+	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+	if (ret < 0)
+		goto out;
+
+	/* The VPD shall have a unique identifier specified by the PCI SIG.
+	 * For Chelsio adapters the first byte of the VPD is
+	 * CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming software is
+	 * expected to automatically put this entry at the beginning of the
+	 * VPD.
+	 */
+	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
+
+	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
+	if (ret < 0)
+		goto out;
+
+	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
+		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	id_len = pci_vpd_lrdt_size(vpd);
+	if (id_len > ID_LEN)
+		id_len = ID_LEN;
+
+	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0) {
+		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
+	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
+	if (vpdr_len + kw_offset > VPD_LEN) {
+		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
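+/* Locate a VPD keyword within the VPD-R section and set @var to the offset
+ * of its data field; on a missing keyword, fail the whole lookup with
+ * -EINVAL.
+ */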
+#define FIND_VPD_KW(var, name) do { \
+	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
+	if (var < 0) { \
+		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+		ret = -EINVAL; \
+		goto out; \
+	} \
+	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+	FIND_VPD_KW(i, "RV");
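+	/* The "RV" keyword's data byte is a checksum chosen so that the sum
+	 * of every VPD byte from the start of the VPD through the "RV" data
+	 * byte is zero.
+	 */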
+	for (csum = 0; i >= 0; i--)
+		csum += vpd[i];
+
+	if (csum) {
+		dev_err(adapter->pdev_dev,
+			"corrupted VPD EEPROM, actual csum %u\n", csum);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	FIND_VPD_KW(ec, "EC");
+	FIND_VPD_KW(sn, "SN");
+	FIND_VPD_KW(pn, "PN");
+	FIND_VPD_KW(na, "NA");
+#undef FIND_VPD_KW
+
+	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+	strim(p->id);
+	memcpy(p->ec, vpd + ec, EC_LEN);
+	strim(p->ec);
+	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+	strim(p->sn);
+	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
+	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+	strim(p->pn);
+	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+	strim((char *)p->na);
+
+out:
+	vfree(vpd);
+	return ret;
+}
+
+/**
+ *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM and retrieves the Core
+ *	Clock.  This can only be called after a connection to the firmware
+ *	is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	u32 cclk_param, cclk_val;
+	int ret;
+
+	/* Grab the raw VPD parameters.
+	 */
+	ret = t4_get_raw_vpd_params(adapter, p);
+	if (ret)
+		return ret;
+
+	/* Ask firmware for the Core Clock since it knows how to translate the
+	 * Reference Clock ('V2') VPD field into a Core Clock value ...
+	 */
+	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
+	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+			      1, &cclk_param, &cclk_val);
+
+	if (ret)
+		return ret;
+	p->cclk = cclk_val;
+
+	return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+	SF_ATTEMPTS = 10,             /* max retries for SF operations */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE    = 2,          /* program page */
+	SF_WR_DISABLE   = 4,          /* disable writes */
+	SF_RD_STATUS    = 5,          /* read status register */
+	SF_WR_ENABLE    = 6,          /* enable writes */
+	SF_RD_DATA_FAST = 0xb,        /* read flash */
+	SF_RD_ID        = 0x9f,       /* read ID */
+	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
+
+	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
+};
+
+/**
+ *	sf1_read - read data from the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to read
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@valp: where to store the read data
+ *
+ *	Reads up to 4 bytes of data from the serial flash.  The location of
+ *	the read needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		    int lock, u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
+		return -EBUSY;
+	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
+	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
+	if (!ret)
+		*valp = t4_read_reg(adapter, SF_DATA_A);
+	return ret;
+}
+
+/**
+ *	sf1_write - write data to the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to write
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@val: value to write
+ *
+ *	Writes up to 4 bytes of data to the serial flash.  The location of
+ *	the write needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		     int lock, u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
+		return -EBUSY;
+	t4_write_reg(adapter, SF_DATA_A, val);
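+	/* OP_V(1) requests a write cycle; sf1_read() programs SF_OP_A without
+	 * it, i.e. OP == 0 selects a read.
+	 */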
+	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
+	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ *	flash_wait_op - wait for a flash operation to complete
+ *	@adapter: the adapter
+ *	@attempts: max number of polls of the status register
+ *	@delay: delay between polls in ms
+ *
+ *	Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+			return ret;
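+		/* Bit 0 of the flash status register reads back as 1 while a
+		 * program/erase operation is still in progress.
+		 */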
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ *	t4_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+		return ret;
+
+	for ( ; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+		if (nwords == 1)
+			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = (__force __u32)(cpu_to_be32(*data));
+	}
+	return 0;
+}
+
+/**
+ *	t4_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write in bytes
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.  All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+		goto unlock;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, 1, val);
+		if (ret)
+			goto unlock;
+	}
+	ret = flash_wait_op(adapter, 8, 1);
+	if (ret)
+		goto unlock;
+
+	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+
+	/* Read the page to verify the write succeeded */
+	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *)buf + offset, n)) {
+		dev_err(adapter->pdev_dev,
+			"failed to correctly write the flash page at %#x\n",
+			addr);
+		return -EIO;
+	}
+	return 0;
+
+unlock:
+	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+	return ret;
+}
+
+/**
+ *	t4_get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FLASH_FW_START +
+			     offsetof(struct fw_hdr, fw_ver), 1,
+			     vers, 0);
+}
+
+/**
+ *	t4_get_tp_version - read the TP microcode version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the TP microcode version from flash.
+ */
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FLASH_FW_START +
+			     offsetof(struct fw_hdr, tp_microcode_ver),
+			     1, vers, 0);
+}
+
+/**
+ *	t4_get_exprom_version - return the Expansion ROM version (if any)
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the Expansion ROM header from FLASH and returns the version
+ *	number (if present) through the @vers return value pointer.  We return
+ *	this in the Firmware Version Format since it's convenient.  Return
+ *	0 on success, -ENOENT if no Expansion ROM is present.
+ */
+int t4_get_exprom_version(struct adapter *adap, u32 *vers)
+{
+	struct exprom_header {
+		unsigned char hdr_arr[16];	/* must start with 0x55aa */
+		unsigned char hdr_ver[4];	/* Expansion ROM version */
+	} *hdr;
+	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+					   sizeof(u32))];
+	int ret;
+
+	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
+			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
+			    0);
+	if (ret)
+		return ret;
+
+	hdr = (struct exprom_header *)exprom_header_buf;
+	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+		return -ENOENT;
+
+	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
+		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
+		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
+		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
+	return 0;
+}
+
+/**
+ *	t4_check_fw_version - check if the FW is supported with this driver
+ *	@adap: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if there's an exact match, or a negative error if the version could
+ *	not be read or there's a major version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+	int i, ret, major, minor, micro;
+	int exp_major, exp_minor, exp_micro;
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	/* Try multiple times before returning error */
+	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
+		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		exp_major = T4FW_MIN_VERSION_MAJOR;
+		exp_minor = T4FW_MIN_VERSION_MINOR;
+		exp_micro = T4FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T5:
+		exp_major = T5FW_MIN_VERSION_MAJOR;
+		exp_minor = T5FW_MIN_VERSION_MINOR;
+		exp_micro = T5FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T6:
+		exp_major = T6FW_MIN_VERSION_MAJOR;
+		exp_minor = T6FW_MIN_VERSION_MINOR;
+		exp_micro = T6FW_MIN_VERSION_MICRO;
+		break;
+	default:
+		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+			adap->chip);
+		return -EINVAL;
+	}
+
+	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+		dev_err(adap->pdev_dev,
+			"Card has firmware version %u.%u.%u, minimum "
+			"supported firmware is %u.%u.%u.\n", major, minor,
+			micro, exp_major, exp_minor, exp_micro);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
+ */
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
+{
+
+	/* short circuit if it's the exact same firmware version */
+	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+		return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+		return 1;
+#undef SAME_INTF
+
+	return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
+				int k, int c)
+{
+	const char *reason;
+
+	if (!card_fw_usable) {
+		reason = "incompatible or unusable";
+		goto install;
+	}
+
+	if (k > c) {
+		reason = "older than the version supported with this driver";
+		goto install;
+	}
+
+	return 0;
+
+install:
+	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
+		"installing firmware %u.%u.%u.%u on card.\n",
+		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+
+	return 1;
+}
+
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+	       const u8 *fw_data, unsigned int fw_size,
+	       struct fw_hdr *card_fw, enum dev_state state,
+	       int *reset)
+{
+	int ret, card_fw_usable, fs_fw_usable;
+	const struct fw_hdr *fs_fw;
+	const struct fw_hdr *drv_fw;
+
+	drv_fw = &fw_info->fw_hdr;
+
+	/* Read the header of the firmware on the card */
+	ret = -t4_read_flash(adap, FLASH_FW_START,
+			    sizeof(*card_fw) / sizeof(uint32_t),
+			    (uint32_t *)card_fw, 1);
+	if (ret == 0) {
+		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+	} else {
+		dev_err(adap->pdev_dev,
+			"Unable to read card's firmware header: %d\n", ret);
+		card_fw_usable = 0;
+	}
+
+	if (fw_data != NULL) {
+		fs_fw = (const void *)fw_data;
+		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+	} else {
+		fs_fw = NULL;
+		fs_fw_usable = 0;
+	}
+
+	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+		/* Common case: the firmware on the card is an exact match and
+		 * the filesystem one is an exact match too, or the filesystem
+		 * one is absent/incompatible.
+		 */
+	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
+		   should_install_fs_fw(adap, card_fw_usable,
+					be32_to_cpu(fs_fw->fw_ver),
+					be32_to_cpu(card_fw->fw_ver))) {
+		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
+				     fw_size, 0);
+		if (ret != 0) {
+			dev_err(adap->pdev_dev,
+				"failed to install firmware: %d\n", ret);
+			goto bye;
+		}
+
+		/* Installed successfully, update the cached header too. */
+		*card_fw = *fs_fw;
+		card_fw_usable = 1;
+		*reset = 0;	/* already reset as part of load_fw */
+	}
+
+	if (!card_fw_usable) {
+		uint32_t d, c, k;
+
+		d = be32_to_cpu(drv_fw->fw_ver);
+		c = be32_to_cpu(card_fw->fw_ver);
+		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
+			"chip state %d, "
+			"driver compiled with %d.%d.%d.%d, "
+			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+			state,
+			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+		ret = EINVAL;
+		goto bye;
+	}
+
+	/* We're using whatever's on the card and it's known to be good. */
+	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
+	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+	return ret;
+}
+
+/**
+ *	t4_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	int ret = 0;
+
+	if (end >= adapter->params.sf_nsec)
+		return -EINVAL;
+
+	while (start <= end) {
+		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0, 1,
+				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
+			dev_err(adapter->pdev_dev,
+				"erase of flash sector %d failed, error %d\n",
+				start, ret);
+			break;
+		}
+		start++;
+	}
+	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+	return ret;
+}
+
+/**
+ *	t4_flash_cfg_addr - return the address of the flash configuration file
+ *	@adapter: the adapter
+ *
+ *	Return the address within the flash where the Firmware Configuration
+ *	File is stored.
+ */
+unsigned int t4_flash_cfg_addr(struct adapter *adapter)
+{
+	if (adapter->params.sf_size == 0x100000)
+		return FLASH_FPGA_CFG_START;
+	else
+		return FLASH_CFG_START;
+}
+
+/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
+ * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
+ * and emit an error message for mismatched firmware to save our caller the
+ * effort ...
+ */
+static bool t4_fw_matches_chip(const struct adapter *adap,
+			       const struct fw_hdr *hdr)
+{
+	/* The expression below will return FALSE for any unsupported adapter
+	 * which will keep us "honest" in the future ...
+	 */
+	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
+	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
+		return true;
+
+	dev_err(adap->pdev_dev,
+		"FW image (%d) is not suitable for this adapter (%d)\n",
+		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
+	return false;
+}
+
+/**
+ *	t4_load_fw - download firmware
+ *	@adap: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	int ret, addr;
+	unsigned int i;
+	u8 first_page[SF_PAGE_SIZE];
+	const __be32 *p = (const __be32 *)fw_data;
+	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+	unsigned int fw_img_start = adap->params.sf_fw_start;
+	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+
+	if (!size) {
+		dev_err(adap->pdev_dev, "FW image has no data\n");
+		return -EINVAL;
+	}
+	if (size & 511) {
+		dev_err(adap->pdev_dev,
+			"FW image size not multiple of 512 bytes\n");
+		return -EINVAL;
+	}
+	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
+		dev_err(adap->pdev_dev,
+			"FW image size differs from size in FW header\n");
+		return -EINVAL;
+	}
+	if (size > FW_MAX_SIZE) {
+		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
+			FW_MAX_SIZE);
+		return -EFBIG;
+	}
+	if (!t4_fw_matches_chip(adap, hdr))
+		return -EINVAL;
+
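+	/* The image is considered intact only if all of its 32-bit
+	 * big-endian words sum to 0xffffffff.
+	 */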
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += be32_to_cpu(p[i]);
+
+	if (csum != 0xffffffff) {
+		dev_err(adap->pdev_dev,
+			"corrupted firmware image, checksum %#x\n", csum);
+		return -EINVAL;
+	}
+
+	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
+	if (ret)
+		goto out;
+
+	/*
+	 * We write the correct version at the end so the driver can see a bad
+	 * version if the FW write fails.  Start by writing a copy of the
+	 * first page with a bad version.
+	 */
+	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
+	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+	if (ret)
+		goto out;
+
+	addr = fw_img_start;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		fw_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap,
+			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
+			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+			ret);
+	else
+		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	return ret;
+}
+
+/**
+ *	t4_phy_fw_ver - return current PHY firmware version
+ *	@adap: the adapter
+ *	@phy_fw_ver: return value buffer for PHY firmware version
+ *
+ *	Returns the current version of external PHY firmware on the
+ *	adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+	u32 param, val;
+	int ret;
+
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+			      &param, &val);
+	if (ret < 0)
+		return ret;
+	*phy_fw_ver = val;
+	return 0;
+}
+
+/**
+ *	t4_load_phy_fw - download port PHY firmware
+ *	@adap: the adapter
+ *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
+ *	@win_lock: the lock to use to guard the memory copy
+ *	@phy_fw_version: function to check PHY firmware versions
+ *	@phy_fw_data: the PHY firmware image to write
+ *	@phy_fw_size: image size
+ *
+ *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
+ *	@phy_fw_version is supplied, then it will be used to determine if
+ *	it's necessary to perform the transfer by comparing the version
+ *	of any existing adapter PHY firmware with that of the passed in
+ *	PHY firmware image.  If @win_lock is non-NULL then it will be used
+ *	around the call to t4_memory_rw() which transfers the PHY firmware
+ *	to the adapter.
+ *
+ *	A negative error number will be returned if an error occurs.  If
+ *	version number support is available and there's no need to upgrade
+ *	the firmware, 0 will be returned.  If firmware is successfully
+ *	transferred to the adapter, 1 will be returned.
+ *
+ *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
+ *	a result, a RESET of the adapter would cause that RAM to lose its
+ *	contents.  Thus, loading PHY firmware on such adapters must happen
+ *	after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+		   int win, spinlock_t *win_lock,
+		   int (*phy_fw_version)(const u8 *, size_t),
+		   const u8 *phy_fw_data, size_t phy_fw_size)
+{
+	unsigned long mtype = 0, maddr = 0;
+	u32 param, val;
+	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+	int ret;
+
+	/* If we have version number support, then check to see if the adapter
+	 * already has up-to-date PHY firmware loaded.
+	 */
+	if (phy_fw_version) {
+		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+		if (ret < 0)
+			return ret;
+
+		if (cur_phy_fw_ver >= new_phy_fw_vers) {
+			CH_WARN(adap, "PHY Firmware already up-to-date, "
+				"version %#x\n", cur_phy_fw_ver);
+			return 0;
+		}
+	}
+
+	/* Ask the firmware where it wants us to copy the PHY firmware image.
+	 * Passing the file size requires a special version of the READ
+	 * command which carries the size in the PARAMS_CMD values field;
+	 * the firmware's reply (memory type and address) is returned in the
+	 * same buffer.
+	 */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+	val = phy_fw_size;
+	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+				 &param, &val, 1);
+	if (ret < 0)
+		return ret;
+	mtype = val >> 8;
+	maddr = (val & 0xff) << 16;
+
+	/* Copy the supplied PHY Firmware image to the adapter memory location
+	 * allocated by the adapter firmware.
+	 */
+	if (win_lock)
+		spin_lock_bh(win_lock);
+	ret = t4_memory_rw(adap, win, mtype, maddr,
+			   phy_fw_size, (__be32 *)phy_fw_data,
+			   T4_MEMORY_WRITE);
+	if (win_lock)
+		spin_unlock_bh(win_lock);
+	if (ret)
+		return ret;
+
+	/* Tell the firmware that the PHY firmware image has been written to
+	 * RAM and it can now start copying it over to the PHYs.  The chip
+	 * firmware will RESET the affected PHYs as part of this operation
+	 * leaving them running the new PHY firmware image.
+	 */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+				    &param, &val, 30000);
+
+	/* If we have version number support, then check to see that the new
+	 * firmware got loaded properly.
+	 */
+	if (phy_fw_version) {
+		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+		if (ret < 0)
+			return ret;
+
+		if (cur_phy_fw_ver != new_phy_fw_vers) {
+			CH_WARN(adap, "PHY Firmware did not update: "
+				"version on adapter %#x, "
+				"version flashed %#x\n",
+				cur_phy_fw_ver, new_phy_fw_vers);
+			return -ENXIO;
+		}
+	}
+
+	return 1;
+}
+
+/**
+ *	t4_fwcache - firmware cache operation
+ *	@adap: the adapter
+ *	@op  : the operation (flush or flush and invalidate)
+ */
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
+{
+	struct fw_params_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn =
+		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+			    FW_PARAMS_CMD_PFN_V(adap->pf) |
+			    FW_PARAMS_CMD_VFN_V(0));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.param[0].mnem =
+		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
+	c.param[0].val = (__force __be32)op;
+
+	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
+}
+
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+			unsigned int *pif_req_wrptr,
+			unsigned int *pif_rsp_wrptr)
+{
+	int i, j;
+	u32 cfg, val, req, rsp;
+
+	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+	if (cfg & LADBGEN_F)
+		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
+	req = POLADBGWRPTR_G(val);
+	rsp = PILADBGWRPTR_G(val);
+	if (pif_req_wrptr)
+		*pif_req_wrptr = req;
+	if (pif_rsp_wrptr)
+		*pif_rsp_wrptr = rsp;
+
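+	/* Walk the PIF logic-analyzer entries: six data words are read per
+	 * row, after which the read pointers are advanced past two more
+	 * slots before the next row.
+	 */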
+	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+		for (j = 0; j < 6; j++) {
+			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
+				     PILADBGRDPTR_V(rsp));
+			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
+			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
+			req++;
+			rsp++;
+		}
+		req = (req + 2) & POLADBGRDPTR_M;
+		rsp = (rsp + 2) & PILADBGRDPTR_M;
+	}
+	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+	u32 cfg;
+	int i, j, idx;
+
+	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+	if (cfg & LADBGEN_F)
+		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+	for (i = 0; i < CIM_MALA_SIZE; i++) {
+		for (j = 0; j < 5; j++) {
+			idx = 8 * i + j;
+			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
+				     PILADBGRDPTR_V(idx));
+			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
+			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
+		}
+	}
+	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < 8; i++) {
+		u32 *p = la_buf + i;
+
+		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
+		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
+		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
+		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
+			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
+	}
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+		     FW_PORT_CAP_ANEG)
+
+/**
+ *	t4_link_l1cfg - apply link configuration to MAC/PHY
+ *	@adap: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@port: the port id
+ *	@lc: the requested link configuration
+ *
+ *	Set up a port's MAC and PHY according to a desired link configuration.
+ *	- If the PHY can auto-negotiate first decide what to advertise, then
+ *	  enable/disable auto-negotiation as desired, and reset.
+ *	- If the PHY does not auto-negotiate just reset it.
+ *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *	  otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc)
+{
+	struct fw_port_cmd c;
+	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+
+	lc->link_ok = 0;
+	if (lc->requested_fc & PAUSE_RX)
+		fc |= FW_PORT_CAP_FC_RX;
+	if (lc->requested_fc & PAUSE_TX)
+		fc |= FW_PORT_CAP_FC_TX;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				     FW_PORT_CMD_PORTID_V(port));
+	c.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+					     fc);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else
+		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_restart_aneg - restart autonegotiation
+ *	@adap: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@port: the port id
+ *
+ *	Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+	struct fw_port_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				     FW_PORT_CMD_PORTID_V(port));
+	c.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
+	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+typedef void (*int_handler_t)(struct adapter *adap);
+
+struct intr_info {
+	unsigned int mask;       /* bits to check in interrupt status */
+	const char *msg;         /* message to print or NULL */
+	short stat_idx;          /* stat counter to increment or -1 */
+	unsigned short fatal;    /* whether the condition reported is fatal */
+	int_handler_t int_handler; /* platform-specific int handler */
+};
+
+/**
+ *	t4_handle_intr_status - table driven interrupt handler
+ *	@adapter: the adapter that generated the interrupt
+ *	@reg: the interrupt status register to process
+ *	@acts: table of interrupt actions
+ *
+ *	A table driven interrupt handler that applies a set of masks to an
+ *	interrupt status word and performs the corresponding actions if the
+ *	interrupts described by the mask have occurred.  The actions include
+ *	optionally emitting a warning or alert message.  The table is terminated
+ *	by an entry specifying mask 0.  Returns the number of fatal interrupt
+ *	conditions.
+ */
+static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+				 const struct intr_info *acts)
+{
+	int fatal = 0;
+	unsigned int mask = 0;
+	unsigned int status = t4_read_reg(adapter, reg);
+
+	for ( ; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+		if (acts->fatal) {
+			fatal++;
+			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+				  status & acts->mask);
+		} else if (acts->msg && printk_ratelimit())
+			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+				 status & acts->mask);
+		if (acts->int_handler)
+			acts->int_handler(adapter);
+		mask |= acts->mask;
+	}
+	status &= mask;
+	if (status)                           /* clear processed interrupts */
+		t4_write_reg(adapter, reg, status);
+	return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info sysbus_intr_info[] = {
+		{ RNPP_F, "RXNP array parity error", -1, 1 },
+		{ RPCP_F, "RXPC array parity error", -1, 1 },
+		{ RCIP_F, "RXCIF array parity error", -1, 1 },
+		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
+		{ RFTP_F, "RXFT array parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info pcie_port_intr_info[] = {
+		{ TPCP_F, "TXPC array parity error", -1, 1 },
+		{ TNPP_F, "TXNP array parity error", -1, 1 },
+		{ TFTP_F, "TXFT array parity error", -1, 1 },
+		{ TCAP_F, "TXCA array parity error", -1, 1 },
+		{ TCIP_F, "TXCIF array parity error", -1, 1 },
+		{ RCAP_F, "RXCA array parity error", -1, 1 },
+		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
+		{ RDPE_F, "Rx data parity error", -1, 1 },
+		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info pcie_intr_info[] = {
+		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
+		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
+		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
+		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
+		  -1, 0 },
+		{ 0 }
+	};
+
+	static struct intr_info t5_pcie_intr_info[] = {
+		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
+		  -1, 1 },
+		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
+		  -1, 1 },
+		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
+		  -1, 1 },
+		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
+		  -1, 1 },
+		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
+		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
+		  -1, 1 },
+		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
+		  -1, 1 },
+		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+		{ READRSPERR_F, "Outbound read error", -1, 0 },
+		{ 0 }
+	};
+
+	int fat;
+
+	if (is_t4(adapter->params.chip))
+		fat = t4_handle_intr_status(adapter,
+				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+				sysbus_intr_info) +
+			t4_handle_intr_status(adapter,
+					PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+					pcie_port_intr_info) +
+			t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
+					      pcie_intr_info);
+	else
+		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
+					    t5_pcie_intr_info);
+
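+	/* Each pass issues a READ STATUS command; bit 0 of the returned
+	 * status byte indicates the flash is still busy.
+	 */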
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info tp_intr_info[] = {
+		{ 0x3fffffff, "TP parity error", -1, 1 },
+		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void sge_intr_handler(struct adapter *adapter)
+{
+	u64 v;
+	u32 err;
+
+	static const struct intr_info sge_intr_info[] = {
+		{ ERR_CPL_EXCEED_IQE_SIZE_F,
+		  "SGE received CPL exceeding IQE size", -1, 1 },
+		{ ERR_INVALID_CIDX_INC_F,
+		  "SGE GTS CIDX increment too large", -1, 0 },
+		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
+		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
+		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
+		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
+		  0 },
+		{ ERR_ING_CTXT_PRIO_F,
+		  "SGE too many priority ingress contexts", -1, 0 },
+		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
+		{ 0 }
+	};
+
+	static struct intr_info t4t5_sge_intr_info[] = {
+		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+		{ ERR_EGR_CTXT_PRIO_F,
+		  "SGE too many priority egress contexts", -1, 0 },
+		{ 0 }
+	};
+
+	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
+		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
+	if (v) {
+		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
+				(unsigned long long)v);
+		t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
+		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
+	}
+
+	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+					   t4t5_sge_intr_info);
+
+	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+	if (err & ERROR_QID_VALID_F) {
+		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+			ERROR_QID_G(err));
+		if (err & UNCAPTURED_ERROR_F)
+			dev_err(adapter->pdev_dev,
+				"SGE UNCAPTURED_ERROR set (clearing)\n");
+		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+			     UNCAPTURED_ERROR_F);
+	}
+
+	if (v != 0)
+		t4_fatal_err(adapter);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cim_intr_info[] = {
+		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info cim_upintr_info[] = {
+		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
+		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
+		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
+		t4_report_fw_error(adapter);
+
+	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
+				    cim_intr_info) +
+	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
+				    cim_upintr_info);
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulprx_intr_info[] = {
+		{ 0x1800000, "ULPRX context error", -1, 1 },
+		{ 0x7fffff, "ULPRX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info ulptx_intr_info[] = {
+		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
+		  0 },
+		{ 0xfffffff, "ULPTX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmtx_intr_info[] = {
+		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
+		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
+		  -1, 1 },
+		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info pmrx_intr_info[] = {
+		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
+		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
+		  -1, 1 },
+		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info cplsw_intr_info[] = {
+		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void le_intr_handler(struct adapter *adap)
+{
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+	static const struct intr_info le_intr_info[] = {
+		{ LIPMISS_F, "LE LIP miss", -1, 0 },
+		{ LIP0_F, "LE 0 LIP error", -1, 0 },
+		{ PARITYERR_F, "LE parity error", -1, 1 },
+		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	static struct intr_info t6_le_intr_info[] = {
+		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
+		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+				  (chip <= CHELSIO_T5) ?
+				  le_intr_info : t6_le_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+	static const struct intr_info mps_rx_intr_info[] = {
+		{ 0xffffff, "MPS Rx parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_tx_intr_info[] = {
+		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+		  -1, 1 },
+		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+		  -1, 1 },
+		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
+		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_trc_intr_info[] = {
+		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+		  -1, 1 },
+		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_stat_sram_intr_info[] = {
+		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_stat_tx_intr_info[] = {
+		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_stat_rx_intr_info[] = {
+		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static const struct intr_info mps_cls_intr_info[] = {
+		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
+				    mps_rx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
+				    mps_tx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
+				    mps_trc_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+				    mps_stat_sram_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+				    mps_stat_tx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+				    mps_stat_rx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
+				    mps_cls_intr_info);
+
+	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
+	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+		      ECC_UE_INT_CAUSE_F)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void mem_intr_handler(struct adapter *adapter, int idx)
+{
+	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
+
+	unsigned int addr, cnt_addr, v;
+
+	if (idx <= MEM_EDC1) {
+		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
+	} else if (idx == MEM_MC) {
+		if (is_t4(adapter->params.chip)) {
+			addr = MC_INT_CAUSE_A;
+			cnt_addr = MC_ECC_STATUS_A;
+		} else {
+			addr = MC_P_INT_CAUSE_A;
+			cnt_addr = MC_P_ECC_STATUS_A;
+		}
+	} else {
+		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
+		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
+	}
+
+	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+	if (v & PERR_INT_CAUSE_F)
+		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
+			  name[idx]);
+	if (v & ECC_CE_INT_CAUSE_F) {
+		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
+
+		t4_edc_err_read(adapter, idx);
+
+		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
+		if (printk_ratelimit())
+			dev_warn(adapter->pdev_dev,
+				 "%u %s correctable ECC data error%s\n",
+				 cnt, name[idx], cnt > 1 ? "s" : "");
+	}
+	if (v & ECC_UE_INT_CAUSE_F)
+		dev_alert(adapter->pdev_dev,
+			  "%s uncorrectable ECC data error\n", name[idx]);
+
+	t4_write_reg(adapter, addr, v);
+	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void ma_intr_handler(struct adapter *adap)
+{
+	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
+
+	if (status & MEM_PERR_INT_CAUSE_F) {
+		dev_alert(adap->pdev_dev,
+			  "MA parity error, parity status %#x\n",
+			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
+		if (is_t5(adap->params.chip))
+			dev_alert(adap->pdev_dev,
+				  "MA parity error, parity status %#x\n",
+				  t4_read_reg(adap,
+					      MA_PARITY_ERROR_STATUS2_A));
+	}
+	if (status & MEM_WRAP_INT_CAUSE_F) {
+		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
+		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
+			  "client %u to address %#x\n",
+			  MEM_WRAP_CLIENT_NUM_G(v),
+			  MEM_WRAP_ADDRESS_G(v) << 4);
+	}
+	t4_write_reg(adap, MA_INT_CAUSE_A, status);
+	t4_fatal_err(adap);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void smb_intr_handler(struct adapter *adap)
+{
+	static const struct intr_info smb_intr_info[] = {
+		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void ncsi_intr_handler(struct adapter *adap)
+{
+	static const struct intr_info ncsi_intr_info[] = {
+		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void xgmac_intr_handler(struct adapter *adap, int port)
+{
+	u32 v, int_cause_reg;
+
+	if (is_t4(adap->params.chip))
+		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
+	else
+		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
+
+	v = t4_read_reg(adap, int_cause_reg);
+
+	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
+	if (!v)
+		return;
+
+	if (v & TXFIFO_PRTY_ERR_F)
+		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
+			  port);
+	if (v & RXFIFO_PRTY_ERR_F)
+		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
+			  port);
+	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
+	t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void pl_intr_handler(struct adapter *adap)
+{
+	static const struct intr_info pl_intr_info[] = {
+		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
+		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
+		t4_fatal_err(adap);
+}
+
+#define PF_INTR_MASK (PFSW_F)
+#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
+		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
+		CPL_SWITCH_F | SGE_F | ULP_TX_F)
+
+/**
+ *	t4_slow_intr_handler - control path interrupt handler
+ *	@adapter: the adapter
+ *
+ *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ *	The designation 'slow' is because it involves register reads, while
+ *	data interrupts typically don't involve any MMIOs.
+ */
+int t4_slow_intr_handler(struct adapter *adapter)
+{
+	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
+
+	if (!(cause & GLBL_INTR_MASK))
+		return 0;
+	if (cause & CIM_F)
+		cim_intr_handler(adapter);
+	if (cause & MPS_F)
+		mps_intr_handler(adapter);
+	if (cause & NCSI_F)
+		ncsi_intr_handler(adapter);
+	if (cause & PL_F)
+		pl_intr_handler(adapter);
+	if (cause & SMB_F)
+		smb_intr_handler(adapter);
+	if (cause & XGMAC0_F)
+		xgmac_intr_handler(adapter, 0);
+	if (cause & XGMAC1_F)
+		xgmac_intr_handler(adapter, 1);
+	if (cause & XGMAC_KR0_F)
+		xgmac_intr_handler(adapter, 2);
+	if (cause & XGMAC_KR1_F)
+		xgmac_intr_handler(adapter, 3);
+	if (cause & PCIE_F)
+		pcie_intr_handler(adapter);
+	if (cause & MC_F)
+		mem_intr_handler(adapter, MEM_MC);
+	if (is_t5(adapter->params.chip) && (cause & MC1_F))
+		mem_intr_handler(adapter, MEM_MC1);
+	if (cause & EDC0_F)
+		mem_intr_handler(adapter, MEM_EDC0);
+	if (cause & EDC1_F)
+		mem_intr_handler(adapter, MEM_EDC1);
+	if (cause & LE_F)
+		le_intr_handler(adapter);
+	if (cause & TP_F)
+		tp_intr_handler(adapter);
+	if (cause & MA_F)
+		ma_intr_handler(adapter);
+	if (cause & PM_TX_F)
+		pmtx_intr_handler(adapter);
+	if (cause & PM_RX_F)
+		pmrx_intr_handler(adapter);
+	if (cause & ULP_RX_F)
+		ulprx_intr_handler(adapter);
+	if (cause & CPL_SWITCH_F)
+		cplsw_intr_handler(adapter);
+	if (cause & SGE_F)
+		sge_intr_handler(adapter);
+	if (cause & ULP_TX_F)
+		ulptx_intr_handler(adapter);
+
+	/* Clear the interrupts just processed for which we are the master. */
+	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
+	return 1;
+}
+
+/**
+ *	t4_intr_enable - enable interrupts
+ *	@adapter: the adapter whose interrupts should be enabled
+ *
+ *	Enable PF-specific interrupts for the calling function and the top-level
+ *	interrupt concentrator for global interrupts.  Interrupts are already
+ *	enabled at each module, here we just enable the roots of the interrupt
+ *	hierarchies.
+ *
+ *	Note: this function should be called only when the driver manages
+ *	non PF-specific interrupts from the various HW modules.  Only one PCI
+ *	function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+	u32 val = 0;
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
+	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
+		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
+		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
+		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
+	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
+	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
+}
+
+/**
+ *	t4_intr_disable - disable interrupts
+ *	@adapter: the adapter whose interrupts should be disabled
+ *
+ *	Disable interrupts.  We only disable the top-level interrupt
+ *	concentrators.  The caller must be a PCI function managing global
+ *	interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
+	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
+}
+
+/**
+ *	hash_mac_addr - return the hash value of a MAC address
+ *	@addr: the 48-bit Ethernet MAC address
+ *
+ *	Hashes a MAC address according to the hash function used by HW inexact
+ *	(hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+	a ^= b;
+	a ^= (a >> 12);
+	a ^= (a >> 6);
+	return a & 0x3f;
+}
+
+/**
+ *	t4_config_rss_range - configure a portion of the RSS mapping table
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: virtual interface whose RSS subtable is to be written
+ *	@start: start entry in the table to write
+ *	@n: how many table entries to write
+ *	@rspq: values for the response queue lookup table
+ *	@nrspq: number of values in @rspq
+ *
+ *	Programs the selected part of the VI's RSS mapping table with the
+ *	provided values.  If @nrspq < @n the supplied values are used repeatedly
+ *	until the full table range is populated.
+ *
+ *	The caller must ensure the values in @rspq are in the range allowed for
+ *	@viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+			int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+	int ret;
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq + nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
+	while (n > 0) {
+		int nq = min(n, 32);
+		__be32 *qp = &cmd.iq0_to_iq2;
+
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
+
+		start += nq;
+		n -= nq;
+
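+		/* Each 32-bit word of the command holds three queue IDs;
+		 * wrap around the supplied rspq values until this chunk of
+		 * up to 32 entries is packed.
+		 */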
+		while (nq > 0) {
+			unsigned int v;
+
+			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+
+			*qp++ = cpu_to_be32(v);
+			nq -= 3;
+		}
+
+		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ *	t4_config_glbl_rss - configure the global RSS mode
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@mode: global RSS mode
+ *	@flags: mode-specific flags
+ *
+ *	Sets the global RSS mode.
+ */
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+		       unsigned int flags)
+{
+	struct fw_rss_glb_config_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+		c.u.manual.mode_pkd =
+			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		c.u.basicvirtual.mode_pkd =
+			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
+	} else
+		return -EINVAL;
+	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_config_vi_rss - configure per VI RSS settings
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: the VI id
+ *	@flags: RSS flags
+ *	@defq: id of the default RSS queue for the VI.
+ *
+ *	Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+		     unsigned int flags, unsigned int defq)
+{
+	struct fw_rss_vi_config_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+					FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
+	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
+				   5, 0, val);
+}
+
+/**
+ *	t4_read_rss - read the contents of the RSS mapping table
+ *	@adapter: the adapter
+ *	@map: holds the contents of the RSS mapping table
+ *
+ *	Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+	u32 val;
+	int i, ret;
+
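+	/* Each lookup-table row read by rd_rss_row() carries two queue
+	 * entries, so half as many reads as table entries are needed.
+	 */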
+	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+		ret = rd_rss_row(adapter, i, &val);
+		if (ret)
+			return ret;
+		*map++ = LKPTBLQUEUE0_G(val);
+		*map++ = LKPTBLQUEUE1_G(val);
+	}
+	return 0;
+}
+
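+/* Access TP indirect registers via firmware LDST commands when the firmware
+ * is up or when adap->use_bd is not set; otherwise fall back to direct
+ * (backdoor) register access.
+ */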
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+	return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
+/**
+ *	t4_fw_tp_pio_rw - Access TP PIO through LDST
+ *	@adap: the adapter
+ *	@vals: where the indirect register values are stored/written
+ *	@nregs: how many indirect registers to read/write
+ *	@start_index: index of first indirect register to read/write
+ *	@rw: Read (1) or Write (0)
+ *
+ *	Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+			    unsigned int start_index, unsigned int rw)
+{
+	int ret, i;
+	int cmd = FW_LDST_ADDRSPC_TP_PIO;
+	struct fw_ldst_cmd c;
+
+	for (i = 0 ; i < nregs; i++) {
+		memset(&c, 0, sizeof(c));
+		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+						FW_CMD_REQUEST_F |
+						(rw ? FW_CMD_READ_F :
+						      FW_CMD_WRITE_F) |
+						FW_LDST_CMD_ADDRSPACE_V(cmd));
+		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+		c.u.addrval.addr = cpu_to_be32(start_index + i);
+		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
+		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+		if (!ret && rw)
+			vals[i] = be32_to_cpu(c.u.addrval.val);
+	}
+}
+
+/**
+ *	t4_read_rss_key - read the global RSS key
+ *	@adap: the adapter
+ *	@key: 10-entry array holding the 320-bit RSS key
+ *
+ *	Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+	if (t4_use_ldst(adap))
+		t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+	else
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+				 TP_RSS_SECRET_KEY0_A);
+}
+
+/**
+ *	t4_write_rss_key - program one of the RSS keys
+ *	@adap: the adapter
+ *	@key: 10-entry array holding the 320-bit RSS key
+ *	@idx: which RSS key to write
+ *
+ *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
+ *	0..15 the corresponding entry in the RSS key table is written,
+ *	otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+{
+	u8 rss_key_addr_cnt = 16;
+	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
+	 * allows access to key addresses 16-63 by using KeyWrAddrX
+	 * as index[5:4](upper 2) into key table
+	 */
+	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+		rss_key_addr_cnt = 32;
+
+	if (t4_use_ldst(adap))
+		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+	else
+		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+				  TP_RSS_SECRET_KEY0_A);
+
+	if (idx >= 0 && idx < rss_key_addr_cnt) {
+		if (rss_key_addr_cnt > 16)
+			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+				     KEYWRADDRX_V(idx >> 4) |
+				     T6_VFWRADDR_V(idx) | KEYWREN_F);
+		else
+			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+				     KEYWRADDR_V(idx) | KEYWREN_F);
+	}
+}
+
+/**
+ *	t4_read_rss_pf_config - read PF RSS Configuration Table
+ *	@adapter: the adapter
+ *	@index: the entry in the PF RSS table to read
+ *	@valp: where to store the returned value
+ *
+ *	Reads the PF RSS Configuration Table at the specified index and returns
+ *	the value found there.
+ */
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+			   u32 *valp)
+{
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, valp, 1,
+				TP_RSS_PF0_CONFIG_A + index, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 valp, 1, TP_RSS_PF0_CONFIG_A + index);
+}
+
+/**
+ *	t4_read_rss_vf_config - read VF RSS Configuration Table
+ *	@adapter: the adapter
+ *	@index: the entry in the VF RSS table to read
+ *	@vfl: where to store the returned VFL
+ *	@vfh: where to store the returned VFH
+ *
+ *	Reads the VF RSS Configuration Table at the specified index and returns
+ *	the (VFL, VFH) values found there.
+ */
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+			   u32 *vfl, u32 *vfh)
+{
+	u32 vrt, mask, data;
+
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+		mask = VFWRADDR_V(VFWRADDR_M);
+		data = VFWRADDR_V(index);
+	} else {
+		mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+		data = T6_VFWRADDR_V(index);
+	}
+
+	/* Request that the index'th VF Table values be read into VFL/VFH.
+	 */
+	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
+	vrt |= data | VFRDEN_F;
+	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
+
+	/* Grab the VFL/VFH values ...
+	 */
+	if (t4_use_ldst(adapter)) {
+		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+	} else {
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 vfl, 1, TP_RSS_VFL_CONFIG_A);
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 vfh, 1, TP_RSS_VFH_CONFIG_A);
+	}
+}
+
+/**
+ *	t4_read_rss_pf_map - read PF RSS Map
+ *	@adapter: the adapter
+ *
+ *	Reads the PF RSS Map register and returns its value.
+ */
+u32 t4_read_rss_pf_map(struct adapter *adapter)
+{
+	u32 pfmap;
+
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &pfmap, 1, TP_RSS_PF_MAP_A);
+	return pfmap;
+}
+
+/**
+ *	t4_read_rss_pf_mask - read PF RSS Mask
+ *	@adapter: the adapter
+ *
+ *	Reads the PF RSS Mask register and returns its value.
+ */
+u32 t4_read_rss_pf_mask(struct adapter *adapter)
+{
+	u32 pfmask;
+
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+	else
+		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &pfmask, 1, TP_RSS_PF_MSK_A);
+	return pfmask;
+}
+
+/**
+ *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ *	@adap: the adapter
+ *	@v4: holds the TCP/IP counter values
+ *	@v6: holds the TCP/IPv6 counter values
+ *
+ *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6)
+{
+	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
+
+#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
+#define STAT(x)     val[STAT_IDX(x)]
+#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
+
+	if (v4) {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
+		v4->tcp_out_rsts = STAT(OUT_RST);
+		v4->tcp_in_segs  = STAT64(IN_SEG);
+		v4->tcp_out_segs = STAT64(OUT_SEG);
+		v4->tcp_retrans_segs = STAT64(RXT_SEG);
+	}
+	if (v6) {
+		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
+		v6->tcp_out_rsts = STAT(OUT_RST);
+		v6->tcp_in_segs  = STAT64(IN_SEG);
+		v6->tcp_out_segs = STAT64(OUT_SEG);
+		v6->tcp_retrans_segs = STAT64(RXT_SEG);
+	}
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/**
+ *	t4_tp_get_err_stats - read TP's error MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+			 nchan, TP_MIB_CPL_IN_REQ_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+			 nchan, TP_MIB_CPL_OUT_RSP_0_A);
+}
+
+/**
+ *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+			 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+		       struct tp_fcoe_stats *st)
+{
+	u32 val[2];
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+			 1, TP_MIB_FCOE_DDP_0_A + idx);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+			 1, TP_MIB_FCOE_DROP_0_A + idx);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+			 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+	st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+	u32 val[4];
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+			 TP_MIB_USM_PKTS_A);
+	st->frames = val[0];
+	st->drops = val[1];
+	st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
+ *	t4_read_mtu_tbl - returns the values in the HW path MTU table
+ *	@adap: the adapter
+ *	@mtus: where to store the MTU values
+ *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ *	Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+	u32 v;
+	int i;
+
+	for (i = 0; i < NMTUS; ++i) {
+		t4_write_reg(adap, TP_MTU_TABLE_A,
+			     MTUINDEX_V(0xff) | MTUVALUE_V(i));
+		v = t4_read_reg(adap, TP_MTU_TABLE_A);
+		mtus[i] = MTUVALUE_G(v);
+		if (mtu_log)
+			mtu_log[i] = MTUWIDTH_G(v);
+	}
+}
+
+/**
+ *	t4_read_cong_tbl - reads the congestion control table
+ *	@adap: the adapter
+ *	@incr: where to store the additive increment values
+ *
+ *	Reads the additive increments programmed into the HW congestion
+ *	control table.
+ */
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
+{
+	unsigned int mtu, w;
+
+	for (mtu = 0; mtu < NMTUS; ++mtu)
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			t4_write_reg(adap, TP_CCTRL_TABLE_A,
+				     ROWINDEX_V(0xffff) | (mtu << 5) | w);
+			incr[mtu][w] = (u16)t4_read_reg(adap,
+						TP_CCTRL_TABLE_A) & 0x1fff;
+		}
+}
+
+/**
+ *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ *	@adap: the adapter
+ *	@addr: the indirect TP register address
+ *	@mask: specifies the field within the register to modify
+ *	@val: new value for the field
+ *
+ *	Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+			    unsigned int mask, unsigned int val)
+{
+	t4_write_reg(adap, TP_PIO_ADDR_A, addr);
+	val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
+	t4_write_reg(adap, TP_PIO_DATA_A, val);
+}
+
+/**
+ *	init_cong_ctrl - initialize congestion control parameters
+ *	@a: the alpha values for congestion control
+ *	@b: the beta values for congestion control
+ *
+ *	Initialize the congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ *	t4_load_mtus - write the MTU and congestion control HW tables
+ *	@adap: the adapter
+ *	@mtus: the values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ *	@beta: the values for the congestion control beta parameter
+ *
+ *	Write the HW MTU table with the supplied MTUs and the high-speed
+ *	congestion control table with the supplied alpha, beta, and MTUs.
+ *	We write the two tables together because the additive increments
+ *	depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+		  const unsigned short *alpha, const unsigned short *beta)
+{
+	static const unsigned int avg_pkts[NCCTRL_WIN] = {
+		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+		28672, 40960, 57344, 81920, 114688, 163840, 229376
+	};
+
+	unsigned int i, w;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int mtu = mtus[i];
+		unsigned int log2 = fls(mtu);
+
+		if (!(mtu & ((1 << log2) >> 2)))     /* round */
+			log2--;
+		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
+			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
+
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			unsigned int inc;
+
+			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+				  CC_MIN_INCR);
+
+			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
+				     (w << 16) | (beta[w] << 13) | inc);
+		}
+	}
+}
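+
+/* Worked example: with an MTU of 1500 bytes, the default alpha of 1 from
+ * init_cong_ctrl() and congestion window 0 (avg_pkts[0] = 2), t4_load_mtus()
+ * programs an additive increment of max(((1500 - 40) * 1) / 2, CC_MIN_INCR)
+ * = 730, the 40 bytes being the usual IPv4 + TCP header overhead.
+ */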
+
+/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks.  The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_ms
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+	u64 v = bytes256 * adap->params.vpd.cclk;
+
+	return v * 62 + v / 2;
+}
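+
+/* Worked example: 256 / 4096 = 1 / 16, so with cclk in kHz (the ClkFreq_ms of
+ * the comment above) the rate reduces to 62.5 * bytes256 * cclk.  The integer
+ * expression v * 62 + v / 2 computes exactly that: for bytes256 = 16 on a
+ * 250 MHz core clock (cclk = 250000), v = 4000000 and the result is
+ * 248000000 + 2000000 = 250000000 bytes/s.
+ */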
+
+/**
+ *	t4_get_chan_txrate - get the current per channel Tx rates
+ *	@adap: the adapter
+ *	@nic_rate: rates for NIC traffic
+ *	@ofld_rate: rates for offloaded traffic
+ *
+ *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ *	for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+	u32 v;
+
+	v = t4_read_reg(adap, TP_TX_TRATE_A);
+	nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
+	nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
+	if (adap->params.arch.nchan == NCHAN) {
+		nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
+		nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
+	}
+
+	v = t4_read_reg(adap, TP_TX_ORATE_A);
+	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
+	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
+	if (adap->params.arch.nchan == NCHAN) {
+		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
+		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
+	}
+}
+
+/**
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable is
+ *	%0 @tp is not examined and may be %NULL.  The caller is responsible for
+ *	setting single/multiple trace mode by writing to the MPS_TRC_CFG_A
+ *	register beforehand.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+			int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = TRCMULTIFILTER_F;
+
+	if (!enable) {
+		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+		return 0;
+	}
+
+	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+	if (cfg & TRCMULTIFILTER_F) {
+		/* If multiple tracers are enabled, the maximum capture size
+		 * is 2.5KB (the FIFO size of a single channel) minus 2 flits
+		 * for the CPL_TRACE_PKT header.
+		 */
+		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+			return -EINVAL;
+	} else {
+		/* If multiple tracers are disabled, a maximum packet capture
+		 * size of 9600 bytes is recommended to avoid deadlocks.
+		 * In this mode only trace filter 0 can be enabled and running.
+		 */
+		multitrc = 0;
+		if (tp->snap_len > 9600 || idx)
+			return -EINVAL;
+	}
+
+	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+	    tp->min_len > TFMINPKTSIZE_M)
+		return -EINVAL;
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+		     TFCAPTUREMAX_V(tp->snap_len) |
+		     TFMINPKTSIZE_V(tp->min_len));
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+		     (is_t4(adap->params.chip) ?
+		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
+		     T5_TFINVERTMATCH_V(tp->invert)));
+
+	return 0;
+}
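+
+/* Usage sketch (hypothetical caller): to run more than one tracer the caller
+ * is expected to switch the MPS trace block into multi-filter mode first and
+ * only then configure the higher filter indices, for example
+ *
+ *	t4_set_reg_field(adap, MPS_TRC_CFG_A, TRCMULTIFILTER_F,
+ *			 TRCMULTIFILTER_F);
+ *	ret = t4_set_trace_filter(adap, &tp, 1, 1);
+ *
+ * Clearing TRCMULTIFILTER_F again restricts tracing to filter 0 but permits
+ * the larger 9600-byte snap length checked above.
+ */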
+
+/**
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+			 int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+	if (is_t4(adap->params.chip)) {
+		*enabled = !!(ctla & TFEN_F);
+		tp->port =  TFPORT_G(ctla);
+		tp->invert = !!(ctla & TFINVERTMATCH_F);
+	} else {
+		*enabled = !!(ctla & T5_TFEN_F);
+		tp->port = T5_TFPORT_G(ctla);
+		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+	}
+	tp->snap_len = TFCAPTUREMAX_G(ctlb);
+	tp->min_len = TFMINPKTSIZE_G(ctlb);
+	tp->skip_ofst = TFOFFSET_G(ctla);
+	tp->skip_len = TFLENGTH_G(ctla);
+
+	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
+/**
+ *	t4_pmtx_get_stats - returns the HW stats from PMTX
+ *	@adap: the adapter
+ *	@cnt: where to store the count statistics
+ *	@cycles: where to store the cycle statistics
+ *
+ *	Returns performance statistics from PMTX.
+ */
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+	int i;
+	u32 data[2];
+
+	for (i = 0; i < PM_NSTATS; i++) {
+		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
+		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
+		if (is_t4(adap->params.chip)) {
+			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
+		} else {
+			t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
+					 PM_TX_DBG_DATA_A, data, 2,
+					 PM_TX_DBG_STAT_MSB_A);
+			cycles[i] = (((u64)data[0] << 32) | data[1]);
+		}
+	}
+}
+
+/**
+ *	t4_pmrx_get_stats - returns the HW stats from PMRX
+ *	@adap: the adapter
+ *	@cnt: where to store the count statistics
+ *	@cycles: where to store the cycle statistics
+ *
+ *	Returns performance statistics from PMRX.
+ */
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+	int i;
+	u32 data[2];
+
+	for (i = 0; i < PM_NSTATS; i++) {
+		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
+		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
+		if (is_t4(adap->params.chip)) {
+			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
+		} else {
+			t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
+					 PM_RX_DBG_DATA_A, data, 2,
+					 PM_RX_DBG_STAT_MSB_A);
+			cycles[i] = (((u64)data[0] << 32) | data[1]);
+		}
+	}
+}
+
+/**
+ *	t4_get_mps_bg_map - return the buffer groups associated with a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *
+ *	Returns a bitmap indicating which MPS buffer groups are associated
+ *	with the given port.  Bit i is set if buffer group i is used by the
+ *	port.
+ */
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+{
+	u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+
+	if (n == 0)
+		return idx == 0 ? 0xf : 0;
+	if (n == 1)
+		return idx < 2 ? (3 << (2 * idx)) : 0;
+	return 1 << idx;
+}
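+
+/* Illustrative mapping: NUMPORTS here appears to encode 1, 2 or 4 ports.
+ * With one port, port 0 owns all four buffer groups (0xf); with two ports,
+ * port 0 maps to groups 0-1 (0x3) and port 1 to groups 2-3 (0xc); with four
+ * ports each port idx owns exactly buffer group idx (1 << idx).
+ */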
+
+/**
+ *      t4_get_port_type_description - return Port Type string description
+ *      @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+	static const char *const port_type_description[] = {
+		"R XFI",
+		"R XAUI",
+		"T SGMII",
+		"T XFI",
+		"T XAUI",
+		"KX4",
+		"CX4",
+		"KX",
+		"KR",
+		"R SFP+",
+		"KR/KX",
+		"KR/KX/KX4",
+		"R QSFP_10G",
+		"R QSA",
+		"R QSFP",
+		"R BP40_BA",
+	};
+
+	if (port_type < ARRAY_SIZE(port_type_description))
+		return port_type_description[port_type];
+	return "UNKNOWN";
+}
+
+/**
+ *      t4_get_port_stats_offset - collect port stats relative to a previous
+ *                                 snapshot
+ *      @adap: The adapter
+ *      @idx: The port
+ *      @stats: Current stats to fill
+ *      @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+			      struct port_stats *stats,
+			      struct port_stats *offset)
+{
+	u64 *s, *o;
+	int i;
+
+	t4_get_port_stats(adap, idx, stats);
+	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+			i < (sizeof(struct port_stats) / sizeof(u64));
+			i++, s++, o++)
+		*s -= *o;
+}
+
+/**
+ *	t4_get_port_stats - collect port statistics
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@p: the stats structure to fill
+ *
+ *	Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+	t4_read_reg64(adap, \
+	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
+	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
+	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
+	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
+	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
+	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
+	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
+	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
+	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
+	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
+	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
+	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
+	p->tx_drop             = GET_STAT(TX_PORT_DROP);
+	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
+	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
+	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
+	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
+	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
+	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
+	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
+	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
+	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
+
+	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
+	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
+	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
+	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
+	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
+	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
+	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
+	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
+	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
+	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
+	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
+	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
+	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
+	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
+	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
+	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
+	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
+	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
+	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
+	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
+	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
+	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
+	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
+	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
+	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
+
+	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ *	t4_get_lb_stats - collect loopback port statistics
+ *	@adap: the adapter
+ *	@idx: the loopback port index
+ *	@p: the stats structure to fill
+ *
+ *	Return HW statistics for the given loopback port.
+ */
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
+{
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+	t4_read_reg64(adap, \
+	(is_t4(adap->params.chip) ? \
+	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+	p->octets           = GET_STAT(BYTES);
+	p->frames           = GET_STAT(FRAMES);
+	p->bcast_frames     = GET_STAT(BCAST);
+	p->mcast_frames     = GET_STAT(MCAST);
+	p->ucast_frames     = GET_STAT(UCAST);
+	p->error_frames     = GET_STAT(ERROR);
+
+	p->frames_64        = GET_STAT(64B);
+	p->frames_65_127    = GET_STAT(65B_127B);
+	p->frames_128_255   = GET_STAT(128B_255B);
+	p->frames_256_511   = GET_STAT(256B_511B);
+	p->frames_512_1023  = GET_STAT(512B_1023B);
+	p->frames_1024_1518 = GET_STAT(1024B_1518B);
+	p->frames_1519_max  = GET_STAT(1519B_MAX);
+	p->drop             = GET_STAT(DROP_FRAMES);
+
+	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ *	t4_mk_filtdelwr - create a delete filter WR
+ *	@ftid: the filter ID
+ *	@wr: the filter work request to populate
+ *	@qid: ingress queue to receive the delete notification
+ *
+ *	Creates a filter work request to delete the supplied filter.  If @qid is
+ *	negative the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+	memset(wr, 0, sizeof(*wr));
+	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+				    FW_FILTER_WR_NOREPLY_V(qid < 0));
+	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
+	if (qid >= 0)
+		wr->rx_chan_rx_rpl_iq =
+			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+					FW_CMD_REQUEST_F | \
+					FW_CMD_##rd_wr##_F); \
+	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
+} while (0)
+
+int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
+			  u32 addr, u32 val)
+{
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F |
+					FW_CMD_WRITE_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.addrval.addr = cpu_to_be32(addr);
+	c.u.addrval.val = cpu_to_be32(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_mdio_rd - read a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to read
+ *	@valp: where to store the value
+ *
+ *	Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 *valp)
+{
+	int ret;
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+					 FW_LDST_CMD_MMD_V(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0)
+		*valp = be16_to_cpu(c.u.mdio.rval);
+	return ret;
+}
+
+/**
+ *	t4_mdio_wr - write a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to write
+ *	@valp: value to write
+ *
+ *	Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val)
+{
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+					 FW_LDST_CMD_MMD_V(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
+	c.u.mdio.rval = cpu_to_be16(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_sge_decode_idma_state - decode the idma state
+ *	@adapter: the adapter
+ *	@state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+	static const char * const t4_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"Not used",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL_PREP",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+		"IDMA_FL_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATAFL_DONE",
+		"IDMA_FL_REQ_HEADERFL_DONE",
+	};
+	static const char * const t5_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_ALMOST_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_DROP_SEND_INC",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+	};
+	static const u32 sge_regs[] = {
+		SGE_DEBUG_DATA_LOW_INDEX_2_A,
+		SGE_DEBUG_DATA_LOW_INDEX_3_A,
+		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
+	};
+	const char **sge_idma_decode;
+	int sge_idma_decode_nstates;
+	int i;
+
+	if (is_t4(adapter->params.chip)) {
+		sge_idma_decode = (const char **)t4_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+	} else {
+		sge_idma_decode = (const char **)t5_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+	}
+
+	if (state < sge_idma_decode_nstates)
+		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+	else
+		CH_WARN(adapter, "idma state %d unknown\n", state);
+
+	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+		CH_WARN(adapter, "SGE register %#x value %#x\n",
+			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
+/**
+ *      t4_sge_ctxt_flush - flush the SGE context cache
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *
+ *      Issues a FW command through the given mailbox to flush the
+ *      SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+	int ret;
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+					FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	return ret;
+}
+
+/**
+ *	t4_fw_hello - establish communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@evt_mbox: mailbox to receive async FW events
+ *	@master: specifies the caller's willingness to be the device master
+ *	@state: returns the current device state (if non-NULL)
+ *
+ *	Issues a command to establish communication with FW.  Returns either
+ *	an error (negative integer) or the mailbox of the Master PF.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state)
+{
+	int ret;
+	struct fw_hello_cmd c;
+	u32 v;
+	unsigned int master_mbox;
+	int retries = FW_CMD_HELLO_RETRIES;
+
+retry:
+	memset(&c, 0, sizeof(c));
+	INIT_CMD(c, HELLO, WRITE);
+	c.err_to_clearinit = cpu_to_be32(
+		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
+		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
+		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+					mbox : FW_HELLO_CMD_MBMASTER_M) |
+		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
+		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+		FW_HELLO_CMD_CLEARINIT_F);
+
+	/*
+	 * Issue the HELLO command to the firmware.  If it's not successful
+	 * but indicates that we got a "busy" or "timeout" condition, retry
+	 * the HELLO until we exhaust our retry limit.  If we do exceed our
+	 * retry limit, check to see if the firmware left us any error
+	 * information and report that if so.
+	 */
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret < 0) {
+		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+			goto retry;
+		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
+			t4_report_fw_error(adap);
+		return ret;
+	}
+
+	v = be32_to_cpu(c.err_to_clearinit);
+	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
+	if (state) {
+		if (v & FW_HELLO_CMD_ERR_F)
+			*state = DEV_STATE_ERR;
+		else if (v & FW_HELLO_CMD_INIT_F)
+			*state = DEV_STATE_INIT;
+		else
+			*state = DEV_STATE_UNINIT;
+	}
+
+	/*
+	 * If we're not the Master PF then we need to wait around for the
+	 * Master PF Driver to finish setting up the adapter.
+	 *
+	 * Note that we also do this wait if we're a non-Master-capable PF and
+	 * there is no current Master PF; a Master PF may show up momentarily
+	 * and we wouldn't want to fail pointlessly.  (This can happen when an
+	 * OS loads lots of different drivers rapidly at the same time).  In
+	 * this case, the Master PF returned by the firmware will be
+	 * PCIE_FW_MASTER_M so the test below will work ...
+	 */
+	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
+	    master_mbox != mbox) {
+		int waiting = FW_CMD_HELLO_TIMEOUT;
+
+		/*
+		 * Wait for the firmware to either indicate an error or
+		 * initialized state.  If we see either of these we bail out
+		 * and report the issue to the caller.  If we exhaust the
+		 * "hello timeout" and we haven't exhausted our retries, try
+		 * again.  Otherwise bail with a timeout error.
+		 */
+		for (;;) {
+			u32 pcie_fw;
+
+			msleep(50);
+			waiting -= 50;
+
+			/*
+			 * If neither Error nor Initialized is indicated
+			 * by the firmware, keep waiting till we exhaust our
+			 * timeout ... and then retry if we haven't exhausted
+			 * our retries ...
+			 */
+			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
+				if (waiting <= 0) {
+					if (retries-- > 0)
+						goto retry;
+
+					return -ETIMEDOUT;
+				}
+				continue;
+			}
+
+			/*
+			 * We either have an Error or Initialized condition;
+			 * report errors preferentially.
+			 */
+			if (state) {
+				if (pcie_fw & PCIE_FW_ERR_F)
+					*state = DEV_STATE_ERR;
+				else if (pcie_fw & PCIE_FW_INIT_F)
+					*state = DEV_STATE_INIT;
+			}
+
+			/*
+			 * If we arrived before a Master PF was selected and
+			 * the firmware has since chosen a valid Master PF,
+			 * grab its identity for our caller.
+			 */
+			if (master_mbox == PCIE_FW_MASTER_M &&
+			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
+				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
+			break;
+		}
+	}
+
+	return master_mbox;
+}
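+
+/* Usage sketch (hypothetical caller): a PF driver typically volunteers to be
+ * Master and then inspects the returned state, for example
+ *
+ *	enum dev_state state;
+ *	int master = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY,
+ *				 &state);
+ *
+ *	if (master < 0)
+ *		return master;
+ *	if (state == DEV_STATE_UNINIT && master == adap->mbox)
+ *		t4_fw_initialize(adap, adap->mbox);
+ *
+ * where only the Master PF goes on to initialize the device (typically after
+ * applying its configuration).
+ */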
+
+/**
+ *	t4_fw_bye - end communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_bye_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	INIT_CMD(c, BYE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_early_init - ask FW to initialize the device
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to FW to partially initialize the device.  This
+ *	performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_fw_reset - issue a reset to FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@reset: specifies the type of reset to perform
+ *
+ *	Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+	struct fw_reset_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	INIT_CMD(c, RESET, WRITE);
+	c.val = cpu_to_be32(reset);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW RESET command (if desired)
+ *	@force: force uP into RESET even if FW RESET command fails
+ *
+ *	Issues a RESET command to firmware (if desired) with a HALT indication
+ *	and then puts the microprocessor into RESET state.  The RESET command
+ *	will only be issued if a legitimate mailbox is provided (mbox <=
+ *	PCIE_FW_MASTER_M).
+ *
+ *	This is generally used in order for the host to safely manipulate the
+ *	adapter without fear of conflicting with whatever the firmware might
+ *	be doing.  The only way out of this state is to RESTART the firmware
+ *	...
+ */
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+	int ret = 0;
+
+	/*
+	 * If a legitimate mailbox is provided, issue a RESET command
+	 * with a HALT indication.
+	 */
+	if (mbox <= PCIE_FW_MASTER_M) {
+		struct fw_reset_cmd c;
+
+		memset(&c, 0, sizeof(c));
+		INIT_CMD(c, RESET, WRITE);
+		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
+		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+	}
+
+	/*
+	 * Normally we won't complete the operation if the firmware RESET
+	 * command fails but if our caller insists we'll go ahead and put the
+	 * uP into RESET.  This can be useful if the firmware is hung or even
+	 * missing ...  We'll have to take the risk of putting the uP into
+	 * RESET without the cooperation of firmware in that case.
+	 *
+	 * We also force the firmware's HALT flag to be on in case we bypassed
+	 * the firmware RESET command above or we're dealing with old firmware
+	 * which doesn't have the HALT capability.  This will serve as a flag
+	 * for the incoming firmware to know that it's coming out of a HALT
+	 * rather than a RESET ... if it's new enough to understand that ...
+	 */
+	if (ret == 0 || force) {
+		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
+				 PCIE_FW_HALT_F);
+	}
+
+	/*
+	 * And we always return the result of the firmware RESET command
+	 * even when we force the uP into RESET ...
+	 */
+	return ret;
+}
+
+/**
+ *	t4_fw_restart - restart the firmware by taking the uP out of RESET
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW RESET command (if desired)
+ *	@reset: if we want to do a RESET to restart things
+ *
+ *	Restart firmware previously halted by t4_fw_halt().  On successful
+ *	return the previous PF Master remains as the new PF Master and there
+ *	is no need to issue a new HELLO command, etc.
+ *
+ *	We do this in two ways:
+ *
+ *	 1. If we're dealing with newer firmware we'll simply want to take
+ *	    the chip's microprocessor out of RESET.  This will cause the
+ *	    firmware to start up from its start vector.  And then we'll loop
+ *	    until the firmware indicates it's started again (PCIE_FW.HALT
+ *	    reset to 0) or we timeout.
+ *
+ *	 2. If we're dealing with older firmware then we'll need to RESET
+ *	    the chip since older firmware won't recognize the PCIE_FW.HALT
+ *	    flag and automatically RESET itself on startup.
+ */
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+	if (reset) {
+		/*
+		 * Since we're directing the RESET instead of the firmware
+		 * doing it automatically, we need to clear the PCIE_FW.HALT
+		 * bit.
+		 */
+		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
+
+		/*
+		 * If we've been given a valid mailbox, first try to get the
+		 * firmware to do the RESET.  If that works, great and we can
+		 * return success.  Otherwise, if we haven't been given a
+		 * valid mailbox or the RESET command failed, fall back to
+		 * hitting the chip with a hammer.
+		 */
+		if (mbox <= PCIE_FW_MASTER_M) {
+			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
+			msleep(100);
+			if (t4_fw_reset(adap, mbox,
+					PIORST_F | PIORSTMODE_F) == 0)
+				return 0;
+		}
+
+		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
+		msleep(2000);
+	} else {
+		int ms;
+
+		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
+		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
+				return 0;
+			msleep(100);
+			ms += 100;
+		}
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/**
+ *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW RESET command (if desired)
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *	@force: force upgrade even if firmware doesn't cooperate
+ *
+ *	Perform all of the steps necessary for upgrading an adapter's
+ *	firmware image.  Normally this requires the cooperation of the
+ *	existing firmware in order to halt all existing activities
+ *	but if an invalid mailbox token is passed in we skip that step
+ *	(though we'll still put the adapter microprocessor into RESET in
+ *	that case).
+ *
+ *	On successful return the new firmware will have been loaded and
+ *	the adapter will have been fully RESET losing all previous setup
+ *	state.  On unsuccessful return the adapter may be completely hosed ...
+ *	positive errno indicates that the adapter is ~probably~ intact, a
+ *	negative errno indicates that things are looking bad ...
+ */
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+		  const u8 *fw_data, unsigned int size, int force)
+{
+	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+	int reset, ret;
+
+	if (!t4_fw_matches_chip(adap, fw_hdr))
+		return -EINVAL;
+
+	ret = t4_fw_halt(adap, mbox, force);
+	if (ret < 0 && !force)
+		return ret;
+
+	ret = t4_load_fw(adap, fw_data, size);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Older versions of the firmware don't understand the new
+	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+	 * restart.  So for newly loaded older firmware we'll have to do the
+	 * RESET for it so it starts up on a clean slate.  We can tell if
+	 * the newly loaded firmware will handle this right by checking
+	 * its header flags to see if it advertises the capability.
+	 */
+	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+	return t4_fw_restart(adap, mbox, reset);
+}
+
+/**
+ *	t4_fixup_host_params - fix up host-dependent parameters
+ *	@adap: the adapter
+ *	@page_size: the host's Base Page Size
+ *	@cache_line_size: the host's Cache Line Size
+ *
+ *	Various registers in T4 contain values which are dependent on the
+ *	host's Base Page and Cache Line Sizes.  This function will fix all of
+ *	those registers with the appropriate values as passed in ...
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+			 unsigned int cache_line_size)
+{
+	unsigned int page_shift = fls(page_size) - 1;
+	unsigned int sge_hps = page_shift - 10;
+	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+	unsigned int fl_align_log = fls(fl_align) - 1;
+
+	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+		     HOSTPAGESIZEPF0_V(sge_hps) |
+		     HOSTPAGESIZEPF1_V(sge_hps) |
+		     HOSTPAGESIZEPF2_V(sge_hps) |
+		     HOSTPAGESIZEPF3_V(sge_hps) |
+		     HOSTPAGESIZEPF4_V(sge_hps) |
+		     HOSTPAGESIZEPF5_V(sge_hps) |
+		     HOSTPAGESIZEPF6_V(sge_hps) |
+		     HOSTPAGESIZEPF7_V(sge_hps));
+
+	if (is_t4(adap->params.chip)) {
+		t4_set_reg_field(adap, SGE_CONTROL_A,
+				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+				 EGRSTATUSPAGESIZE_F,
+				 INGPADBOUNDARY_V(fl_align_log -
+						  INGPADBOUNDARY_SHIFT_X) |
+				 EGRSTATUSPAGESIZE_V(stat_len != 64));
+	} else {
+		/* T5 introduced the separation of the Free List Padding and
+		 * Packing Boundaries.  Thus, we can select a smaller Padding
+		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
+		 * Bandwidth, and use a Packing Boundary which is large enough
+		 * to avoid false sharing between CPUs, etc.
+		 *
+		 * For the PCI Link, the smaller the Padding Boundary the
+		 * better.  For the Memory Controller, a smaller Padding
+		 * Boundary is better until we cross under the Memory Line
+		 * Size (the minimum unit of transfer to/from Memory).  If we
+		 * have a Padding Boundary which is smaller than the Memory
+		 * Line Size, that'll involve a Read-Modify-Write cycle on the
+		 * Memory Controller which is never good.  For T5 the smallest
+		 * Padding Boundary which we can select is 32 bytes which is
+		 * larger than any known Memory Controller Line Size so we'll
+		 * use that.
+		 *
+		 * T5 has a different interpretation of the "0" value for the
+		 * Packing Boundary.  This corresponds to 16 bytes instead of
+		 * the expected 32 bytes.  We never have a Packing Boundary
+		 * less than 32 bytes so we can't use that special value but
+		 * on the other hand, if we wanted 32 bytes, the best we can
+		 * really do is 64 bytes.
+		 */
+		if (fl_align <= 32) {
+			fl_align = 64;
+			fl_align_log = 6;
+		}
+		t4_set_reg_field(adap, SGE_CONTROL_A,
+				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+				 EGRSTATUSPAGESIZE_F,
+				 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+				 EGRSTATUSPAGESIZE_V(stat_len != 64));
+		t4_set_reg_field(adap, SGE_CONTROL2_A,
+				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+				 INGPACKBOUNDARY_V(fl_align_log -
+						   INGPACKBOUNDARY_SHIFT_X));
+	}
+	/*
+	 * Adjust various SGE Free List Host Buffer Sizes.
+	 *
+	 * This is something of a crock since we're using fixed indices into
+	 * the array which are also known by the sge.c code and the T4
+	 * Firmware Configuration File.  We need to come up with a much better
+	 * approach to managing this array.  For now, the first four entries
+	 * are:
+	 *
+	 *   0: Host Page Size
+	 *   1: 64KB
+	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+	 *
+	 * For the single-MTU buffers in unpacked mode we need to include
+	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+	 * Padding boundary.  All of these are accommodated in the Factory
+	 * Default Firmware Configuration File but we need to adjust it for
+	 * this host's cache line size.
+	 */
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
+		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
+		     & ~(fl_align-1));
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
+		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
+		     & ~(fl_align-1));
+
+	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
+
+	return 0;
+}
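+
+/* Worked example: on a host with a 4KB base page and 64-byte cache lines,
+ * page_shift = 12 so each HOSTPAGESIZEPFn field is written with sge_hps = 2
+ * (i.e. log2(page size) - 10), stat_len stays 64 and fl_align = 64
+ * (fl_align_log = 6).  On T4 the ingress padding boundary then becomes
+ * fl_align_log - INGPADBOUNDARY_SHIFT_X, i.e. 64 bytes expressed relative to
+ * the 32-byte hardware minimum; on T5 and later the padding boundary is
+ * pinned to 32 bytes and the 64-byte alignment is carried by the packing
+ * boundary instead.
+ */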
+
+/**
+ *	t4_fw_initialize - ask FW to initialize the device
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to FW to partially initialize the device.  This
+ *	performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_query_params_rw - query FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *	@rw: Write and read flag
+ *
+ *	Reads the value of FW or device parameters.  Up to 7 parameters can be
+ *	queried at once.
+ */
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw)
+{
+	int i, ret;
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				  FW_PARAMS_CMD_PFN_V(pf) |
+				  FW_PARAMS_CMD_VFN_V(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+	for (i = 0; i < nparams; i++) {
+		*p++ = cpu_to_be32(*params++);
+		if (rw)
+			*p = cpu_to_be32(*(val + i));
+		p++;
+	}
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0)
+		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+			*val++ = be32_to_cpu(*p);
+	return ret;
+}
+
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val)
+{
+	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
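+
+/* Usage sketch (hypothetical caller, assuming the FW_PARAMS_MNEM_V /
+ * FW_PARAMS_PARAM_X_V helpers from t4fw_api.h):
+ *
+ *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ *	u32 portvec;
+ *	int ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
+ *				  &portvec);
+ *
+ * which asks the firmware for the bitmap of physical ports available to this
+ * function.
+ */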
+
+/**
+ *      t4_set_params_timeout - sets FW or device parameters
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @pf: the PF
+ *      @vf: the VF
+ *      @nparams: the number of parameters
+ *      @params: the parameter names
+ *      @val: the parameter values
+ *      @timeout: the timeout time
+ *
+ *      Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *      specified at once.
+ */
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val, int timeout)
+{
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				  FW_PARAMS_CMD_PFN_V(pf) |
+				  FW_PARAMS_CMD_VFN_V(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+	while (nparams--) {
+		*p++ = cpu_to_be32(*params++);
+		*p++ = cpu_to_be32(*val++);
+	}
+
+	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
+}
+
+/**
+ *	t4_set_params - sets FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *	specified at once.
+ */
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		  unsigned int vf, unsigned int nparams, const u32 *params,
+		  const u32 *val)
+{
+	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+				     FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ *	t4_cfg_pfvf - configure PF/VF resource limits
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF being configured
+ *	@vf: the VF being configured
+ *	@txq: the max number of egress queues
+ *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
+ *	@rxqi: the max number of interrupt-capable ingress queues
+ *	@rxq: the max number of interruptless ingress queues
+ *	@tc: the PCI traffic class
+ *	@vi: the max number of virtual interfaces
+ *	@cmask: the channel access rights mask for the PF/VF
+ *	@pmask: the port access rights mask for the PF/VF
+ *	@nexact: the maximum number of exact MPS filters
+ *	@rcaps: read capabilities
+ *	@wxcaps: write/execute capabilities
+ *
+ *	Configures resource limits and capabilities for a physical or virtual
+ *	function.
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+		unsigned int rxqi, unsigned int rxq, unsigned int tc,
+		unsigned int vi, unsigned int cmask, unsigned int pmask,
+		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+	struct fw_pfvf_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+				  FW_PFVF_CMD_VFN_V(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+				     FW_PFVF_CMD_NIQ_V(rxq));
+	c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+				    FW_PFVF_CMD_PMASK_V(pmask) |
+				    FW_PFVF_CMD_NEQ_V(txq));
+	c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+				      FW_PFVF_CMD_NVI_V(vi) |
+				      FW_PFVF_CMD_NEXACTF_V(nexact));
+	c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+					FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+					FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_alloc_vi - allocate a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@port: physical port associated with the VI
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@nmac: number of MAC addresses needed (1 to 5)
+ *	@mac: the MAC addresses of the VI
+ *	@rss_size: size of RSS table slice associated with this VI
+ *
+ *	Allocates a virtual interface for the given physical port.  If @mac is
+ *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ *	@mac should be large enough to hold @nmac Ethernet addresses; they are
+ *	stored consecutively, so the space needed is @nmac * 6 bytes.
+ *	Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size)
+{
+	int ret;
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
+	c.nmac = nmac - 1;
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret)
+		return ret;
+
+	if (mac) {
+		memcpy(mac, c.mac, sizeof(c.mac));
+		switch (nmac) {
+		case 5:
+			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+		case 4:
+			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+		case 3:
+			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+		case 2:
+			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
+		}
+	}
+	if (rss_size)
+		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ *	t4_free_vi - free a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@viid: virtual interface identifier
+ *
+ *	Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				  FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F |
+				  FW_VI_CMD_PFN_V(pf) |
+				  FW_VI_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ *	t4_set_rxmode - set Rx properties of a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@mtu: the new MTU or -1
+ *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		  bool sleep_ok)
+{
+	struct fw_vi_rxmode_cmd c;
+
+	/* convert to FW values */
+	if (mtu < 0)
+		mtu = FW_RXMODE_MTU_NO_CHG;
+	if (promisc < 0)
+		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
+	if (all_multi < 0)
+		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
+	if (bcast < 0)
+		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
+	if (vlanex < 0)
+		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_RXMODE_CMD_VIID_V(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.mtu_to_vlanexen =
+		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@free: if true any existing filters for this VI id are first removed
+ *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
+ *	@addr: the MAC address(es)
+ *	@idx: where to store the index of each allocated filter
+ *	@hash: pointer to hash address filter bitmap
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Allocates an exact-match filter for each of the supplied addresses and
+ *	sets it to the corresponding address.  If @idx is not %NULL it should
+ *	have at least @naddr entries, each of which will be set to the index of
+ *	the filter allocated for the corresponding MAC address.  If a filter
+ *	could not be allocated for an address, its index is set to 0xffff.
+ *	If @hash is not %NULL, addresses that fail to allocate an exact filter
+ *	are hashed and update the hash filter bitmap pointed at by @hash.
+ *
+ *	Returns a negative error number or the number of filters allocated.
+ */
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+		      unsigned int viid, bool free, unsigned int naddr,
+		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
+{
+	int offset, ret = 0;
+	struct fw_vi_mac_cmd c;
+	unsigned int nfilters = 0;
+	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+	unsigned int rem = naddr;
+
+	if (naddr > max_naddr)
+		return -EINVAL;
+
+	for (offset = 0; offset < naddr ; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+					 rem : ARRAY_SIZE(c.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
+
+		memset(&c, 0, sizeof(c));
+		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+					   FW_CMD_REQUEST_F |
+					   FW_CMD_WRITE_F |
+					   FW_CMD_EXEC_V(free) |
+					   FW_VI_MAC_CMD_VIID_V(viid));
+		c.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+				    FW_CMD_LEN16_V(len16));
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx =
+				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+					    FW_VI_MAC_CMD_IDX_V(
+						    FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset + i],
+			       sizeof(p->macaddr));
+		}
+
+		/* It's okay if we run out of space in our MAC address arena.
+		 * Some of the addresses we submit may get stored so we need
+		 * to run through the reply to see what the results were ...
+		 */
+		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+		if (ret && ret != -FW_ENOMEM)
+			break;
+
+		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_G(
+					be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset + i] = (index >= max_naddr ?
+						   0xffff : index);
+			if (index < max_naddr)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL <<
+					  hash_mac_addr(addr[offset + i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
+	}
+
+	if (ret == 0 || ret == -FW_ENOMEM)
+		ret = nfilters;
+	return ret;
+}
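+
+/* Usage sketch for t4_alloc_mac_filt() (illustrative only; the local
+ * variable names below are hypothetical).  struct fw_vi_mac_cmd carries up
+ * to ARRAY_SIZE(c.u.exact) exact-match entries per mailbox command, so a
+ * request for more addresses is simply split across several commands by
+ * the loop above, e.g.:
+ *
+ *	u16 filt_idx[10];
+ *	u64 mhash = 0;
+ *	int n = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, true,
+ *				  10, addr_list, filt_idx, &mhash, true);
+ *
+ * On success n is the number of exact-match filters actually allocated;
+ * addresses that could not get an exact filter set a bit in mhash instead.
+ */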
+
+/**
+ *	t4_change_mac - modifies the exact-match filter for a MAC address
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@idx: index of existing filter for old value of MAC address, or -1
+ *	@addr: the new MAC address value
+ *	@persist: whether a new MAC allocation should be persistent
+ *	@add_smt: if true also add the address to the HW SMT
+ *
+ *	Modifies an exact-match filter and sets it to the new MAC address.
+ *	Note that in general it is not possible to modify the value of a given
+ *	filter so the generic way to modify an address filter is to free the one
+ *	being used by the old address value and allocate a new filter for the
+ *	new address value.  @idx can be -1 if the address is a new addition.
+ *
+ *	Returns a negative error number or the index of the filter with the new
+ *	MAC value.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int idx, const u8 *addr, bool persist, bool add_smt)
+{
+	int ret, mode;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_exact *p = c.u.exact;
+	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
+
+	if (idx < 0)                             /* new allocation */
+		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+				      FW_VI_MAC_CMD_IDX_V(idx));
+	memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0) {
+		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+		if (ret >= max_mac_addr)
+			ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/**
+ *	t4_set_addr_hash - program the MAC inexact-match hash filter
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@ucast: whether the hash filter should also match unicast addresses
+ *	@vec: the value to be written to the hash filter
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+					  FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+					  FW_CMD_LEN16_V(1));
+	c.u.hash.hashvec = cpu_to_be64(vec);
+	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ *      t4_enable_vi_params - enable/disable a virtual interface
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @viid: the VI id
+ *      @rx_en: 1=enable Rx, 0=disable Rx
+ *      @tx_en: 1=enable Tx, 0=disable Tx
+ *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ *      Enables/disables a virtual interface.  Note that setting DCB Enable
+ *      only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+				     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+				     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+				     FW_LEN16(c));
+	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_enable_vi - enable/disable a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *
+ *	Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		 bool rx_en, bool tx_en)
+{
+	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
+}
+
+/**
+ *	t4_identify_port - identify a VI's port by blinking its LED
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@nblinks: how many times to blink LED at 2.5 Hz
+ *
+ *	Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				   FW_VI_ENABLE_CMD_VIID_V(viid));
+	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+	c.blinkdur = cpu_to_be16(nblinks);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_iq_free - free an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_eth_eq_free - free an Ethernet egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		   unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_ETH_CMD_PFN_V(pf) |
+				  FW_EQ_ETH_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_ctrl_eq_free - free a control egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_CTRL_CMD_PFN_V(pf) |
+				  FW_EQ_CTRL_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+	c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_ofld_eq_free - free an offload egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ofld_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+				  FW_EQ_OFLD_CMD_PFN_V(pf) |
+				  FW_EQ_OFLD_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_handle_fw_rpl - process a FW reply message
+ *	@adap: the adapter
+ *	@rpl: start of the FW message
+ *
+ *	Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+
+	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
+		int speed = 0, fc = 0;
+		const struct fw_port_cmd *p = (void *)rpl;
+		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
+		int port = adap->chan_map[chan];
+		struct port_info *pi = adap2pinfo(adap, port);
+		struct link_config *lc = &pi->link_cfg;
+		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+		if (stat & FW_PORT_CMD_RXPAUSE_F)
+			fc |= PAUSE_RX;
+		if (stat & FW_PORT_CMD_TXPAUSE_F)
+			fc |= PAUSE_TX;
+		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+			speed = 100;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+			speed = 1000;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+			speed = 10000;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+			speed = 40000;
+
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {                    /* something changed */
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			lc->supported = be16_to_cpu(p->u.info.pcap);
+			t4_os_link_changed(adap, port, link_ok);
+		}
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, port);
+		}
+	}
+	return 0;
+}
+
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
+{
+	u16 val;
+
+	if (pci_is_pcie(adapter->pdev)) {
+		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
+		p->speed = val & PCI_EXP_LNKSTA_CLS;
+		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+	}
+}
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: link capabilities
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void init_link_config(struct link_config *lc, unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+#define CIM_PF_NOACCESS 0xeeeeeeee
+
+int t4_wait_dev_ready(void __iomem *regs)
+{
+	u32 whoami;
+
+	whoami = readl(regs + PL_WHOAMI_A);
+	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
+		return 0;
+
+	msleep(500);
+	whoami = readl(regs + PL_WHOAMI_A);
+	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
+}
+
+struct flash_desc {
+	u32 vendor_and_model_id;
+	u32 size_mb;
+};
+
+static int get_flash_params(struct adapter *adap)
+{
+	/* Table for non-Numonix supported flash parts.  Numonix parts are left
+	 * to the preexisting code.  All flash parts have 64KB sectors.
+	 */
+	static struct flash_desc supported_flash[] = {
+		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
+	};
+
+	int ret;
+	u32 info;
+
+	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+	if (!ret)
+		ret = sf1_read(adap, 3, 0, 1, &info);
+	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
+	if (ret)
+		return ret;
+
+	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
+		if (supported_flash[ret].vendor_and_model_id == info) {
+			adap->params.sf_size = supported_flash[ret].size_mb;
+			adap->params.sf_nsec =
+				adap->params.sf_size / SF_SEC_SIZE;
+			return 0;
+		}
+
+	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
+		return -EINVAL;
+	info >>= 16;                           /* log2 of size */
+	if (info >= 0x14 && info < 0x18)
+		adap->params.sf_nsec = 1 << (info - 16);
+	else if (info == 0x18)
+		adap->params.sf_nsec = 64;
+	else
+		return -EINVAL;
+	adap->params.sf_size = 1 << info;
+	adap->params.sf_fw_start =
+		t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
+
+	if (adap->params.sf_size < FLASH_MIN_SIZE)
+		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
+			 adap->params.sf_size, FLASH_MIN_SIZE);
+	return 0;
+}
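+
+/* Worked example of the Numonix ID decode above (a sketch; the ID value is
+ * hypothetical): for a flash part whose SF_RD_ID reply has 0x20 in the low
+ * byte and 0x17 in the size byte, info >> 16 == 0x17, so
+ * sf_size = 1 << 0x17 = 8 MB and sf_nsec = 1 << (0x17 - 16) = 128 sectors,
+ * i.e. 128 * SF_SEC_SIZE (64 KB) = 8 MB, as expected.
+ */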
+
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+	u16 val;
+	u32 pcie_cap;
+
+	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (pcie_cap) {
+		pci_read_config_word(adapter->pdev,
+				     pcie_cap + PCI_EXP_DEVCTL2, &val);
+		val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+		val |= range;
+		pci_write_config_word(adapter->pdev,
+				      pcie_cap + PCI_EXP_DEVCTL2, val);
+	}
+}
+
+/**
+ *	t4_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *
+ *	Initialize adapter SW state for the various HW modules, set initial
+ *	values for some adapter tunables, take PHYs out of reset, and
+ *	initialize the MDIO interface.
+ */
+int t4_prep_adapter(struct adapter *adapter)
+{
+	int ret, ver;
+	uint16_t device_id;
+	u32 pl_rev;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
+
+	ret = get_flash_params(adapter);
+	if (ret < 0) {
+		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+		return ret;
+	}
+
+	/* Retrieve adapter's device ID
+	 */
+	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
+	ver = device_id >> 12;
+	adapter->params.chip = 0;
+	switch (ver) {
+	case CHELSIO_T4:
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+		adapter->params.arch.sge_fl_db = DBPRIO_F;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 128;
+		adapter->params.arch.nchan = NCHAN;
+		adapter->params.arch.vfcount = 128;
+		break;
+	case CHELSIO_T5:
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 128;
+		adapter->params.arch.nchan = NCHAN;
+		adapter->params.arch.vfcount = 128;
+		break;
+	case CHELSIO_T6:
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+		adapter->params.arch.sge_fl_db = 0;
+		adapter->params.arch.mps_tcam_size =
+				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		adapter->params.arch.mps_rplc_size = 256;
+		adapter->params.arch.nchan = 2;
+		adapter->params.arch.vfcount = 256;
+		break;
+	default:
+		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+			device_id);
+		return -EINVAL;
+	}
+
+	adapter->params.cim_la_size = CIMLA_SIZE;
+	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+	/*
+	 * Default port for debugging in case we can't reach FW.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.portvec = 1;
+	adapter->params.vpd.cclk = 50000;
+
+	/* Set pci completion timeout value to 4 seconds. */
+	set_pcie_completion_timeout(adapter, 0xd);
+	return 0;
+}
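+
+/* Example of the device-ID decode in t4_prep_adapter() (the particular PCI
+ * Device ID below is only illustrative): a T5 adapter such as one with
+ * Device ID 0x5401 has 0x5 in its top nibble, so ver matches CHELSIO_T5 and
+ * the T5 arch parameters above (T5 MPS TCAM size, DBPRIO_F | DBTYPE_F
+ * doorbell flags, etc.) are selected.
+ */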
+
+/**
+ *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *	@adapter: the adapter
+ *	@qid: the Queue ID
+ *	@qtype: the Ingress or Egress type for @qid
+ *	@user: true if this request is for a user mode queue
+ *	@pbar2_qoffset: BAR2 Queue Offset
+ *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ *	Returns the BAR2 SGE Queue Registers information associated with the
+ *	indicated Absolute Queue ID.  These are passed back in return value
+ *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ *	This may return an error which indicates that BAR2 SGE Queue
+ *	registers aren't available.  If an error is not returned, then the
+ *	following values are returned:
+ *
+ *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ *	require the "Inferred Queue ID" ability may be used.  E.g. the
+ *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ *	then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter,
+		      unsigned int qid,
+		      enum t4_bar2_qtype qtype,
+		      int user,
+		      u64 *pbar2_qoffset,
+		      unsigned int *pbar2_qid)
+{
+	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+	u64 bar2_page_offset, bar2_qoffset;
+	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
+	if (!user && is_t4(adapter->params.chip))
+		return -EINVAL;
+
+	/* Get our SGE Page Size parameters.
+	 */
+	page_shift = adapter->params.sge.hps + 10;
+	page_size = 1 << page_shift;
+
+	/* Get the right Queues per Page parameters for our Queue.
+	 */
+	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+		     ? adapter->params.sge.eq_qpp
+		     : adapter->params.sge.iq_qpp);
+	qpp_mask = (1 << qpp_shift) - 1;
+
+	/*  Calculate the basics of the BAR2 SGE Queue register area:
+	 *  o The BAR2 page the Queue registers will be in.
+	 *  o The BAR2 Queue ID.
+	 *  o The BAR2 Queue ID Offset into the BAR2 page.
+	 */
+	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
+	bar2_qid = qid & qpp_mask;
+	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
+	 * hardware will infer the Absolute Queue ID simply from the writes to
+	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
+	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+	 * from the BAR2 Page and BAR2 Queue ID.
+	 *
+	 * One important consequence of this is that some BAR2 SGE registers
+	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+	 * there.  But other registers synthesize the SGE Queue ID purely
+	 * from the writes to the registers -- the Write Combined Doorbell
+	 * Buffer is a good example.  These BAR2 SGE Registers are only
+	 * available for those BAR2 SGE Register areas where the SGE Absolute
+	 * Queue ID can be inferred from simple writes.
+	 */
+	bar2_qoffset = bar2_page_offset;
+	bar2_qinferred = (bar2_qid_offset < page_size);
+	if (bar2_qinferred) {
+		bar2_qoffset += bar2_qid_offset;
+		bar2_qid = 0;
+	}
+
+	*pbar2_qoffset = bar2_qoffset;
+	*pbar2_qid = bar2_qid;
+	return 0;
+}
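+
+/* Worked example for the BAR2 offset calculation above (a sketch; the
+ * parameter values are assumed): with a 4 KB SGE Page Size (an hps giving
+ * page_shift == 12), 64 Egress Queues Per Page (qpp_shift == 6) and a
+ * 128-byte SGE_UDB_SIZE, Queue ID 70 gives
+ *
+ *	bar2_page_offset = (70 >> 6) << 12 = 0x1000
+ *	bar2_qid         = 70 & 63         = 6
+ *	bar2_qid_offset  = 6 * 128         = 768
+ *
+ * Since 768 < 4096 the Queue ID can be inferred, so the returned offset is
+ * 0x1000 + 768 = 0x1300 and the returned BAR2 Queue ID is 0.
+ */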
+
+/**
+ *	t4_init_devlog_params - initialize adapter->params.devlog
+ *	@adap: the adapter
+ *
+ *	Initialize various fields of the adapter's Firmware Device Log
+ *	Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+	struct devlog_params *dparams = &adap->params.devlog;
+	u32 pf_dparams;
+	unsigned int devlog_meminfo;
+	struct fw_devlog_cmd devlog_cmd;
+	int ret;
+
+	/* If we're dealing with newer firmware, the Device Log Parameters
+	 * are stored in a designated register which allows us to access the
+	 * Device Log even if we can't talk to the firmware.
+	 */
+	pf_dparams =
+		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+	if (pf_dparams) {
+		unsigned int nentries, nentries128;
+
+		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+		nentries = (nentries128 + 1) * 128;
+		dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+		return 0;
+	}
+
+	/* Otherwise, ask the firmware for its Device Log Parameters.
+	 */
+	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
+	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
+	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+			 &devlog_cmd);
+	if (ret)
+		return ret;
+
+	devlog_meminfo =
+		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
+	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
+
+	return 0;
+}
+
+/**
+ *	t4_init_sge_params - initialize adap->params.sge
+ *	@adapter: the adapter
+ *
+ *	Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 hps, qpp;
+	unsigned int s_hps, s_qpp;
+
+	/* Extract the SGE Page Size for our PF.
+	 */
+	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
+	s_hps = (HOSTPAGESIZEPF0_S +
+		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
+	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
+
+	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
+	 */
+	s_qpp = (QUEUESPERPAGEPF0_S +
+		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
+	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+
+	return 0;
+}
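+
+/* Example of the per-PF field extraction above (a sketch; the register
+ * field layout is assumed to be 4 bits per PF, i.e.
+ * HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S == 4): for PF 3 the Host Page Size
+ * nibble sits at bit 12 of SGE_HOST_PAGE_SIZE_A, so
+ * hps = (hps_reg >> 12) & 0xf.  An hps value of 2 corresponds to a
+ * 2^(2 + 10) = 4 KB SGE Page Size (see t4_bar2_sge_qregs above).
+ */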
+
+/**
+ *      t4_init_tp_params - initialize adap->params.tp
+ *      @adap: the adapter
+ *
+ *      Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+	int chan;
+	u32 v;
+
+	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+	adap->params.tp.tre = TIMERRESOLUTION_G(v);
+	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
+
+	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+	for (chan = 0; chan < NCHAN; chan++)
+		adap->params.tp.tx_modq[chan] = chan;
+
+	/* Cache the adapter's Compressed Filter Mode and global Ingress
+	 * Configuration.
+	 */
+	if (t4_use_ldst(adap)) {
+		t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+				TP_VLAN_PRI_MAP_A, 1);
+		t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+				TP_INGRESS_CONFIG_A, 1);
+	} else {
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &adap->params.tp.vlan_pri_map, 1,
+				 TP_VLAN_PRI_MAP_A);
+		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+				 &adap->params.tp.ingress_config, 1,
+				 TP_INGRESS_CONFIG_A);
+	}
+
+	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+	 * shift positions of several elements of the Compressed Filter Tuple
+	 * for this adapter which we need frequently ...
+	 */
+	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
+	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+							       PROTOCOL_F);
+
+	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+	 */
+	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
+		adap->params.tp.vnic_shift = -1;
+
+	return 0;
+}
+
+/**
+ *      t4_filter_field_shift - calculate filter field shift
+ *      @adap: the adapter
+ *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *      Return the shift position of a filter field within the Compressed
+ *      Filter Tuple.  The filter field is specified via its selection bit
+ *      within TP_VLAN_PRI_MAP (filter mode).  E.g. VLAN_F.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+	unsigned int sel;
+	int field_shift;
+
+	if ((filter_mode & filter_sel) == 0)
+		return -1;
+
+	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+		switch (filter_mode & sel) {
+		case FCOE_F:
+			field_shift += FT_FCOE_W;
+			break;
+		case PORT_F:
+			field_shift += FT_PORT_W;
+			break;
+		case VNIC_ID_F:
+			field_shift += FT_VNIC_ID_W;
+			break;
+		case VLAN_F:
+			field_shift += FT_VLAN_W;
+			break;
+		case TOS_F:
+			field_shift += FT_TOS_W;
+			break;
+		case PROTOCOL_F:
+			field_shift += FT_PROTOCOL_W;
+			break;
+		case ETHERTYPE_F:
+			field_shift += FT_ETHERTYPE_W;
+			break;
+		case MACMATCH_F:
+			field_shift += FT_MACMATCH_W;
+			break;
+		case MPSHITTYPE_F:
+			field_shift += FT_MPSHITTYPE_W;
+			break;
+		case FRAGMENTATION_F:
+			field_shift += FT_FRAGMENTATION_W;
+			break;
+		}
+	}
+	return field_shift;
+}
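+
+/* Worked example for t4_filter_field_shift() (a sketch; the field widths
+ * are assumptions taken from the FT_*_W register definitions): with a
+ * filter mode of PORT_F | VLAN_F | PROTOCOL_F, and assuming FT_PORT_W == 3
+ * and FT_VLAN_W == 17,
+ *
+ *	t4_filter_field_shift(adap, PORT_F)     == 0
+ *	t4_filter_field_shift(adap, VLAN_F)     == 3
+ *	t4_filter_field_shift(adap, PROTOCOL_F) == 3 + 17 == 20
+ *
+ * and any field not present in the mode, e.g. TOS_F here, returns -1.
+ */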
+
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+	int i, ret;
+	struct fw_rss_vi_config_cmd rvc;
+
+	memset(&rvc, 0, sizeof(rvc));
+
+	for_each_port(adap, i) {
+		struct port_info *p = adap2pinfo(adap, i);
+
+		rvc.op_to_viid =
+			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+		if (ret)
+			return ret;
+		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+	}
+	return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+	u8 addr[6];
+	int ret, i, j = 0;
+	struct fw_port_cmd c;
+	struct fw_rss_vi_config_cmd rvc;
+
+	memset(&c, 0, sizeof(c));
+	memset(&rvc, 0, sizeof(rvc));
+
+	for_each_port(adap, i) {
+		unsigned int rss_size;
+		struct port_info *p = adap2pinfo(adap, i);
+
+		while ((adap->params.portvec & (1 << j)) == 0)
+			j++;
+
+		c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+					     FW_CMD_REQUEST_F | FW_CMD_READ_F |
+					     FW_PORT_CMD_PORTID_V(j));
+		c.action_to_len16 = cpu_to_be32(
+			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+			FW_LEN16(c));
+		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+		if (ret)
+			return ret;
+
+		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+		if (ret < 0)
+			return ret;
+
+		p->viid = ret;
+		p->tx_chan = j;
+		p->lport = j;
+		p->rss_size = rss_size;
+		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+		adap->port[i]->dev_port = j;
+
+		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
+		p->mod_type = FW_PORT_MOD_TYPE_NA;
+
+		rvc.op_to_viid =
+			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+		if (ret)
+			return ret;
+		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+
+		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
+		j++;
+	}
+	return 0;
+}
+
+/**
+ *	t4_read_cimq_cfg - read CIM queue configuration
+ *	@adap: the adapter
+ *	@base: holds the queue base addresses in bytes
+ *	@size: holds the queue sizes in bytes
+ *	@thres: holds the queue full thresholds in bytes
+ *
+ *	Returns the current configuration of the CIM queues, starting with
+ *	the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+	unsigned int i, v;
+	int cim_num_obq = is_t4(adap->params.chip) ?
+				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+	for (i = 0; i < CIM_NUM_IBQ; i++) {
+		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
+			     QUENUMSELECT_V(i));
+		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+		/* value is in 256-byte units */
+		*base++ = CIMQBASE_G(v) * 256;
+		*size++ = CIMQSIZE_G(v) * 256;
+		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
+	}
+	for (i = 0; i < cim_num_obq; i++) {
+		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+			     QUENUMSELECT_V(i));
+		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+		/* value is in 256-byte units */
+		*base++ = CIMQBASE_G(v) * 256;
+		*size++ = CIMQSIZE_G(v) * 256;
+	}
+}
+
+/**
+ *	t4_read_cim_ibq - read the contents of a CIM inbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err, attempts;
+	unsigned int addr;
+	const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+	if (qid > 5 || (n & 3))
+		return -EINVAL;
+
+	addr = qid * nwords;
+	if (n > nwords)
+		n = nwords;
+
+	/* It might take 3-10ms before the IBQ debug read access is allowed.
+	 * Wait for 1 Sec with a delay of 1 usec.
+	 */
+	attempts = 1000000;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
+			     IBQDBGEN_F);
+		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
+				      attempts, 1);
+		if (err)
+			return err;
+		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
+	}
+	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
+	return i;
+}
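+
+/* Addressing note for t4_read_cim_ibq(): per t4_hw.h, CIM_IBQ_SIZE is 128
+ * 128-bit entries, so each IBQ spans CIM_IBQ_SIZE * 4 = 512 32-bit words of
+ * debug address space and queue @qid starts at word address qid * 512.  For
+ * example, reading IBQ 2 in full means n == 512 and a start address of 1024.
+ */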
+
+/**
+ *	t4_read_cim_obq - read the contents of a CIM outbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err;
+	unsigned int addr, v, nwords;
+	int cim_num_obq = is_t4(adap->params.chip) ?
+				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+	if ((qid > (cim_num_obq - 1)) || (n & 3))
+		return -EINVAL;
+
+	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+		     QUENUMSELECT_V(qid));
+	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+
+	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
+	nwords = CIMQSIZE_G(v) * 64;  /* same */
+	if (n > nwords)
+		n = nwords;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
+			     OBQDBGEN_F);
+		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
+				      2, 1);
+		if (err)
+			return err;
+		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
+	}
+	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
+	return i;
+}
+
+/**
+ *	t4_cim_read - read a block from CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+		unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+				      0, 5, 2);
+		if (!ret)
+			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+	}
+	return ret;
+}
+
+/**
+ *	t4_cim_write - write a block into CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to write
+ *	@valp: set of values to write
+ *
+ *	Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+		 const unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+				      0, 5, 2);
+	}
+	return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+			 unsigned int val)
+{
+	return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ *	t4_cim_read_la - read CIM LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the CIM LA buffer with the most recent entry at
+ *	the end of the returned data and with the entry at @wrptr first.
+ *	We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+	int i, ret;
+	unsigned int cfg, val, idx;
+
+	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+	if (ret)
+		return ret;
+
+	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
+		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
+		if (ret)
+			return ret;
+	}
+
+	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+	if (ret)
+		goto restart;
+
+	idx = UPDBGLAWRPTR_G(val);
+	if (wrptr)
+		*wrptr = idx;
+
+	for (i = 0; i < adap->params.cim_la_size; i++) {
+		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
+		if (ret)
+			break;
+		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+		if (ret)
+			break;
+		if (val & UPDBGLARDEN_F) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
+		if (ret)
+			break;
+		idx = (idx + 1) & UPDBGLARDPTR_M;
+	}
+restart:
+	if (cfg & UPDBGLAEN_F) {
+		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+				      cfg & ~UPDBGLARDEN_F);
+		if (!ret)
+			ret = r;
+	}
+	return ret;
+}
+
+/**
+ *	t4_tp_read_la - read TP LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the TP LA buffer with the most recent entry at
+ *	the end of the returned data and with the entry at @wrptr first.
+ *	We leave the LA in the running state we find it in.
+ */
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+	bool last_incomplete;
+	unsigned int i, cfg, val, idx;
+
+	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
+	if (cfg & DBGLAENABLE_F)			/* freeze LA */
+		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
+
+	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
+	idx = DBGLAWPTR_G(val);
+	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
+	if (last_incomplete)
+		idx = (idx + 1) & DBGLARPTR_M;
+	if (wrptr)
+		*wrptr = idx;
+
+	val &= 0xffff;
+	val &= ~DBGLARPTR_V(DBGLARPTR_M);
+	val |= adap->params.tp.la_mask;
+
+	for (i = 0; i < TPLA_SIZE; i++) {
+		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
+		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
+		idx = (idx + 1) & DBGLARPTR_M;
+	}
+
+	/* Wipe out last entry if it isn't valid */
+	if (last_incomplete)
+		la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+	if (cfg & DBGLAENABLE_F)                    /* restore running state */
+		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+			     cfg | adap->params.tp.la_mask);
+}
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang.  We'll repeat the warning every Warning Repeat seconds
+ * for as long as the SGE Ingress DMA Channel appears to be hung, until the
+ * situation clears.
+ * If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *
+ *	Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma)
+{
+	/* Initialize the state variables for detecting an SGE Ingress DMA
+	 * hang.  The SGE has internal counters which count up on each clock
+	 * tick whenever the SGE finds its Ingress DMA State Engines in the
+	 * same state they were on the previous clock tick.  The clock used is
+	 * the Core Clock so we have a limit on the maximum "time" they can
+	 * record; typically a very small number of seconds.  For instance,
+	 * with a 600MHz Core Clock, we can only count up to a bit more than
+	 * 7s.  So we'll synthesize a larger counter in order to not run the
+	 * risk of having the "timers" overflow and give us the flexibility to
+	 * maintain a Hung SGE State Machine of our own which operates across
+	 * a longer time frame.
+	 */
+	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+	idma->idma_stalled[0] = 0;
+	idma->idma_stalled[1] = 0;
+}
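+
+/* Arithmetic behind the 1s threshold above (a sketch; the 600 MHz Core
+ * Clock is just the example from the comment): core_ticks_per_usec() would
+ * return 600, so idma_1s_thresh = 600 * 1000000 = 6 * 10^8 ticks, while the
+ * raw 32-bit hardware counter saturates at 0xffffffff ticks, i.e. roughly
+ * 0xffffffff / (6 * 10^8) ~= 7.16 seconds -- hence the "bit more than 7s"
+ * limit mentioned above and the need for the synthesized longer-running
+ * timer maintained in t4_idma_monitor().
+ */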
+
+/**
+ *	t4_idma_monitor - monitor SGE Ingress DMA state
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *	@hz: number of ticks/second
+ *	@ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks)
+{
+	int i, idma_same_state_cnt[2];
+
+	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
+	  * are counters inside the SGE which count up on each clock when the
+	  * SGE finds its Ingress DMA State Engines in the same states they
+	  * were in the previous clock.  The counters will peg out at
+	  * 0xffffffff without wrapping around so once they pass the 1s
+	  * threshold they'll stay above that till the IDMA state changes.
+	  */
+	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+	for (i = 0; i < 2; i++) {
+		u32 debug0, debug11;
+
+		/* If the Ingress DMA Same State Counter ("timer") is less
+		 * than 1s, then we can reset our synthesized Stall Timer and
+		 * continue.  If we have previously emitted warnings about a
+		 * potential stalled Ingress Queue, issue a note indicating
+		 * that the Ingress Queue has resumed forward progress.
+		 */
+		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+					 "resumed after %d seconds\n",
+					 i, idma->idma_qid[i],
+					 idma->idma_stalled[i] / hz);
+			idma->idma_stalled[i] = 0;
+			continue;
+		}
+
+		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+		 * domain.  The first time we get here it'll be because we
+		 * passed the 1s Threshold; each additional time it'll be
+		 * because the RX Timer Callback is being fired on its regular
+		 * schedule.
+		 *
+		 * If the stall is below our Potential Hung Ingress Queue
+		 * Warning Threshold, continue.
+		 */
+		if (idma->idma_stalled[i] == 0) {
+			idma->idma_stalled[i] = hz;
+			idma->idma_warn[i] = 0;
+		} else {
+			idma->idma_stalled[i] += ticks;
+			idma->idma_warn[i] -= ticks;
+		}
+
+		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+			continue;
+
+		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+		 */
+		if (idma->idma_warn[i] > 0)
+			continue;
+		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+		/* Read and save the SGE IDMA State and Queue ID information.
+		 * We do this every time in case it changes across time ...
+		 * can't be too careful ...
+		 */
+		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+			 i, idma->idma_qid[i], idma->idma_state[i],
+			 idma->idma_stalled[i] / hz,
+			 debug0, debug11);
+		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+	}
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
new file mode 100644
index 0000000..13708fd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -0,0 +1,271 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include <linux/types.h>
+
+enum {
+	NCHAN          = 4,     /* # of HW channels */
+	MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
+	EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE   = 1024,  /* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
+	TCB_SIZE       = 128,   /* TCB size */
+	NMTUS          = 16,    /* size of MTU table */
+	NCCTRL_WIN     = 32,    /* # of congestion control windows */
+	PM_NSTATS      = 5,     /* # of PM stats */
+	MBOX_LEN       = 64,    /* mailbox size in bytes */
+	TRACE_LEN      = 112,   /* length of trace data and mask */
+	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
+};
+
+enum {
+	CIM_NUM_IBQ    = 6,     /* # of CIM IBQs */
+	CIM_NUM_OBQ    = 6,     /* # of CIM OBQs */
+	CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
+	CIMLA_SIZE     = 2048,  /* # of 32-bit words in CIM LA */
+	CIM_PIFLA_SIZE = 64,    /* # of 192-bit words in CIM PIF LA */
+	CIM_MALA_SIZE  = 64,    /* # of 160-bit words in CIM MA LA */
+	CIM_IBQ_SIZE   = 128,   /* # of 128-bit words in a CIM IBQ */
+	CIM_OBQ_SIZE   = 128,   /* # of 128-bit words in a CIM OBQ */
+	TPLA_SIZE      = 128,   /* # of 64-bit words in TP LA */
+	ULPRX_LA_SIZE  = 512,   /* # of 256-bit words in ULP_RX LA */
+};
+
+enum {
+	SF_PAGE_SIZE = 256,           /* serial flash page size */
+	SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
+};
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */
+
+enum {
+	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
+	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
+	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+	SGE_MAX_IQ_SIZE = 65520,
+
+	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
+
+	SGE_EQ_IDXSIZE = 64,      /* egress queue pidx/cidx unit size */
+
+	SGE_INTRDST_PCI = 0,      /* interrupt destination is PCI-E */
+	SGE_INTRDST_IQ = 1,       /*   destination is an ingress queue */
+
+	SGE_UPDATEDEL_NONE = 0,   /* ingress queue pidx update delivery */
+	SGE_UPDATEDEL_INTR = 1,   /*   interrupt */
+	SGE_UPDATEDEL_STPG = 2,   /*   status page */
+	SGE_UPDATEDEL_BOTH = 3,   /*   interrupt and status page */
+
+	SGE_HOSTFCMODE_NONE = 0,  /* egress queue cidx updates */
+	SGE_HOSTFCMODE_IQ = 1,    /*   sent to ingress queue */
+	SGE_HOSTFCMODE_STPG = 2,  /*   sent to status page */
+	SGE_HOSTFCMODE_BOTH = 3,  /*   ingress queue and status page */
+
+	SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
+	SGE_FETCHBURSTMIN_32B = 1,
+	SGE_FETCHBURSTMIN_64B = 2,
+	SGE_FETCHBURSTMIN_128B = 3,
+
+	SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
+	SGE_FETCHBURSTMAX_128B = 1,
+	SGE_FETCHBURSTMAX_256B = 2,
+	SGE_FETCHBURSTMAX_512B = 3,
+
+	SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
+	SGE_CIDXFLUSHTHRESH_2 = 1,
+	SGE_CIDXFLUSHTHRESH_4 = 2,
+	SGE_CIDXFLUSHTHRESH_8 = 3,
+	SGE_CIDXFLUSHTHRESH_16 = 4,
+	SGE_CIDXFLUSHTHRESH_32 = 5,
+	SGE_CIDXFLUSHTHRESH_64 = 6,
+	SGE_CIDXFLUSHTHRESH_128 = 7,
+
+	SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
+};
+
+/* PCI-e memory window access */
+enum pcie_memwin {
+	MEMWIN_NIC      = 0,
+	MEMWIN_RSVD1    = 1,
+	MEMWIN_RSVD2    = 2,
+	MEMWIN_RDMA     = 3,
+	MEMWIN_RSVD4    = 4,
+	MEMWIN_FOISCSI  = 5,
+	MEMWIN_CSIOSTOR = 6,
+	MEMWIN_RSVD7    = 7,
+};
+
+struct sge_qstat {                /* data written to SGE queue status entries */
+	__be32 qid;
+	__be16 cidx;
+	__be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+	__be32 hdrbuflen_pidx;
+	__be32 pldbuflen_qid;
+	union {
+		u8 type_gen;
+		__be64 last_flit;
+	};
+};
+
+#define RSPD_NEWBUF_S    31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F    RSPD_NEWBUF_V(1U)
+
+#define RSPD_LEN_S    0
+#define RSPD_LEN_M    0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
+
+#define RSPD_QID_S    RSPD_LEN_S
+#define RSPD_QID_M    RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S    7
+
+#define RSPD_TYPE_S    4
+#define RSPD_TYPE_M    0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S    0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F    QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S    1
+#define QINTR_TIMER_IDX_M    0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start)	((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs)	((nsecs) * SF_SEC_SIZE)
+
+enum {
+	/*
+	 * Various Expansion-ROM boot images, etc.
+	 */
+	FLASH_EXP_ROM_START_SEC = 0,
+	FLASH_EXP_ROM_NSECS = 6,
+	FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+	FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+	/*
+	 * iSCSI Boot Firmware Table (iBFT) and other driver-related
+	 * parameters ...
+	 */
+	FLASH_IBFT_START_SEC = 6,
+	FLASH_IBFT_NSECS = 1,
+	FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
+	FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+
+	/*
+	 * Boot configuration data.
+	 */
+	FLASH_BOOTCFG_START_SEC = 7,
+	FLASH_BOOTCFG_NSECS = 1,
+	FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
+	FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+
+	/*
+	 * Location of firmware image in FLASH.
+	 */
+	FLASH_FW_START_SEC = 8,
+	FLASH_FW_NSECS = 16,
+	FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+	FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+	/*
+	 * iSCSI persistent/crash information.
+	 */
+	FLASH_ISCSI_CRASH_START_SEC = 29,
+	FLASH_ISCSI_CRASH_NSECS = 1,
+	FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
+	FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+
+	/*
+	 * FCoE persistent/crash information.
+	 */
+	FLASH_FCOE_CRASH_START_SEC = 30,
+	FLASH_FCOE_CRASH_NSECS = 1,
+	FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
+	FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+
+	/*
+	 * Location of Firmware Configuration File in FLASH.  Since the FPGA
+	 * "FLASH" is smaller we need to store the Configuration File in a
+	 * different location -- which will overlap the end of the firmware
+	 * image if firmware ever gets that large ...
+	 */
+	FLASH_CFG_START_SEC = 31,
+	FLASH_CFG_NSECS = 1,
+	FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+	FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+	/* We don't support FLASH devices which can't support the full
+	 * standard set of sections which we need for normal
+	 * operations.
+	 */
+	FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+
+	FLASH_FPGA_CFG_START_SEC = 15,
+	FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+
+	/*
+	 * Sectors 32-63 are reserved for FLASH failover.
+	 */
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
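+
+/* Worked example (illustrative only, assuming the 64KB serial flash sector
+ * size (SF_SEC_SIZE) used elsewhere in this header): the layout above gives
+ *
+ *	FLASH_FW_START    =  8 * 64KB = 0x080000
+ *	FLASH_FW_MAX_SIZE = 16 * 64KB = 0x100000 (1MB)
+ *	FLASH_MIN_SIZE    = 32 * 64KB = 0x200000 (2MB)
+ *
+ * so any FLASH part smaller than 2MB cannot hold the full standard layout.
+ */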
+
+#define SGE_TIMESTAMP_S 0
+#define SGE_TIMESTAMP_M 0xfffffffffffffffULL
+#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
+#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+
+#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
new file mode 100644
index 0000000..a072d34
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -0,0 +1,1184 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_MSG_H
+#define __T4_MSG_H
+
+#include <linux/types.h>
+
+enum {
+	CPL_PASS_OPEN_REQ     = 0x1,
+	CPL_PASS_ACCEPT_RPL   = 0x2,
+	CPL_ACT_OPEN_REQ      = 0x3,
+	CPL_SET_TCB_FIELD     = 0x5,
+	CPL_GET_TCB           = 0x6,
+	CPL_CLOSE_CON_REQ     = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_ABORT_REQ         = 0xA,
+	CPL_ABORT_RPL         = 0xB,
+	CPL_RX_DATA_ACK       = 0xD,
+	CPL_TX_PKT            = 0xE,
+	CPL_L2T_WRITE_REQ     = 0x12,
+	CPL_TID_RELEASE       = 0x1A,
+
+	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_L2T_WRITE_RPL     = 0x23,
+	CPL_PASS_OPEN_RPL     = 0x24,
+	CPL_ACT_OPEN_RPL      = 0x25,
+	CPL_PEER_CLOSE        = 0x26,
+	CPL_ABORT_REQ_RSS     = 0x2B,
+	CPL_ABORT_RPL_RSS     = 0x2D,
+
+	CPL_CLOSE_CON_RPL     = 0x32,
+	CPL_ISCSI_HDR         = 0x33,
+	CPL_RDMA_CQE          = 0x35,
+	CPL_RDMA_CQE_READ_RSP = 0x36,
+	CPL_RDMA_CQE_ERR      = 0x37,
+	CPL_RX_DATA           = 0x39,
+	CPL_SET_TCB_RPL       = 0x3A,
+	CPL_RX_PKT            = 0x3B,
+	CPL_RX_DDP_COMPLETE   = 0x3F,
+
+	CPL_ACT_ESTABLISH     = 0x40,
+	CPL_PASS_ESTABLISH    = 0x41,
+	CPL_RX_DATA_DDP       = 0x42,
+	CPL_PASS_ACCEPT_REQ   = 0x44,
+	CPL_TRACE_PKT_T5      = 0x48,
+	CPL_RX_ISCSI_DDP      = 0x49,
+
+	CPL_RDMA_READ_REQ     = 0x60,
+
+	CPL_PASS_OPEN_REQ6    = 0x81,
+	CPL_ACT_OPEN_REQ6     = 0x83,
+
+	CPL_RDMA_TERMINATE    = 0xA2,
+	CPL_RDMA_WRITE        = 0xA4,
+	CPL_SGE_EGR_UPDATE    = 0xA5,
+
+	CPL_TRACE_PKT         = 0xB0,
+	CPL_ISCSI_DATA	      = 0xB2,
+
+	CPL_FW4_MSG           = 0xC0,
+	CPL_FW4_PLD           = 0xC1,
+	CPL_FW4_ACK           = 0xC3,
+
+	CPL_FW6_MSG           = 0xE0,
+	CPL_FW6_PLD           = 0xE1,
+	CPL_TX_PKT_LSO        = 0xED,
+	CPL_TX_PKT_XT         = 0xEE,
+
+	NUM_CPL_CMDS
+};
+
+enum CPL_error {
+	CPL_ERR_NONE               = 0,
+	CPL_ERR_TCAM_FULL          = 3,
+	CPL_ERR_BAD_LENGTH         = 15,
+	CPL_ERR_BAD_ROUTE          = 18,
+	CPL_ERR_CONN_RESET         = 20,
+	CPL_ERR_CONN_EXIST_SYNRECV = 21,
+	CPL_ERR_CONN_EXIST         = 22,
+	CPL_ERR_ARP_MISS           = 23,
+	CPL_ERR_BAD_SYN            = 24,
+	CPL_ERR_CONN_TIMEDOUT      = 30,
+	CPL_ERR_XMIT_TIMEDOUT      = 31,
+	CPL_ERR_PERSIST_TIMEDOUT   = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT  = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE     = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_KEEPALV_NEG_ADVICE = 37,
+	CPL_ERR_ABORT_FAILED       = 42,
+	CPL_ERR_IWARP_FLM          = 50,
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK  = 1,
+	CPL_CONN_POLICY_FILTER = 2,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE          = 0,
+	ULP_MODE_ISCSI         = 2,
+	ULP_MODE_RDMA          = 4,
+	ULP_MODE_TCPDDP	       = 5,
+	ULP_MODE_FCOE          = 6,
+};
+
+enum {
+	ULP_CRC_HEADER = 1 << 0,
+	ULP_CRC_DATA   = 1 << 1
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+};
+
+enum {                     /* TX_PKT_XT checksum types */
+	TX_CSUM_TCP    = 0,
+	TX_CSUM_UDP    = 1,
+	TX_CSUM_CRC16  = 4,
+	TX_CSUM_CRC32  = 5,
+	TX_CSUM_CRC32C = 6,
+	TX_CSUM_FCOE   = 7,
+	TX_CSUM_TCPIP  = 8,
+	TX_CSUM_UDPIP  = 9,
+	TX_CSUM_TCPIP6 = 10,
+	TX_CSUM_UDPIP6 = 11,
+	TX_CSUM_IP     = 12,
+};
+
+union opcode_tid {
+	__be32 opcode_tid;
+	u8 opcode;
+};
+
+#define CPL_OPCODE_S    24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define TID_TID_S    0
+#define TID_TID_M    0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+
+#define TID_QID_S    14
+#define TID_QID_M    0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
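+
+/* Usage sketch (illustrative only; variable names are hypothetical): a work
+ * request is typically stamped with its opcode and TID on the way out, and
+ * the TID recovered from the reply on the way back, e.g.
+ *
+ *	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ *	...
+ *	unsigned int tid = GET_TID(rpl);
+ *
+ * where req/rpl point at the corresponding CPL structures defined below.
+ */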
+
+struct rss_header {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 channel:2;
+	u8 filter_hit:1;
+	u8 filter_tid:1;
+	u8 hash_type:2;
+	u8 ipv6:1;
+	u8 send2fw:1;
+#else
+	u8 send2fw:1;
+	u8 ipv6:1;
+	u8 hash_type:2;
+	u8 filter_tid:1;
+	u8 filter_hit:1;
+	u8 channel:2;
+#endif
+	__be16 qid;
+	__be32 hash_val;
+};
+
+struct work_request_hdr {
+	__be32 wr_hi;
+	__be32 wr_mid;
+	__be64 wr_lo;
+};
+
+/* wr_hi fields */
+#define WR_OP_S    24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
+
+#define WR_HDR struct work_request_hdr wr
+
+/* option 0 fields */
+#define TX_CHAN_S    2
+#define TX_CHAN_V(x) ((x) << TX_CHAN_S)
+
+#define ULP_MODE_S    8
+#define ULP_MODE_V(x) ((x) << ULP_MODE_S)
+
+#define RCV_BUFSIZ_S    12
+#define RCV_BUFSIZ_M    0x3FFU
+#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S)
+
+#define SMAC_SEL_S    28
+#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S)
+
+#define L2T_IDX_S    36
+#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S)
+
+#define WND_SCALE_S    50
+#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S)
+
+#define KEEP_ALIVE_S    54
+#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S)
+#define KEEP_ALIVE_F    KEEP_ALIVE_V(1ULL)
+
+#define MSS_IDX_S    60
+#define MSS_IDX_M    0xF
+#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S)
+#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M)
+
+/* option 2 fields */
+#define RSS_QUEUE_S    0
+#define RSS_QUEUE_M    0x3FF
+#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S)
+#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M)
+
+#define RSS_QUEUE_VALID_S    10
+#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S)
+#define RSS_QUEUE_VALID_F    RSS_QUEUE_VALID_V(1U)
+
+#define RX_FC_DISABLE_S    20
+#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S)
+#define RX_FC_DISABLE_F    RX_FC_DISABLE_V(1U)
+
+#define RX_FC_VALID_S    22
+#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S)
+#define RX_FC_VALID_F    RX_FC_VALID_V(1U)
+
+#define RX_CHANNEL_S    26
+#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
+
+#define WND_SCALE_EN_S    28
+#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
+#define WND_SCALE_EN_F    WND_SCALE_EN_V(1U)
+
+#define T5_OPT_2_VALID_S    31
+#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S)
+#define T5_OPT_2_VALID_F    T5_OPT_2_VALID_V(1U)
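+
+/* Usage sketch (illustrative only; variable names are hypothetical): upper
+ * layer drivers typically compose the 64-bit opt0 and 32-bit opt2 words from
+ * these accessors, e.g. when filling a cpl_act_open_req:
+ *
+ *	req->opt0 = cpu_to_be64(KEEP_ALIVE_F |
+ *				WND_SCALE_V(wscale) |
+ *				MSS_IDX_V(mss_idx) |
+ *				L2T_IDX_V(l2t_idx) |
+ *				TX_CHAN_V(tx_chan) |
+ *				SMAC_SEL_V(smac_idx) |
+ *				ULP_MODE_V(ULP_MODE_NONE) |
+ *				RCV_BUFSIZ_V(rcv_bufsiz));
+ *	req->opt2 = cpu_to_be32(RSS_QUEUE_VALID_F | RSS_QUEUE_V(rss_qid));
+ */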
+
+struct cpl_pass_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be64 opt1;
+};
+
+/* option 0 fields */
+#define NO_CONG_S    4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F    NO_CONG_V(1U)
+
+#define DELACK_S    5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F    DELACK_V(1U)
+
+#define DSCP_S    22
+#define DSCP_M    0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+
+#define TCAM_BYPASS_S    48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F    TCAM_BYPASS_V(1ULL)
+
+#define NAGLE_S    49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F    NAGLE_V(1ULL)
+
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S    0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F    SYN_RSS_ENABLE_V(1U)
+
+#define SYN_RSS_QUEUE_S    2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+
+#define CONN_POLICY_S    22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
+
+struct cpl_pass_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+};
+
+/* option 2 fields */
+#define RX_COALESCE_VALID_S    11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F    RX_COALESCE_VALID_V(1U)
+
+#define RX_COALESCE_S    12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+
+#define PACE_S    16
+#define PACE_V(x) ((x) << PACE_S)
+
+#define TX_QUEUE_S    23
+#define TX_QUEUE_M    0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+
+#define CCTRL_ECN_S    27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F    CCTRL_ECN_V(1U)
+
+#define TSTAMPS_EN_S    29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F    TSTAMPS_EN_V(1U)
+
+#define SACK_EN_S    30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F    SACK_EN_V(1U)
+
+struct cpl_t5_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+	__be32 iss;
+	__be32 rsvd;
+};
+
+struct cpl_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+#define FILTER_TUPLE_S  24
+#define FILTER_TUPLE_M  0xFFFFFFFFFF
+#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S)
+#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M)
+struct cpl_t5_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 rsvd;
+	__be32 opt2;
+	__be64 params;
+};
+
+struct cpl_t6_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 rsvd;
+	__be32 opt2;
+	__be64 params;
+	__be32 rsvd2;
+	__be32 opt3;
+};
+
+struct cpl_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+struct cpl_t5_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 rsvd;
+	__be32 opt2;
+	__be64 params;
+};
+
+struct cpl_t6_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 rsvd;
+	__be32 opt2;
+	__be64 params;
+	__be32 rsvd2;
+	__be32 opt3;
+};
+
+struct cpl_act_open_rpl {
+	union opcode_tid ot;
+	__be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S    0
+#define AOPEN_STATUS_M    0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+
+#define AOPEN_ATID_S    8
+#define AOPEN_ATID_M    0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
+
+struct cpl_pass_establish {
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_stid;
+	__be16 mac_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S    0
+#define PASS_OPEN_TID_M    0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+
+#define PASS_OPEN_TOS_S    24
+#define PASS_OPEN_TOS_M    0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
+#define TCPOPT_WSCALE_OK_S	5
+#define TCPOPT_WSCALE_OK_M	0x1
+#define TCPOPT_WSCALE_OK_G(x)	\
+	(((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+
+#define TCPOPT_SACK_S		6
+#define TCPOPT_SACK_M		0x1
+#define TCPOPT_SACK_G(x)	(((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+
+#define TCPOPT_TSTAMP_S		7
+#define TCPOPT_TSTAMP_M		0x1
+#define TCPOPT_TSTAMP_G(x)	(((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+
+#define TCPOPT_SND_WSCALE_S	8
+#define TCPOPT_SND_WSCALE_M	0xF
+#define TCPOPT_SND_WSCALE_G(x)	\
+	(((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+
+#define TCPOPT_MSS_S	12
+#define TCPOPT_MSS_M	0xF
+#define TCPOPT_MSS_G(x)	(((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
+
+#define T6_TCP_HDR_LEN_S   8
+#define T6_TCP_HDR_LEN_V(x) ((x) << T6_TCP_HDR_LEN_S)
+#define T6_TCP_HDR_LEN_G(x) (((x) >> T6_TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define T6_IP_HDR_LEN_S    14
+#define T6_IP_HDR_LEN_V(x) ((x) << T6_IP_HDR_LEN_S)
+#define T6_IP_HDR_LEN_G(x) (((x) >> T6_IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define T6_ETH_HDR_LEN_S    24
+#define T6_ETH_HDR_LEN_M    0xFF
+#define T6_ETH_HDR_LEN_V(x) ((x) << T6_ETH_HDR_LEN_S)
+#define T6_ETH_HDR_LEN_G(x) (((x) >> T6_ETH_HDR_LEN_S) & T6_ETH_HDR_LEN_M)
+
+struct cpl_act_establish {
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_atid;
+	__be16 mac_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 cookie;
+};
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S    0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+
+#define REPLY_CHAN_S    14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F    REPLY_CHAN_V(1U)
+
+#define NO_REPLY_S    15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F    NO_REPLY_V(1U)
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 word_cookie;
+	__be64 mask;
+	__be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S    0
+#define TCB_WORD(x)   ((x) << TCB_WORD_S)
+
+#define TCB_COOKIE_S    5
+#define TCB_COOKIE_M    0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
+
+struct cpl_set_tcb_rpl {
+	union opcode_tid ot;
+	__be16 rsvd;
+	u8 cookie;
+	u8 status;
+	__be64 oldval;
+};
+
+struct cpl_close_con_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+	__be32 snd_nxt;
+	__be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 rsvd;
+};
+
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S    14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F    LISTSVR_IPV6_V(1U)
+
+struct cpl_close_listsvr_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req_rss {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	u8 rsvd1;
+	u8 cmd;
+	u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	u8 rsvd1;
+	u8 cmd;
+	u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+	union opcode_tid ot;
+	__be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_tx_pkt_core {
+	__be32 ctrl0;
+	__be16 pack;
+	__be16 len;
+	__be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+	WR_HDR;
+	struct cpl_tx_pkt_core c;
+};
+
+#define cpl_tx_pkt_xt cpl_tx_pkt
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S    0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S    8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S    11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F    TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S    12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_T5_OVLAN_IDX_S	12
+#define TXPKT_T5_OVLAN_IDX_V(x)	((x) << TXPKT_T5_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S    16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S    21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F    TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S    24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S    12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S    20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S    20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S    30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S    34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S    32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S    40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S    44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S    60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F    TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S    62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F    TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S    63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F    TXPKT_L4CSUM_DIS_V(1ULL)
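+
+/* Usage sketch (illustrative only; variable names are hypothetical): the TX
+ * path typically either disables checksum insertion,
+ *
+ *	ctrl1 = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ *
+ * or requests offload by describing the packet's headers, e.g.
+ *
+ *	ctrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
+ *		TXPKT_IPHDR_LEN_V(ip_hdr_len) |
+ *		TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ *
+ * before storing the result big-endian in cpl_tx_pkt_core.ctrl1.
+ */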
+
+struct cpl_tx_pkt_lso_core {
+	__be32 lso_ctrl;
+	__be16 ipid_ofst;
+	__be16 mss;
+	__be32 seqno_offset;
+	__be32 len;
+	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S    0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+
+#define LSO_IPHDR_LEN_S    4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+
+#define LSO_ETHHDR_LEN_S    16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+
+#define LSO_IPV6_S    20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F    LSO_IPV6_V(1U)
+
+#define LSO_LAST_SLICE_S    22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F    LSO_LAST_SLICE_V(1U)
+
+#define LSO_FIRST_SLICE_S    23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F    LSO_FIRST_SLICE_V(1U)
+
+#define LSO_OPCODE_S    24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+
+#define LSO_T5_XFER_SIZE_S	   0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
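+
+/* Usage sketch (illustrative only; names are hypothetical and header lengths
+ * are in 32-bit words): for a TSO packet the lso_ctrl word is typically
+ * built as
+ *
+ *	lso_ctrl = LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ *		   LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ *		   LSO_IPV6_V(is_ipv6) |
+ *		   LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ *		   LSO_IPHDR_LEN_V(l3_hdr_len / 4) |
+ *		   LSO_TCPHDR_LEN_V(tcp_hdr_words);
+ */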
+
+struct cpl_tx_pkt_lso {
+	WR_HDR;
+	struct cpl_tx_pkt_lso_core c;
+	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_iscsi_hdr {
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	u8 rsvd;
+	u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S    0
+#define ISCSI_PDU_LEN_M    0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+
+#define ISCSI_DDP_S    15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F    ISCSI_DDP_V(1U)
+
+struct cpl_rx_data {
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 dack_mode:2;
+	u8 psh:1;
+	u8 heartbeat:1;
+	u8 ddp_off:1;
+	u8 :3;
+#else
+	u8 :3;
+	u8 ddp_off:1;
+	u8 heartbeat:1;
+	u8 psh:1;
+	u8 dack_mode:2;
+#endif
+	u8 status;
+};
+
+struct cpl_rx_data_ack {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define RX_CREDITS_S    0
+#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S)
+
+#define RX_FORCE_ACK_S    28
+#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
+#define RX_FORCE_ACK_F    RX_FORCE_ACK_V(1U)
+
+struct cpl_rx_pkt {
+	struct rss_header rsshdr;
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_calc:1;
+	u8 ipmi_pkt:1;
+	u8 vlan_ex:1;
+	u8 ip_frag:1;
+#else
+	u8 ip_frag:1;
+	u8 vlan_ex:1;
+	u8 ipmi_pkt:1;
+	u8 csum_calc:1;
+	u8 iff:4;
+#endif
+	__be16 csum;
+	__be16 vlan;
+	__be16 len;
+	__be32 l2info;
+	__be16 hdr_len;
+	__be16 err_vec;
+};
+
+#define RX_T6_ETHHDR_LEN_M    0xFF
+#define RX_T6_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_T6_ETHHDR_LEN_M)
+
+#define RXF_PSH_S    20
+#define RXF_PSH_V(x) ((x) << RXF_PSH_S)
+#define RXF_PSH_F    RXF_PSH_V(1U)
+
+#define RXF_SYN_S    21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F    RXF_SYN_V(1U)
+
+#define RXF_UDP_S    22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F    RXF_UDP_V(1U)
+
+#define RXF_TCP_S    23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F    RXF_TCP_V(1U)
+
+#define RXF_IP_S    24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F    RXF_IP_V(1U)
+
+#define RXF_IP6_S    25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F    RXF_IP6_V(1U)
+
+#define RXF_SYN_COOKIE_S    26
+#define RXF_SYN_COOKIE_V(x) ((x) << RXF_SYN_COOKIE_S)
+#define RXF_SYN_COOKIE_F    RXF_SYN_COOKIE_V(1U)
+
+#define RXF_FCOE_S    26
+#define RXF_FCOE_V(x) ((x) << RXF_FCOE_S)
+#define RXF_FCOE_F    RXF_FCOE_V(1U)
+
+#define RXF_LRO_S    27
+#define RXF_LRO_V(x) ((x) << RXF_LRO_S)
+#define RXF_LRO_F    RXF_LRO_V(1U)
+
+/* rx_pkt.l2info fields */
+#define RX_ETHHDR_LEN_S    0
+#define RX_ETHHDR_LEN_M    0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+
+#define RX_T5_ETHHDR_LEN_S    0
+#define RX_T5_ETHHDR_LEN_M    0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+
+#define RX_MACIDX_S    8
+#define RX_MACIDX_M    0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+
+#define RX_CHAN_S    28
+#define RX_CHAN_M    0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
+
+/* rx_pkt.hdr_len fields */
+#define RX_TCPHDR_LEN_S    0
+#define RX_TCPHDR_LEN_M    0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
+
+#define RX_IPHDR_LEN_S    6
+#define RX_IPHDR_LEN_M    0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
+
+/* rx_pkt.err_vec fields */
+#define RXERR_CSUM_S    13
+#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
+#define RXERR_CSUM_F    RXERR_CSUM_V(1U)
+
+struct cpl_trace_pkt {
+	u8 opcode;
+	u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 runt:4;
+	u8 filter_hit:4;
+	u8 :6;
+	u8 err:1;
+	u8 trunc:1;
+#else
+	u8 filter_hit:4;
+	u8 runt:4;
+	u8 trunc:1;
+	u8 err:1;
+	u8 :6;
+#endif
+	__be16 rsvd;
+	__be16 len;
+	__be64 tstamp;
+};
+
+struct cpl_t5_trace_pkt {
+	__u8 opcode;
+	__u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8 runt:4;
+	__u8 filter_hit:4;
+	__u8:6;
+	__u8 err:1;
+	__u8 trunc:1;
+#else
+	__u8 filter_hit:4;
+	__u8 runt:4;
+	__u8 trunc:1;
+	__u8 err:1;
+	__u8:6;
+#endif
+	__be16 rsvd;
+	__be16 len;
+	__be64 tstamp;
+	__be64 rsvd1;
+};
+
+struct cpl_l2t_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 params;
+	__be16 l2t_idx;
+	__be16 vlan;
+	u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S    2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+
+#define L2T_W_PORT_S    8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+
+#define L2T_W_NOREPLY_S    15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F    L2T_W_NOREPLY_V(1U)
+
+struct cpl_l2t_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rdma_terminate {
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+};
+
+struct cpl_sge_egr_update {
+	__be32 opcode_qid;
+	__be16 cidx;
+	__be16 pidx;
+};
+
+/* cpl_sge_egr_update.opcode_qid fields */
+#define EGR_QID_S    0
+#define EGR_QID_M    0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
+
+/* cpl_fw*.type values */
+enum {
+	FW_TYPE_CMD_RPL = 0,
+	FW_TYPE_WR_RPL = 1,
+	FW_TYPE_CQE = 2,
+	FW_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+	FW_TYPE_RSSCPL = 4,
+};
+
+struct cpl_fw4_pld {
+	u8 opcode;
+	u8 rsvd0[3];
+	u8 type;
+	u8 rsvd1;
+	__be16 len;
+	__be64 data;
+	__be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+	u8 opcode;
+	u8 rsvd[5];
+	__be16 len;
+	__be64 data[4];
+};
+
+struct cpl_fw4_msg {
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[2];
+};
+
+struct cpl_fw4_ack {
+	union opcode_tid ot;
+	u8 credits;
+	u8 rsvd0[2];
+	u8 seq_vld;
+	__be32 snd_nxt;
+	__be32 snd_una;
+	__be64 rsvd1;
+};
+
+struct cpl_fw6_msg {
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[4];
+};
+
+/* cpl_fw6_msg.type values */
+enum {
+	FW6_TYPE_CMD_RPL = 0,
+	FW6_TYPE_WR_RPL = 1,
+	FW6_TYPE_CQE = 2,
+	FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+	FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
+};
+
+struct cpl_fw6_msg_ofld_connection_wr_rpl {
+	__u64   cookie;
+	__be32  tid;    /* or atid in case of active failure */
+	__u8    t_state;
+	__u8    retval;
+	__u8    rsvd[2];
+};
+
+enum {
+	ULP_TX_MEM_READ = 2,
+	ULP_TX_MEM_WRITE = 3,
+	ULP_TX_PKT = 4
+};
+
+enum {
+	ULP_TX_SC_NOOP = 0x80,
+	ULP_TX_SC_IMM  = 0x81,
+	ULP_TX_SC_DSGL = 0x82,
+	ULP_TX_SC_ISGL = 0x83
+};
+
+#define ULPTX_CMD_S    24
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
+struct ulptx_sge_pair {
+	__be32 len[2];
+	__be64 addr[2];
+};
+
+struct ulptx_sgl {
+	__be32 cmd_nsge;
+	__be32 len0;
+	__be64 addr0;
+	struct ulptx_sge_pair sge[0];
+};
+
+#define ULPTX_NSGE_S    0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+
+#define ULPTX_MORE_S	23
+#define ULPTX_MORE_V(x)	((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F	ULPTX_MORE_V(1U)
+
+struct ulp_mem_io {
+	WR_HDR;
+	__be32 cmd;
+	__be32 len16;             /* command length */
+	__be32 dlen;              /* data length in 32-byte units */
+	__be32 lock_addr;
+};
+
+#define ULP_MEMIO_LOCK_S    31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F    ULP_MEMIO_LOCK_V(1U)
+
+/* additional ulp_mem_io.cmd fields */
+#define ULP_MEMIO_ORDER_S    23
+#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
+#define ULP_MEMIO_ORDER_F    ULP_MEMIO_ORDER_V(1U)
+
+#define T5_ULP_MEMIO_IMM_S    23
+#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
+#define T5_ULP_MEMIO_IMM_F    T5_ULP_MEMIO_IMM_V(1U)
+
+#define T5_ULP_MEMIO_ORDER_S    22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F    T5_ULP_MEMIO_ORDER_V(1U)
+
+/* ulp_mem_io.lock_addr fields */
+#define ULP_MEMIO_ADDR_S    0
+#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
+
+/* ulp_mem_io.dlen fields */
+#define ULP_MEMIO_DATA_LEN_S    0
+#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
+
+#endif  /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
new file mode 100644
index 0000000..03ed00c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -0,0 +1,183 @@
+/*
+ * This file is part of the Chelsio T4/T5 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/* The code can define cpp macros for creating a PCI Device ID Table. This is
+ * useful because it allows the PCI ID Table to be maintained in a single place.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ *   -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ *   -- The PCI Function Number to use in the PCI Device ID Table.  "0"
+ *   -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ *   -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ *   -- If defined, create a PCI Device ID Table with both
+ *   -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ *   -- Used for the individual PCI Device ID entries.  Note that we will
+ *   -- be adding a trailing comma (",") after all of the entries (and
+ *   -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ *   -- Used to finish the definition of the PCI ID Table.  Note that we
+ *   -- will be adding a trailing semi-colon (";") here.
+ */
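+
+/* Usage sketch (illustrative only; the table and entry layout are chosen by
+ * the including driver): a PF driver would typically define the macros above
+ * and then include this file, roughly along these lines:
+ *
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ *		static const struct pci_device_id my_pci_tbl[] = {
+ *	#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+ *	#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ *		{ PCI_VDEVICE(CHELSIO, (devid)), 4 }
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ *		{ 0, } \
+ *		}
+ *	#include "t4_pci_id_tbl.h"
+ */
+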
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ *   V  = "4" for T4; "5" for T5, etc.
+ *   F  = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ *   PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+	CH_PCI_ID_TABLE_ENTRY((devid) | \
+			      ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+	CH_PCI_ID_TABLE_ENTRY((devid) | \
+			      ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+	CH_PCI_ID_TABLE_ENTRY((devid) | \
+			      ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
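+
+/* Worked example (illustrative only): with CH_PCI_DEVICE_ID_FUNCTION defined
+ * as 4, CH_PCI_ID_TABLE_FENTRY(0x5011) produces an entry for device ID
+ * 0x5011 | (4 << 8) = 0x5411, i.e. the T5 (V=5), PF4 (F=4) instance of
+ * product 0x11 (T520-LL-cr) from the table below.
+ */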
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+	/* T4 adapters:
+	 */
+	CH_PCI_ID_TABLE_FENTRY(0x4000),	/* T440-dbg */
+	CH_PCI_ID_TABLE_FENTRY(0x4001),	/* T420-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4002),	/* T422-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4003),	/* T440-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4004),	/* T420-bch */
+	CH_PCI_ID_TABLE_FENTRY(0x4005),	/* T440-bch */
+	CH_PCI_ID_TABLE_FENTRY(0x4006),	/* T440-ch */
+	CH_PCI_ID_TABLE_FENTRY(0x4007),	/* T420-so */
+	CH_PCI_ID_TABLE_FENTRY(0x4008),	/* T420-cx */
+	CH_PCI_ID_TABLE_FENTRY(0x4009),	/* T420-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x400a),	/* T404-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x400b),	/* B420-sr */
+	CH_PCI_ID_TABLE_FENTRY(0x400c),	/* B404-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x400d),	/* T480-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x400e),	/* T440-LP-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4080),	/* Custom T480-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4081),	/* Custom T440-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4082),	/* Custom T420-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4083),	/* Custom T420-xaui */
+	CH_PCI_ID_TABLE_FENTRY(0x4084),	/* Custom T440-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4085),	/* Custom T420-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4086),	/* Custom T440-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x4087),	/* Custom T440-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x4088),	/* Custom T440 2-xaui, 2-xfi */
+
+	/* T5 adapters:
+	 */
+	CH_PCI_ID_TABLE_FENTRY(0x5000),	/* T580-dbg */
+	CH_PCI_ID_TABLE_FENTRY(0x5001),	/* T520-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5002),	/* T522-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5003),	/* T540-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5004),	/* T520-bch */
+	CH_PCI_ID_TABLE_FENTRY(0x5005),	/* T540-bch */
+	CH_PCI_ID_TABLE_FENTRY(0x5006),	/* T540-ch */
+	CH_PCI_ID_TABLE_FENTRY(0x5007),	/* T520-so */
+	CH_PCI_ID_TABLE_FENTRY(0x5008),	/* T520-cx */
+	CH_PCI_ID_TABLE_FENTRY(0x5009),	/* T520-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x500a),	/* T504-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x500b),	/* B520-sr */
+	CH_PCI_ID_TABLE_FENTRY(0x500c),	/* B504-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x500d),	/* T580-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x500e),	/* T540-LP-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5010),	/* T580-LP-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5011),	/* T520-LL-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5012),	/* T560-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5013),	/* T580-chr */
+	CH_PCI_ID_TABLE_FENTRY(0x5014),	/* T580-so */
+	CH_PCI_ID_TABLE_FENTRY(0x5015),	/* T502-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x5016),	/* T580-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5017),	/* T520-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5080),	/* Custom T540-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5081),	/* Custom T540-LL-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5082),	/* Custom T504-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5083),	/* Custom T540-LP-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5084),	/* Custom T580-cr */
+	CH_PCI_ID_TABLE_FENTRY(0x5085),	/* Custom 3x T580-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5086),	/* Custom 2x T580-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5087),	/* Custom T580-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5088),	/* Custom T570-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5089),	/* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5090),	/* Custom T540-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5091),	/* Custom T522-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5092),	/* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5093),	/* Custom T580-LP-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5094),	/* Custom T540-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5095),	/* Custom T540-CR-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5096),	/* Custom T580-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x5097),	/* Custom T520-KR */
+
+	/* T6 adapters:
+	 */
+	CH_PCI_ID_TABLE_FENTRY(0x6001),
+	CH_PCI_ID_TABLE_FENTRY(0x6002),
+	CH_PCI_ID_TABLE_FENTRY(0x6003),
+	CH_PCI_ID_TABLE_FENTRY(0x6004),
+	CH_PCI_ID_TABLE_FENTRY(0x6005),
+	CH_PCI_ID_TABLE_FENTRY(0x6006),
+	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6009),
+	CH_PCI_ID_TABLE_FENTRY(0x600d),
+	CH_PCI_ID_TABLE_FENTRY(0x6010),
+	CH_PCI_ID_TABLE_FENTRY(0x6011),
+	CH_PCI_ID_TABLE_FENTRY(0x6014),
+	CH_PCI_ID_TABLE_FENTRY(0x6015),
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
new file mode 100644
index 0000000..fc3044c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -0,0 +1,3094 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_REGS_H
+#define __T4_REGS_H
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
+#define SGE_PF_KDOORBELL_A 0x0
+
+#define QID_S    15
+#define QID_V(x) ((x) << QID_S)
+
+#define DBPRIO_S    14
+#define DBPRIO_V(x) ((x) << DBPRIO_S)
+#define DBPRIO_F    DBPRIO_V(1U)
+
+#define PIDX_S    0
+#define PIDX_V(x) ((x) << PIDX_S)
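+
+/* Usage sketch (illustrative only; variable names are hypothetical): a
+ * producer-index doorbell is typically rung by writing these fields to the
+ * per-PF doorbell register, e.g.
+ *
+ *	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+ *		     QID_V(cntxt_id) | PIDX_V(n_new_descs));
+ */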
+
+#define SGE_VF_KDOORBELL_A 0x0
+
+#define DBTYPE_S    13
+#define DBTYPE_V(x) ((x) << DBTYPE_S)
+#define DBTYPE_F    DBTYPE_V(1U)
+
+#define PIDX_T5_S    0
+#define PIDX_T5_M    0x1fffU
+#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
+#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
+
+#define SGE_PF_GTS_A 0x4
+
+#define INGRESSQID_S    16
+#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
+
+#define TIMERREG_S    13
+#define TIMERREG_V(x) ((x) << TIMERREG_S)
+
+#define SEINTARM_S    12
+#define SEINTARM_V(x) ((x) << SEINTARM_S)
+
+#define CIDXINC_S    0
+#define CIDXINC_M    0xfffU
+#define CIDXINC_V(x) ((x) << CIDXINC_S)
+
+#define SGE_CONTROL_A	0x1008
+#define SGE_CONTROL2_A	0x1124
+
+#define RXPKTCPLMODE_S    18
+#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
+#define RXPKTCPLMODE_F    RXPKTCPLMODE_V(1U)
+
+#define EGRSTATUSPAGESIZE_S    17
+#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
+#define EGRSTATUSPAGESIZE_F    EGRSTATUSPAGESIZE_V(1U)
+
+#define PKTSHIFT_S    10
+#define PKTSHIFT_M    0x7U
+#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
+#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
+
+#define INGPCIEBOUNDARY_S    7
+#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
+
+#define INGPADBOUNDARY_S    4
+#define INGPADBOUNDARY_M    0x7U
+#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
+#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
+
+#define EGRPCIEBOUNDARY_S    1
+#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
+
+#define  INGPACKBOUNDARY_S	16
+#define  INGPACKBOUNDARY_M	0x7U
+#define  INGPACKBOUNDARY_V(x)	((x) << INGPACKBOUNDARY_S)
+#define  INGPACKBOUNDARY_G(x)	(((x) >> INGPACKBOUNDARY_S) \
+				 & INGPACKBOUNDARY_M)
+
+#define VFIFO_ENABLE_S    10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F    VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S    6
+#define DBVFIFO_SIZE_M    0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S    0
+#define T6_DBVFIFO_SIZE_M    0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
+#define GLOBALENABLE_S    0
+#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
+#define GLOBALENABLE_F    GLOBALENABLE_V(1U)
+
+#define SGE_HOST_PAGE_SIZE_A 0x100c
+
+#define HOSTPAGESIZEPF7_S    28
+#define HOSTPAGESIZEPF7_M    0xfU
+#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
+#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
+
+#define HOSTPAGESIZEPF6_S    24
+#define HOSTPAGESIZEPF6_M    0xfU
+#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
+#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
+
+#define HOSTPAGESIZEPF5_S    20
+#define HOSTPAGESIZEPF5_M    0xfU
+#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
+#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
+
+#define HOSTPAGESIZEPF4_S    16
+#define HOSTPAGESIZEPF4_M    0xfU
+#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
+#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
+
+#define HOSTPAGESIZEPF3_S    12
+#define HOSTPAGESIZEPF3_M    0xfU
+#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
+#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
+
+#define HOSTPAGESIZEPF2_S    8
+#define HOSTPAGESIZEPF2_M    0xfU
+#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
+#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
+
+#define HOSTPAGESIZEPF1_S    4
+#define HOSTPAGESIZEPF1_M    0xfU
+#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
+#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
+
+#define HOSTPAGESIZEPF0_S    0
+#define HOSTPAGESIZEPF0_M    0xfU
+#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+
+#define QUEUESPERPAGEPF1_S    4
+
+#define QUEUESPERPAGEPF0_S    0
+#define QUEUESPERPAGEPF0_M    0xfU
+#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
+#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
+
+#define SGE_INT_CAUSE1_A	0x1024
+#define SGE_INT_CAUSE2_A	0x1030
+#define SGE_INT_CAUSE3_A	0x103c
+
+#define ERR_FLM_DBP_S    31
+#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
+#define ERR_FLM_DBP_F    ERR_FLM_DBP_V(1U)
+
+#define ERR_FLM_IDMA1_S    30
+#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
+#define ERR_FLM_IDMA1_F    ERR_FLM_IDMA1_V(1U)
+
+#define ERR_FLM_IDMA0_S    29
+#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
+#define ERR_FLM_IDMA0_F    ERR_FLM_IDMA0_V(1U)
+
+#define ERR_FLM_HINT_S    28
+#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
+#define ERR_FLM_HINT_F    ERR_FLM_HINT_V(1U)
+
+#define ERR_PCIE_ERROR3_S    27
+#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
+#define ERR_PCIE_ERROR3_F    ERR_PCIE_ERROR3_V(1U)
+
+#define ERR_PCIE_ERROR2_S    26
+#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
+#define ERR_PCIE_ERROR2_F    ERR_PCIE_ERROR2_V(1U)
+
+#define ERR_PCIE_ERROR1_S    25
+#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
+#define ERR_PCIE_ERROR1_F    ERR_PCIE_ERROR1_V(1U)
+
+#define ERR_PCIE_ERROR0_S    24
+#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
+#define ERR_PCIE_ERROR0_F    ERR_PCIE_ERROR0_V(1U)
+
+#define ERR_CPL_EXCEED_IQE_SIZE_S    22
+#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
+#define ERR_CPL_EXCEED_IQE_SIZE_F    ERR_CPL_EXCEED_IQE_SIZE_V(1U)
+
+#define ERR_INVALID_CIDX_INC_S    21
+#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
+#define ERR_INVALID_CIDX_INC_F    ERR_INVALID_CIDX_INC_V(1U)
+
+#define ERR_CPL_OPCODE_0_S    19
+#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
+#define ERR_CPL_OPCODE_0_F    ERR_CPL_OPCODE_0_V(1U)
+
+#define ERR_DROPPED_DB_S    18
+#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
+#define ERR_DROPPED_DB_F    ERR_DROPPED_DB_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID1_S    17
+#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
+#define ERR_DATA_CPL_ON_HIGH_QID1_F    ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID0_S    16
+#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
+#define ERR_DATA_CPL_ON_HIGH_QID0_F    ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
+
+#define ERR_BAD_DB_PIDX3_S    15
+#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
+#define ERR_BAD_DB_PIDX3_F    ERR_BAD_DB_PIDX3_V(1U)
+
+#define ERR_BAD_DB_PIDX2_S    14
+#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
+#define ERR_BAD_DB_PIDX2_F    ERR_BAD_DB_PIDX2_V(1U)
+
+#define ERR_BAD_DB_PIDX1_S    13
+#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
+#define ERR_BAD_DB_PIDX1_F    ERR_BAD_DB_PIDX1_V(1U)
+
+#define ERR_BAD_DB_PIDX0_S    12
+#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
+#define ERR_BAD_DB_PIDX0_F    ERR_BAD_DB_PIDX0_V(1U)
+
+#define ERR_ING_CTXT_PRIO_S    10
+#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
+#define ERR_ING_CTXT_PRIO_F    ERR_ING_CTXT_PRIO_V(1U)
+
+#define ERR_EGR_CTXT_PRIO_S    9
+#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
+#define ERR_EGR_CTXT_PRIO_F    ERR_EGR_CTXT_PRIO_V(1U)
+
+#define DBFIFO_HP_INT_S    8
+#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
+#define DBFIFO_HP_INT_F    DBFIFO_HP_INT_V(1U)
+
+#define DBFIFO_LP_INT_S    7
+#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
+#define DBFIFO_LP_INT_F    DBFIFO_LP_INT_V(1U)
+
+#define INGRESS_SIZE_ERR_S    5
+#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
+#define INGRESS_SIZE_ERR_F    INGRESS_SIZE_ERR_V(1U)
+
+#define EGRESS_SIZE_ERR_S    4
+#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
+#define EGRESS_SIZE_ERR_F    EGRESS_SIZE_ERR_V(1U)
+
+#define SGE_INT_ENABLE3_A 0x1040
+#define SGE_FL_BUFFER_SIZE0_A 0x1044
+#define SGE_FL_BUFFER_SIZE1_A 0x1048
+#define SGE_FL_BUFFER_SIZE2_A 0x104c
+#define SGE_FL_BUFFER_SIZE3_A 0x1050
+#define SGE_FL_BUFFER_SIZE4_A 0x1054
+#define SGE_FL_BUFFER_SIZE5_A 0x1058
+#define SGE_FL_BUFFER_SIZE6_A 0x105c
+#define SGE_FL_BUFFER_SIZE7_A 0x1060
+#define SGE_FL_BUFFER_SIZE8_A 0x1064
+
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
+#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
+
+#define THRESHOLD_0_S    24
+#define THRESHOLD_0_M    0x3fU
+#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
+#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
+
+#define THRESHOLD_1_S    16
+#define THRESHOLD_1_M    0x3fU
+#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
+#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
+
+#define THRESHOLD_2_S    8
+#define THRESHOLD_2_M    0x3fU
+#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
+#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
+
+#define THRESHOLD_3_S    0
+#define THRESHOLD_3_M    0x3fU
+#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
+#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
+
+#define SGE_CONM_CTRL_A 0x1094
+
+#define EGRTHRESHOLD_S    8
+#define EGRTHRESHOLD_M    0x3fU
+#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
+#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
+
+#define EGRTHRESHOLDPACKING_S    14
+#define EGRTHRESHOLDPACKING_M    0x3fU
+#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
+#define EGRTHRESHOLDPACKING_G(x) \
+	(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
+#define SGE_TIMESTAMP_LO_A 0x1098
+#define SGE_TIMESTAMP_HI_A 0x109c
+
+#define TSOP_S    28
+#define TSOP_M    0x3U
+#define TSOP_V(x) ((x) << TSOP_S)
+#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
+
+#define TSVAL_S    0
+#define TSVAL_M    0xfffffffU
+#define TSVAL_V(x) ((x) << TSVAL_S)
+#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
+
+#define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
+
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define NOCOALESCE_S    26
+#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
+#define NOCOALESCE_F    NOCOALESCE_V(1U)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
+
+#define TIMERVALUE0_S    16
+#define TIMERVALUE0_M    0xffffU
+#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
+#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
+
+#define TIMERVALUE1_S    0
+#define TIMERVALUE1_M    0xffffU
+#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
+#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
+
+#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
+
+#define TIMERVALUE2_S    16
+#define TIMERVALUE2_M    0xffffU
+#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
+#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
+
+#define TIMERVALUE3_S    0
+#define TIMERVALUE3_M    0xffffU
+#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
+#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
+
+#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
+
+#define TIMERVALUE4_S    16
+#define TIMERVALUE4_M    0xffffU
+#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
+#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
+
+#define TIMERVALUE5_S    0
+#define TIMERVALUE5_M    0xffffU
+#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
+#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
+
+#define SGE_DEBUG_INDEX_A 0x10cc
+#define SGE_DEBUG_DATA_HIGH_A 0x10d0
+#define SGE_DEBUG_DATA_LOW_A 0x10d4
+
+#define SGE_DEBUG_DATA_LOW_INDEX_2_A	0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3_A	0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10_A	0x12a8
+
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S    18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F    UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S    17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F    ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S    0
+#define ERROR_QID_M    0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define HP_COUNT_S    16
+#define HP_COUNT_M    0x7ffU
+#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
+
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define LP_COUNT_S    0
+#define LP_COUNT_M    0x7ffU
+#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
+
+#define LP_INT_THRESH_T5_S    18
+#define LP_INT_THRESH_T5_M    0xfffU
+#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
+
+#define LP_COUNT_T5_S    0
+#define LP_COUNT_T5_M    0x3ffffU
+#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define SGE_STAT_TOTAL_A	0x10e4
+#define SGE_STAT_MATCH_A	0x10e8
+#define SGE_STAT_CFG_A		0x10ec
+
+#define STATMODE_S    2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
+#define STATSOURCE_T5_S    9
+#define STATSOURCE_T5_M    0xfU
+#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
+
+#define SGE_DBFIFO_STATUS2_A 0x1118
+
+#define HP_INT_THRESH_T5_S    10
+#define HP_INT_THRESH_T5_M    0xfU
+#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
+
+#define HP_COUNT_T5_S    0
+#define HP_COUNT_T5_M    0x3ffU
+#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define DROPPED_DB_S    0
+#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
+#define DROPPED_DB_F    DROPPED_DB_V(1U)
+
+#define SGE_CTXT_CMD_A 0x11fc
+#define SGE_DBQ_CTXT_BADDR_A 0x1084
+
+/* registers for module PCIE */
+#define PCIE_PF_CFG_A	0x40
+
+#define AIVEC_S    4
+#define AIVEC_M    0x3ffU
+#define AIVEC_V(x) ((x) << AIVEC_S)
+
+#define PCIE_PF_CLI_A	0x44
+#define PCIE_INT_CAUSE_A	0x3004
+
+#define UNXSPLCPLERR_S    29
+#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
+#define UNXSPLCPLERR_F    UNXSPLCPLERR_V(1U)
+
+#define PCIEPINT_S    28
+#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
+#define PCIEPINT_F    PCIEPINT_V(1U)
+
+#define PCIESINT_S    27
+#define PCIESINT_V(x) ((x) << PCIESINT_S)
+#define PCIESINT_F    PCIESINT_V(1U)
+
+#define RPLPERR_S    26
+#define RPLPERR_V(x) ((x) << RPLPERR_S)
+#define RPLPERR_F    RPLPERR_V(1U)
+
+#define RXWRPERR_S    25
+#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
+#define RXWRPERR_F    RXWRPERR_V(1U)
+
+#define RXCPLPERR_S    24
+#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
+#define RXCPLPERR_F    RXCPLPERR_V(1U)
+
+#define PIOTAGPERR_S    23
+#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
+#define PIOTAGPERR_F    PIOTAGPERR_V(1U)
+
+#define MATAGPERR_S    22
+#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
+#define MATAGPERR_F    MATAGPERR_V(1U)
+
+#define INTXCLRPERR_S    21
+#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
+#define INTXCLRPERR_F    INTXCLRPERR_V(1U)
+
+#define FIDPERR_S    20
+#define FIDPERR_V(x) ((x) << FIDPERR_S)
+#define FIDPERR_F    FIDPERR_V(1U)
+
+#define CFGSNPPERR_S    19
+#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
+#define CFGSNPPERR_F    CFGSNPPERR_V(1U)
+
+#define HRSPPERR_S    18
+#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
+#define HRSPPERR_F    HRSPPERR_V(1U)
+
+#define HREQPERR_S    17
+#define HREQPERR_V(x) ((x) << HREQPERR_S)
+#define HREQPERR_F    HREQPERR_V(1U)
+
+#define HCNTPERR_S    16
+#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
+#define HCNTPERR_F    HCNTPERR_V(1U)
+
+#define DRSPPERR_S    15
+#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
+#define DRSPPERR_F    DRSPPERR_V(1U)
+
+#define DREQPERR_S    14
+#define DREQPERR_V(x) ((x) << DREQPERR_S)
+#define DREQPERR_F    DREQPERR_V(1U)
+
+#define DCNTPERR_S    13
+#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
+#define DCNTPERR_F    DCNTPERR_V(1U)
+
+#define CRSPPERR_S    12
+#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
+#define CRSPPERR_F    CRSPPERR_V(1U)
+
+#define CREQPERR_S    11
+#define CREQPERR_V(x) ((x) << CREQPERR_S)
+#define CREQPERR_F    CREQPERR_V(1U)
+
+#define CCNTPERR_S    10
+#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
+#define CCNTPERR_F    CCNTPERR_V(1U)
+
+#define TARTAGPERR_S    9
+#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
+#define TARTAGPERR_F    TARTAGPERR_V(1U)
+
+#define PIOREQPERR_S    8
+#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
+#define PIOREQPERR_F    PIOREQPERR_V(1U)
+
+#define PIOCPLPERR_S    7
+#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
+#define PIOCPLPERR_F    PIOCPLPERR_V(1U)
+
+#define MSIXDIPERR_S    6
+#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
+#define MSIXDIPERR_F    MSIXDIPERR_V(1U)
+
+#define MSIXDATAPERR_S    5
+#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
+#define MSIXDATAPERR_F    MSIXDATAPERR_V(1U)
+
+#define MSIXADDRHPERR_S    4
+#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
+#define MSIXADDRHPERR_F    MSIXADDRHPERR_V(1U)
+
+#define MSIXADDRLPERR_S    3
+#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
+#define MSIXADDRLPERR_F    MSIXADDRLPERR_V(1U)
+
+#define MSIDATAPERR_S    2
+#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
+#define MSIDATAPERR_F    MSIDATAPERR_V(1U)
+
+#define MSIADDRHPERR_S    1
+#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
+#define MSIADDRHPERR_F    MSIADDRHPERR_V(1U)
+
+#define MSIADDRLPERR_S    0
+#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
+#define MSIADDRLPERR_F    MSIADDRLPERR_V(1U)
+
+#define READRSPERR_S    29
+#define READRSPERR_V(x) ((x) << READRSPERR_S)
+#define READRSPERR_F    READRSPERR_V(1U)
+
+#define TRGT1GRPPERR_S    28
+#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
+#define TRGT1GRPPERR_F    TRGT1GRPPERR_V(1U)
+
+#define IPSOTPERR_S    27
+#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
+#define IPSOTPERR_F    IPSOTPERR_V(1U)
+
+#define IPRETRYPERR_S    26
+#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
+#define IPRETRYPERR_F    IPRETRYPERR_V(1U)
+
+#define IPRXDATAGRPPERR_S    25
+#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
+#define IPRXDATAGRPPERR_F    IPRXDATAGRPPERR_V(1U)
+
+#define IPRXHDRGRPPERR_S    24
+#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
+#define IPRXHDRGRPPERR_F    IPRXHDRGRPPERR_V(1U)
+
+#define MAGRPPERR_S    22
+#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
+#define MAGRPPERR_F    MAGRPPERR_V(1U)
+
+#define VFIDPERR_S    21
+#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
+#define VFIDPERR_F    VFIDPERR_V(1U)
+
+#define HREQWRPERR_S    16
+#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
+#define HREQWRPERR_F    HREQWRPERR_V(1U)
+
+#define DREQWRPERR_S    13
+#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
+#define DREQWRPERR_F    DREQWRPERR_V(1U)
+
+#define CREQRDPERR_S    11
+#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
+#define CREQRDPERR_F    CREQRDPERR_V(1U)
+
+#define MSTTAGQPERR_S    10
+#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
+#define MSTTAGQPERR_F    MSTTAGQPERR_V(1U)
+
+#define PIOREQGRPPERR_S    8
+#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
+#define PIOREQGRPPERR_F    PIOREQGRPPERR_V(1U)
+
+#define PIOCPLGRPPERR_S    7
+#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
+#define PIOCPLGRPPERR_F    PIOCPLGRPPERR_V(1U)
+
+#define MSIXSTIPERR_S    2
+#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
+#define MSIXSTIPERR_F    MSIXSTIPERR_V(1U)
+
+#define MSTTIMEOUTPERR_S    1
+#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
+#define MSTTIMEOUTPERR_F    MSTTIMEOUTPERR_V(1U)
+
+#define MSTGRPPERR_S    0
+#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
+#define MSTGRPPERR_F    MSTGRPPERR_V(1U)
+
+#define PCIE_NONFAT_ERR_A	0x3010
+#define PCIE_CFG_SPACE_REQ_A	0x3060
+#define PCIE_CFG_SPACE_DATA_A	0x3064
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
+
+#define PCIEOFST_S    10
+#define PCIEOFST_M    0x3fffffU
+#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
+
+#define BIR_S    8
+#define BIR_M    0x3U
+#define BIR_V(x) ((x) << BIR_S)
+#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
+
+#define WINDOW_S    0
+#define WINDOW_M    0xffU
+#define WINDOW_V(x) ((x) << WINDOW_S)
+#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
+
+#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
+
+#define ENABLE_S    30
+#define ENABLE_V(x) ((x) << ENABLE_S)
+#define ENABLE_F    ENABLE_V(1U)
+
+#define LOCALCFG_S    28
+#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
+#define LOCALCFG_F    LOCALCFG_V(1U)
+
+#define FUNCTION_S    12
+#define FUNCTION_V(x) ((x) << FUNCTION_S)
+
+#define REGISTER_S    0
+#define REGISTER_V(x) ((x) << REGISTER_S)
+
+#define T6_ENABLE_S    31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F    T6_ENABLE_V(1U)
+
+#define PFNUM_S    0
+#define PFNUM_V(x) ((x) << PFNUM_S)
+
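+/*
+ * Sketch of building a request word from the field helpers above.  This is
+ * illustrative only: "fn", "reg" and "is_t6" are assumed caller-supplied
+ * values, and pairing this exact combination with the PCIE_CFG_SPACE_REQ/
+ * PCIE_CFG_SPACE_DATA registers is an assumption here, not a statement of
+ * the driver's actual sequence.
+ *
+ *	u32 req = FUNCTION_V(fn) | REGISTER_V(reg) | LOCALCFG_F;
+ *	req |= is_t6 ? T6_ENABLE_F : ENABLE_F;   // enable bit moved on T6
+ *	// write req to PCIE_CFG_SPACE_REQ_A, then read PCIE_CFG_SPACE_DATA_A
+ */
+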
+#define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
+
+#define RNPP_S    31
+#define RNPP_V(x) ((x) << RNPP_S)
+#define RNPP_F    RNPP_V(1U)
+
+#define RPCP_S    29
+#define RPCP_V(x) ((x) << RPCP_S)
+#define RPCP_F    RPCP_V(1U)
+
+#define RCIP_S    27
+#define RCIP_V(x) ((x) << RCIP_S)
+#define RCIP_F    RCIP_V(1U)
+
+#define RCCP_S    26
+#define RCCP_V(x) ((x) << RCCP_S)
+#define RCCP_F    RCCP_V(1U)
+
+#define RFTP_S    23
+#define RFTP_V(x) ((x) << RFTP_S)
+#define RFTP_F    RFTP_V(1U)
+
+#define PTRP_S    20
+#define PTRP_V(x) ((x) << PTRP_S)
+#define PTRP_F    PTRP_V(1U)
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
+
+#define TPCP_S    30
+#define TPCP_V(x) ((x) << TPCP_S)
+#define TPCP_F    TPCP_V(1U)
+
+#define TNPP_S    29
+#define TNPP_V(x) ((x) << TNPP_S)
+#define TNPP_F    TNPP_V(1U)
+
+#define TFTP_S    28
+#define TFTP_V(x) ((x) << TFTP_S)
+#define TFTP_F    TFTP_V(1U)
+
+#define TCAP_S    27
+#define TCAP_V(x) ((x) << TCAP_S)
+#define TCAP_F    TCAP_V(1U)
+
+#define TCIP_S    26
+#define TCIP_V(x) ((x) << TCIP_S)
+#define TCIP_F    TCIP_V(1U)
+
+#define RCAP_S    25
+#define RCAP_V(x) ((x) << RCAP_S)
+#define RCAP_F    RCAP_V(1U)
+
+#define PLUP_S    23
+#define PLUP_V(x) ((x) << PLUP_S)
+#define PLUP_F    PLUP_V(1U)
+
+#define PLDN_S    22
+#define PLDN_V(x) ((x) << PLDN_S)
+#define PLDN_F    PLDN_V(1U)
+
+#define OTDD_S    21
+#define OTDD_V(x) ((x) << OTDD_S)
+#define OTDD_F    OTDD_V(1U)
+
+#define GTRP_S    20
+#define GTRP_V(x) ((x) << GTRP_S)
+#define GTRP_F    GTRP_V(1U)
+
+#define RDPE_S    18
+#define RDPE_V(x) ((x) << RDPE_S)
+#define RDPE_F    RDPE_V(1U)
+
+#define TDCE_S    17
+#define TDCE_V(x) ((x) << TDCE_S)
+#define TDCE_F    TDCE_V(1U)
+
+#define TDUE_S    16
+#define TDUE_V(x) ((x) << TDUE_S)
+#define TDUE_F    TDUE_V(1U)
+
+/* registers for module MC */
+#define MC_INT_CAUSE_A		0x7518
+#define MC_P_INT_CAUSE_A	0x41318
+
+#define ECC_UE_INT_CAUSE_S    2
+#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
+#define ECC_UE_INT_CAUSE_F    ECC_UE_INT_CAUSE_V(1U)
+
+#define ECC_CE_INT_CAUSE_S    1
+#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
+#define ECC_CE_INT_CAUSE_F    ECC_CE_INT_CAUSE_V(1U)
+
+#define PERR_INT_CAUSE_S    0
+#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
+#define PERR_INT_CAUSE_F    PERR_INT_CAUSE_V(1U)
+
+#define MC_ECC_STATUS_A		0x751c
+#define MC_P_ECC_STATUS_A	0x4131c
+
+#define ECC_CECNT_S    16
+#define ECC_CECNT_M    0xffffU
+#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
+#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
+
+#define ECC_UECNT_S    0
+#define ECC_UECNT_M    0xffffU
+#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
+#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
+
+#define MC_BIST_CMD_A 0x7600
+
+#define START_BIST_S    31
+#define START_BIST_V(x) ((x) << START_BIST_S)
+#define START_BIST_F    START_BIST_V(1U)
+
+#define BIST_CMD_GAP_S    8
+#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
+
+#define BIST_OPCODE_S    0
+#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
+
+#define MC_BIST_CMD_ADDR_A 0x7604
+#define MC_BIST_CMD_LEN_A 0x7608
+#define MC_BIST_DATA_PATTERN_A 0x760c
+
+#define MC_BIST_STATUS_RDATA_A 0x7688
+
+/* registers for module MA */
+#define MA_EDRAM0_BAR_A 0x77c0
+
+#define EDRAM0_BASE_S    16
+#define EDRAM0_BASE_M    0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
+#define EDRAM0_SIZE_S    0
+#define EDRAM0_SIZE_M    0xfffU
+#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
+#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M)
+
+#define MA_EDRAM1_BAR_A 0x77c4
+
+#define EDRAM1_BASE_S    16
+#define EDRAM1_BASE_M    0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
+#define EDRAM1_SIZE_S    0
+#define EDRAM1_SIZE_M    0xfffU
+#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
+#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M)
+
+#define MA_EXT_MEMORY_BAR_A 0x77c8
+
+#define EXT_MEM_BASE_S    16
+#define EXT_MEM_BASE_M    0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
+#define EXT_MEM_SIZE_S    0
+#define EXT_MEM_SIZE_M    0xfffU
+#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
+#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M)
+
+#define MA_EXT_MEMORY1_BAR_A 0x7808
+
+#define EXT_MEM1_BASE_S    16
+#define EXT_MEM1_BASE_M    0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
+#define EXT_MEM1_SIZE_S    0
+#define EXT_MEM1_SIZE_M    0xfffU
+#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
+#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M)
+
+#define MA_EXT_MEMORY0_BAR_A 0x77c8
+
+#define EXT_MEM0_BASE_S    16
+#define EXT_MEM0_BASE_M    0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
+#define EXT_MEM0_SIZE_S    0
+#define EXT_MEM0_SIZE_M    0xfffU
+#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
+#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M)
+
+#define MA_TARGET_MEM_ENABLE_A 0x77d8
+
+#define EXT_MEM_ENABLE_S    2
+#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S)
+#define EXT_MEM_ENABLE_F    EXT_MEM_ENABLE_V(1U)
+
+#define EDRAM1_ENABLE_S    1
+#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S)
+#define EDRAM1_ENABLE_F    EDRAM1_ENABLE_V(1U)
+
+#define EDRAM0_ENABLE_S    0
+#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S)
+#define EDRAM0_ENABLE_F    EDRAM0_ENABLE_V(1U)
+
+#define EXT_MEM1_ENABLE_S    4
+#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S)
+#define EXT_MEM1_ENABLE_F    EXT_MEM1_ENABLE_V(1U)
+
+#define EXT_MEM0_ENABLE_S    2
+#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
+#define EXT_MEM0_ENABLE_F    EXT_MEM0_ENABLE_V(1U)
+
+#define MA_INT_CAUSE_A	0x77e0
+
+#define MEM_PERR_INT_CAUSE_S    1
+#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
+#define MEM_PERR_INT_CAUSE_F    MEM_PERR_INT_CAUSE_V(1U)
+
+#define MEM_WRAP_INT_CAUSE_S    0
+#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
+#define MEM_WRAP_INT_CAUSE_F    MEM_WRAP_INT_CAUSE_V(1U)
+
+#define MA_INT_WRAP_STATUS_A	0x77e4
+
+#define MEM_WRAP_ADDRESS_S    4
+#define MEM_WRAP_ADDRESS_M    0xfffffffU
+#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
+
+#define MEM_WRAP_CLIENT_NUM_S    0
+#define MEM_WRAP_CLIENT_NUM_M    0xfU
+#define MEM_WRAP_CLIENT_NUM_G(x) \
+	(((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
+
+#define MA_PARITY_ERROR_STATUS_A	0x77f4
+#define MA_PARITY_ERROR_STATUS1_A	0x77f4
+#define MA_PARITY_ERROR_STATUS2_A	0x7804
+
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR		0x7900
+
+#define EDC_BIST_CMD_A		0x7904
+#define EDC_BIST_CMD_ADDR_A	0x7908
+#define EDC_BIST_CMD_LEN_A	0x790c
+#define EDC_BIST_DATA_PATTERN_A 0x7910
+#define EDC_BIST_STATUS_RDATA_A	0x7928
+#define EDC_INT_CAUSE_A		0x7978
+
+#define ECC_UE_PAR_S    5
+#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
+#define ECC_UE_PAR_F    ECC_UE_PAR_V(1U)
+
+#define ECC_CE_PAR_S    4
+#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
+#define ECC_CE_PAR_F    ECC_CE_PAR_V(1U)
+
+#define PERR_PAR_CAUSE_S    3
+#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
+#define PERR_PAR_CAUSE_F    PERR_PAR_CAUSE_V(1U)
+
+#define EDC_ECC_STATUS_A	0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR	0x7980
+
+/* registers for module CIM */
+#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
+#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
+
+#define BOOTADDR_M	0xffffff00U
+
+#define UPCRST_S    0
+#define UPCRST_V(x) ((x) << UPCRST_S)
+#define UPCRST_F    UPCRST_V(1U)
+
+#define CIM_PF_MAILBOX_DATA_A 0x240
+#define CIM_PF_MAILBOX_CTRL_A 0x280
+
+#define MBMSGVALID_S    3
+#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
+#define MBMSGVALID_F    MBMSGVALID_V(1U)
+
+#define MBINTREQ_S    2
+#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
+#define MBINTREQ_F    MBINTREQ_V(1U)
+
+#define MBOWNER_S    0
+#define MBOWNER_M    0x3U
+#define MBOWNER_V(x) ((x) << MBOWNER_S)
+#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
+
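+/*
+ * Mailbox handshake sketch (illustrative; the ownership codes returned by
+ * MBOWNER_G and the polling/backoff policy are driver details not defined
+ * here, and "my_owner_code" is an assumed value):
+ *
+ *	u32 ctl = ...;                           // raw read of CIM_PF_MAILBOX_CTRL_A
+ *	if (MBOWNER_G(ctl) == my_owner_code)     // mailbox available to us
+ *		ctl = MBMSGVALID_F | MBOWNER_V(my_owner_code);  // post message
+ */
+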
+#define CIM_PF_HOST_INT_ENABLE_A 0x288
+
+#define MBMSGRDYINTEN_S    19
+#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
+#define MBMSGRDYINTEN_F    MBMSGRDYINTEN_V(1U)
+
+#define CIM_PF_HOST_INT_CAUSE_A 0x28c
+
+#define MBMSGRDYINT_S    19
+#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
+#define MBMSGRDYINT_F    MBMSGRDYINT_V(1U)
+
+#define CIM_HOST_INT_CAUSE_A 0x7b2c
+
+#define TIEQOUTPARERRINT_S    20
+#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
+#define TIEQOUTPARERRINT_F    TIEQOUTPARERRINT_V(1U)
+
+#define TIEQINPARERRINT_S    19
+#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
+#define TIEQINPARERRINT_F    TIEQINPARERRINT_V(1U)
+
+#define PREFDROPINT_S    1
+#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
+#define PREFDROPINT_F    PREFDROPINT_V(1U)
+
+#define UPACCNONZERO_S    0
+#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
+#define UPACCNONZERO_F    UPACCNONZERO_V(1U)
+
+#define MBHOSTPARERR_S    18
+#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
+#define MBHOSTPARERR_F    MBHOSTPARERR_V(1U)
+
+#define MBUPPARERR_S    17
+#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
+#define MBUPPARERR_F    MBUPPARERR_V(1U)
+
+#define IBQTP0PARERR_S    16
+#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
+#define IBQTP0PARERR_F    IBQTP0PARERR_V(1U)
+
+#define IBQTP1PARERR_S    15
+#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
+#define IBQTP1PARERR_F    IBQTP1PARERR_V(1U)
+
+#define IBQULPPARERR_S    14
+#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
+#define IBQULPPARERR_F    IBQULPPARERR_V(1U)
+
+#define IBQSGELOPARERR_S    13
+#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
+#define IBQSGELOPARERR_F    IBQSGELOPARERR_V(1U)
+
+#define IBQSGEHIPARERR_S    12
+#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
+#define IBQSGEHIPARERR_F    IBQSGEHIPARERR_V(1U)
+
+#define IBQNCSIPARERR_S    11
+#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
+#define IBQNCSIPARERR_F    IBQNCSIPARERR_V(1U)
+
+#define OBQULP0PARERR_S    10
+#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
+#define OBQULP0PARERR_F    OBQULP0PARERR_V(1U)
+
+#define OBQULP1PARERR_S    9
+#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
+#define OBQULP1PARERR_F    OBQULP1PARERR_V(1U)
+
+#define OBQULP2PARERR_S    8
+#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
+#define OBQULP2PARERR_F    OBQULP2PARERR_V(1U)
+
+#define OBQULP3PARERR_S    7
+#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
+#define OBQULP3PARERR_F    OBQULP3PARERR_V(1U)
+
+#define OBQSGEPARERR_S    6
+#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
+#define OBQSGEPARERR_F    OBQSGEPARERR_V(1U)
+
+#define OBQNCSIPARERR_S    5
+#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
+#define OBQNCSIPARERR_F    OBQNCSIPARERR_V(1U)
+
+#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
+
+#define EEPROMWRINT_S    30
+#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
+#define EEPROMWRINT_F    EEPROMWRINT_V(1U)
+
+#define TIMEOUTMAINT_S    29
+#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
+#define TIMEOUTMAINT_F    TIMEOUTMAINT_V(1U)
+
+#define TIMEOUTINT_S    28
+#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
+#define TIMEOUTINT_F    TIMEOUTINT_V(1U)
+
+#define RSPOVRLOOKUPINT_S    27
+#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
+#define RSPOVRLOOKUPINT_F    RSPOVRLOOKUPINT_V(1U)
+
+#define REQOVRLOOKUPINT_S    26
+#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
+#define REQOVRLOOKUPINT_F    REQOVRLOOKUPINT_V(1U)
+
+#define BLKWRPLINT_S    25
+#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
+#define BLKWRPLINT_F    BLKWRPLINT_V(1U)
+
+#define BLKRDPLINT_S    24
+#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
+#define BLKRDPLINT_F    BLKRDPLINT_V(1U)
+
+#define SGLWRPLINT_S    23
+#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
+#define SGLWRPLINT_F    SGLWRPLINT_V(1U)
+
+#define SGLRDPLINT_S    22
+#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
+#define SGLRDPLINT_F    SGLRDPLINT_V(1U)
+
+#define BLKWRCTLINT_S    21
+#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
+#define BLKWRCTLINT_F    BLKWRCTLINT_V(1U)
+
+#define BLKRDCTLINT_S    20
+#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
+#define BLKRDCTLINT_F    BLKRDCTLINT_V(1U)
+
+#define SGLWRCTLINT_S    19
+#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
+#define SGLWRCTLINT_F    SGLWRCTLINT_V(1U)
+
+#define SGLRDCTLINT_S    18
+#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
+#define SGLRDCTLINT_F    SGLRDCTLINT_V(1U)
+
+#define BLKWREEPROMINT_S    17
+#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
+#define BLKWREEPROMINT_F    BLKWREEPROMINT_V(1U)
+
+#define BLKRDEEPROMINT_S    16
+#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
+#define BLKRDEEPROMINT_F    BLKRDEEPROMINT_V(1U)
+
+#define SGLWREEPROMINT_S    15
+#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
+#define SGLWREEPROMINT_F    SGLWREEPROMINT_V(1U)
+
+#define SGLRDEEPROMINT_S    14
+#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
+#define SGLRDEEPROMINT_F    SGLRDEEPROMINT_V(1U)
+
+#define BLKWRFLASHINT_S    13
+#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
+#define BLKWRFLASHINT_F    BLKWRFLASHINT_V(1U)
+
+#define BLKRDFLASHINT_S    12
+#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
+#define BLKRDFLASHINT_F    BLKRDFLASHINT_V(1U)
+
+#define SGLWRFLASHINT_S    11
+#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
+#define SGLWRFLASHINT_F    SGLWRFLASHINT_V(1U)
+
+#define SGLRDFLASHINT_S    10
+#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
+#define SGLRDFLASHINT_F    SGLRDFLASHINT_V(1U)
+
+#define BLKWRBOOTINT_S    9
+#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
+#define BLKWRBOOTINT_F    BLKWRBOOTINT_V(1U)
+
+#define BLKRDBOOTINT_S    8
+#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
+#define BLKRDBOOTINT_F    BLKRDBOOTINT_V(1U)
+
+#define SGLWRBOOTINT_S    7
+#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
+#define SGLWRBOOTINT_F    SGLWRBOOTINT_V(1U)
+
+#define SGLRDBOOTINT_S    6
+#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
+#define SGLRDBOOTINT_F    SGLRDBOOTINT_V(1U)
+
+#define ILLWRBEINT_S    5
+#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
+#define ILLWRBEINT_F    ILLWRBEINT_V(1U)
+
+#define ILLRDBEINT_S    4
+#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
+#define ILLRDBEINT_F    ILLRDBEINT_V(1U)
+
+#define ILLRDINT_S    3
+#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
+#define ILLRDINT_F    ILLRDINT_V(1U)
+
+#define ILLWRINT_S    2
+#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
+#define ILLWRINT_F    ILLWRINT_V(1U)
+
+#define ILLTRANSINT_S    1
+#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
+#define ILLTRANSINT_F    ILLTRANSINT_V(1U)
+
+#define RSVDSPACEINT_S    0
+#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
+#define RSVDSPACEINT_F    RSVDSPACEINT_V(1U)
+
+/* registers for module TP */
+#define DBGLAWHLF_S    23
+#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S)
+#define DBGLAWHLF_F    DBGLAWHLF_V(1U)
+
+#define DBGLAWPTR_S    16
+#define DBGLAWPTR_M    0x7fU
+#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M)
+
+#define DBGLAENABLE_S    12
+#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S)
+#define DBGLAENABLE_F    DBGLAENABLE_V(1U)
+
+#define DBGLARPTR_S    0
+#define DBGLARPTR_M    0x7fU
+#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
+
+#define TP_DBG_LA_DATAL_A	0x7ed8
+#define TP_DBG_LA_CONFIG_A	0x7ed4
+#define TP_OUT_CONFIG_A		0x7d04
+#define TP_GLOBAL_CONFIG_A	0x7d08
+
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S    31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F    PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S    30
+#define PMTXNUMCHN_M    0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S    0
+#define PMTXMAXPAGE_M    0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S    0
+#define PMRXMAXPAGE_M    0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
+#define DBGLAMODE_S	14
+#define DBGLAMODE_M	0x3U
+#define DBGLAMODE_G(x)	(((x) >> DBGLAMODE_S) & DBGLAMODE_M)
+
+#define FIVETUPLELOOKUP_S    17
+#define FIVETUPLELOOKUP_M    0x3U
+#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
+#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
+
+#define TP_PARA_REG2_A 0x7d68
+
+#define MAXRXDATA_S    16
+#define MAXRXDATA_M    0xffffU
+#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
+
+#define TP_TIMER_RESOLUTION_A 0x7d90
+
+#define TIMERRESOLUTION_S    16
+#define TIMERRESOLUTION_M    0xffU
+#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
+
+#define TIMESTAMPRESOLUTION_S    8
+#define TIMESTAMPRESOLUTION_M    0xffU
+#define TIMESTAMPRESOLUTION_G(x) \
+	(((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M)
+
+#define DELAYEDACKRESOLUTION_S    0
+#define DELAYEDACKRESOLUTION_M    0xffU
+#define DELAYEDACKRESOLUTION_G(x) \
+	(((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
+
+#define TP_SHIFT_CNT_A 0x7dc0
+#define TP_RXT_MIN_A 0x7d98
+#define TP_RXT_MAX_A 0x7d9c
+#define TP_PERS_MIN_A 0x7da0
+#define TP_PERS_MAX_A 0x7da4
+#define TP_KEEP_IDLE_A 0x7da8
+#define TP_KEEP_INTVL_A 0x7dac
+#define TP_INIT_SRTT_A 0x7db0
+#define TP_DACK_TIMER_A 0x7db4
+#define TP_FINWAIT2_TIMER_A 0x7db8
+
+#define INITSRTT_S    0
+#define INITSRTT_M    0xffffU
+#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M)
+
+#define PERSMAX_S    0
+#define PERSMAX_M    0x3fffffffU
+#define PERSMAX_V(x) ((x) << PERSMAX_S)
+#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M)
+
+#define SYNSHIFTMAX_S    24
+#define SYNSHIFTMAX_M    0xffU
+#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
+#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
+
+#define RXTSHIFTMAXR1_S    20
+#define RXTSHIFTMAXR1_M    0xfU
+#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
+#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
+
+#define RXTSHIFTMAXR2_S    16
+#define RXTSHIFTMAXR2_M    0xfU
+#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
+#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
+
+#define PERSHIFTBACKOFFMAX_S    12
+#define PERSHIFTBACKOFFMAX_M    0xfU
+#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
+#define PERSHIFTBACKOFFMAX_G(x) \
+	(((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
+
+#define PERSHIFTMAX_S    8
+#define PERSHIFTMAX_M    0xfU
+#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
+#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
+
+#define KEEPALIVEMAXR1_S    4
+#define KEEPALIVEMAXR1_M    0xfU
+#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
+#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
+
+#define KEEPALIVEMAXR2_S    0
+#define KEEPALIVEMAXR2_M    0xfU
+#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
+#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
+
+#define ROWINDEX_S    16
+#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
+
+#define TP_CCTRL_TABLE_A	0x7ddc
+#define TP_MTU_TABLE_A		0x7de4
+
+#define MTUINDEX_S    24
+#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
+
+#define MTUWIDTH_S    16
+#define MTUWIDTH_M    0xfU
+#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
+#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
+
+#define MTUVALUE_S    0
+#define MTUVALUE_M    0x3fffU
+#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
+#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
+
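+/*
+ * Sketch of composing a TP_MTU_TABLE entry from the field helpers above
+ * (illustrative; "idx", "width" and "mtu" are assumed inputs and the actual
+ * programming sequence is a driver detail):
+ *
+ *	u32 v = MTUINDEX_V(idx) | MTUWIDTH_V(width) | MTUVALUE_V(mtu);
+ *	// write v to TP_MTU_TABLE_A; on read-back, MTUVALUE_G(v) recovers mtu
+ */
+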
+#define TP_RSS_LKP_TABLE_A	0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
+
+#define LKPTBLROWVLD_S    31
+#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
+#define LKPTBLROWVLD_F    LKPTBLROWVLD_V(1U)
+
+#define LKPTBLQUEUE1_S    10
+#define LKPTBLQUEUE1_M    0x3ffU
+#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
+
+#define LKPTBLQUEUE0_S    0
+#define LKPTBLQUEUE0_M    0x3ffU
+#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+
+#define TP_PIO_ADDR_A	0x7e40
+#define TP_PIO_DATA_A	0x7e44
+#define TP_MIB_INDEX_A	0x7e50
+#define TP_MIB_DATA_A	0x7e54
+#define TP_INT_CAUSE_A	0x7e74
+
+#define FLMTXFLSTEMPTY_S    30
+#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
+#define FLMTXFLSTEMPTY_F    FLMTXFLSTEMPTY_V(1U)
+
+#define TP_TX_ORATE_A 0x7ebc
+
+#define OFDRATE3_S    24
+#define OFDRATE3_M    0xffU
+#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M)
+
+#define OFDRATE2_S    16
+#define OFDRATE2_M    0xffU
+#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M)
+
+#define OFDRATE1_S    8
+#define OFDRATE1_M    0xffU
+#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M)
+
+#define OFDRATE0_S    0
+#define OFDRATE0_M    0xffU
+#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M)
+
+#define TP_TX_TRATE_A 0x7ed0
+
+#define TNLRATE3_S    24
+#define TNLRATE3_M    0xffU
+#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M)
+
+#define TNLRATE2_S    16
+#define TNLRATE2_M    0xffU
+#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M)
+
+#define TNLRATE1_S    8
+#define TNLRATE1_M    0xffU
+#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M)
+
+#define TNLRATE0_S    0
+#define TNLRATE0_M    0xffU
+#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M)
+
+#define TP_VLAN_PRI_MAP_A 0x140
+
+#define FRAGMENTATION_S    9
+#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
+#define FRAGMENTATION_F    FRAGMENTATION_V(1U)
+
+#define MPSHITTYPE_S    8
+#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
+#define MPSHITTYPE_F    MPSHITTYPE_V(1U)
+
+#define MACMATCH_S    7
+#define MACMATCH_V(x) ((x) << MACMATCH_S)
+#define MACMATCH_F    MACMATCH_V(1U)
+
+#define ETHERTYPE_S    6
+#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
+#define ETHERTYPE_F    ETHERTYPE_V(1U)
+
+#define PROTOCOL_S    5
+#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
+#define PROTOCOL_F    PROTOCOL_V(1U)
+
+#define TOS_S    4
+#define TOS_V(x) ((x) << TOS_S)
+#define TOS_F    TOS_V(1U)
+
+#define VLAN_S    3
+#define VLAN_V(x) ((x) << VLAN_S)
+#define VLAN_F    VLAN_V(1U)
+
+#define VNIC_ID_S    2
+#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
+#define VNIC_ID_F    VNIC_ID_V(1U)
+
+#define PORT_S    1
+#define PORT_V(x) ((x) << PORT_S)
+#define PORT_F    PORT_V(1U)
+
+#define FCOE_S    0
+#define FCOE_V(x) ((x) << FCOE_S)
+#define FCOE_F    FCOE_V(1U)
+
+#define FILTERMODE_S    15
+#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
+#define FILTERMODE_F    FILTERMODE_V(1U)
+
+#define FCOEMASK_S    14
+#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
+#define FCOEMASK_F    FCOEMASK_V(1U)
+
+#define TP_INGRESS_CONFIG_A	0x141
+
+#define VNIC_S    11
+#define VNIC_V(x) ((x) << VNIC_S)
+#define VNIC_F    VNIC_V(1U)
+
+#define CSUM_HAS_PSEUDO_HDR_S    10
+#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
+#define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
+
+#define TP_MIB_MAC_IN_ERR_0_A	0x0
+#define TP_MIB_HDR_IN_ERR_0_A	0x4
+#define TP_MIB_TCP_IN_ERR_0_A	0x8
+#define TP_MIB_TCP_OUT_RST_A	0xc
+#define TP_MIB_TCP_IN_SEG_HI_A	0x10
+#define TP_MIB_TCP_IN_SEG_LO_A	0x11
+#define TP_MIB_TCP_OUT_SEG_HI_A	0x12
+#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
+#define TP_MIB_TCP_RXT_SEG_HI_A	0x14
+#define TP_MIB_TCP_RXT_SEG_LO_A	0x15
+#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
+#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
+#define TP_MIB_TCP_V6OUT_RST_A	0x2c
+#define TP_MIB_OFD_ARP_DROP_A	0x36
+#define TP_MIB_CPL_IN_REQ_0_A	0x38
+#define TP_MIB_CPL_OUT_RSP_0_A	0x3c
+#define TP_MIB_TNL_DROP_0_A	0x44
+#define TP_MIB_FCOE_DDP_0_A	0x48
+#define TP_MIB_FCOE_DROP_0_A	0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A	0x50
+#define TP_MIB_OFD_VLN_DROP_0_A	0x58
+#define TP_MIB_USM_PKTS_A	0x5c
+#define TP_MIB_RQE_DFR_PKT_A	0x64
+
+#define ULP_TX_INT_CAUSE_A	0x8dcc
+#define ULP_TX_TPT_LLIMIT_A	0x8dd4
+#define ULP_TX_TPT_ULIMIT_A	0x8dd8
+#define ULP_TX_PBL_LLIMIT_A	0x8ddc
+#define ULP_TX_PBL_ULIMIT_A	0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
+
+#define PBL_BOUND_ERR_CH3_S    31
+#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
+#define PBL_BOUND_ERR_CH3_F    PBL_BOUND_ERR_CH3_V(1U)
+
+#define PBL_BOUND_ERR_CH2_S    30
+#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
+#define PBL_BOUND_ERR_CH2_F    PBL_BOUND_ERR_CH2_V(1U)
+
+#define PBL_BOUND_ERR_CH1_S    29
+#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
+#define PBL_BOUND_ERR_CH1_F    PBL_BOUND_ERR_CH1_V(1U)
+
+#define PBL_BOUND_ERR_CH0_S    28
+#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
+#define PBL_BOUND_ERR_CH0_F    PBL_BOUND_ERR_CH0_V(1U)
+
+#define PM_RX_INT_CAUSE_A	0x8fdc
+#define PM_RX_STAT_CONFIG_A 0x8fc8
+#define PM_RX_STAT_COUNT_A 0x8fcc
+#define PM_RX_STAT_LSB_A 0x8fd0
+#define PM_RX_DBG_CTRL_A 0x8fd0
+#define PM_RX_DBG_DATA_A 0x8fd4
+#define PM_RX_DBG_STAT_MSB_A 0x10013
+
+#define PMRX_FRAMING_ERROR_F	0x003ffff0U
+
+#define ZERO_E_CMD_ERROR_S    22
+#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
+#define ZERO_E_CMD_ERROR_F    ZERO_E_CMD_ERROR_V(1U)
+
+#define OCSPI_PAR_ERROR_S    3
+#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
+#define OCSPI_PAR_ERROR_F    OCSPI_PAR_ERROR_V(1U)
+
+#define DB_OPTIONS_PAR_ERROR_S    2
+#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
+#define DB_OPTIONS_PAR_ERROR_F    DB_OPTIONS_PAR_ERROR_V(1U)
+
+#define IESPI_PAR_ERROR_S    1
+#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
+#define IESPI_PAR_ERROR_F    IESPI_PAR_ERROR_V(1U)
+
+#define PMRX_E_PCMD_PAR_ERROR_S    0
+#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
+#define PMRX_E_PCMD_PAR_ERROR_F    PMRX_E_PCMD_PAR_ERROR_V(1U)
+
+#define PM_TX_INT_CAUSE_A	0x8ffc
+#define PM_TX_STAT_CONFIG_A 0x8fe8
+#define PM_TX_STAT_COUNT_A 0x8fec
+#define PM_TX_STAT_LSB_A 0x8ff0
+#define PM_TX_DBG_CTRL_A 0x8ff0
+#define PM_TX_DBG_DATA_A 0x8ff4
+#define PM_TX_DBG_STAT_MSB_A 0x1001a
+
+#define PCMD_LEN_OVFL0_S    31
+#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
+#define PCMD_LEN_OVFL0_F    PCMD_LEN_OVFL0_V(1U)
+
+#define PCMD_LEN_OVFL1_S    30
+#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
+#define PCMD_LEN_OVFL1_F    PCMD_LEN_OVFL1_V(1U)
+
+#define PCMD_LEN_OVFL2_S    29
+#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
+#define PCMD_LEN_OVFL2_F    PCMD_LEN_OVFL2_V(1U)
+
+#define ZERO_C_CMD_ERROR_S    28
+#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
+#define ZERO_C_CMD_ERROR_F    ZERO_C_CMD_ERROR_V(1U)
+
+#define PMTX_FRAMING_ERROR_F 0x0ffffff0U
+
+#define OESPI_PAR_ERROR_S    3
+#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
+#define OESPI_PAR_ERROR_F    OESPI_PAR_ERROR_V(1U)
+
+#define ICSPI_PAR_ERROR_S    1
+#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
+#define ICSPI_PAR_ERROR_F    ICSPI_PAR_ERROR_V(1U)
+
+#define PMTX_C_PCMD_PAR_ERROR_S    0
+#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
+#define PMTX_C_PCMD_PAR_ERROR_F    PMTX_C_PCMD_PAR_ERROR_V(1U)
+
+#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
+#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MAC_PORT_MAGIC_MACID_LO 0x824
+#define MAC_PORT_MAGIC_MACID_HI 0x828
+
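+/*
+ * The per-port statistics above are exported as _L/_H register pairs.  A
+ * minimal sketch of assembling one 64-bit counter, assuming _L holds the
+ * low 32 bits, _H the high 32 bits, and "lo"/"hi" are raw reads of the
+ * corresponding pair:
+ *
+ *	u64 tx_frames = ((u64)hi << 32) | lo;
+ */
+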
+#define MAC_PORT_EPIO_DATA0_A 0x8c0
+#define MAC_PORT_EPIO_DATA1_A 0x8c4
+#define MAC_PORT_EPIO_DATA2_A 0x8c8
+#define MAC_PORT_EPIO_DATA3_A 0x8cc
+#define MAC_PORT_EPIO_OP_A 0x8d0
+
+#define MAC_PORT_CFG2_A 0x818
+
+#define MPS_CMN_CTL_A	0x9000
+
+#define NUMPORTS_S    0
+#define NUMPORTS_M    0x3U
+#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
+
+#define MPS_INT_CAUSE_A 0x9008
+#define MPS_TX_INT_CAUSE_A 0x9408
+
+#define FRMERR_S    15
+#define FRMERR_V(x) ((x) << FRMERR_S)
+#define FRMERR_F    FRMERR_V(1U)
+
+#define SECNTERR_S    14
+#define SECNTERR_V(x) ((x) << SECNTERR_S)
+#define SECNTERR_F    SECNTERR_V(1U)
+
+#define BUBBLE_S    13
+#define BUBBLE_V(x) ((x) << BUBBLE_S)
+#define BUBBLE_F    BUBBLE_V(1U)
+
+#define TXDESCFIFO_S    9
+#define TXDESCFIFO_M    0xfU
+#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
+
+#define TXDATAFIFO_S    5
+#define TXDATAFIFO_M    0xfU
+#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
+
+#define NCSIFIFO_S    4
+#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
+#define NCSIFIFO_F    NCSIFIFO_V(1U)
+
+#define TPFIFO_S    0
+#define TPFIFO_M    0xfU
+#define TPFIFO_V(x) ((x) << TPFIFO_S)
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM_A		0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A	0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A	0x962c
+
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+
+#define MPS_TRC_CFG_A 0x9800
+
+#define TRCFIFOEMPTY_S    4
+#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
+#define TRCFIFOEMPTY_F    TRCFIFOEMPTY_V(1U)
+
+#define TRCIGNOREDROPINPUT_S    3
+#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
+#define TRCIGNOREDROPINPUT_F    TRCIGNOREDROPINPUT_V(1U)
+
+#define TRCKEEPDUPLICATES_S    2
+#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
+#define TRCKEEPDUPLICATES_F    TRCKEEPDUPLICATES_V(1U)
+
+#define TRCEN_S    1
+#define TRCEN_V(x) ((x) << TRCEN_S)
+#define TRCEN_F    TRCEN_V(1U)
+
+#define TRCMULTIFILTER_S    0
+#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
+#define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
+
+#define MPS_TRC_RSS_CONTROL_A		0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A	0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A	0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A	0xa004
+#define MPS_T5_TRC_RSS_CONTROL_A	0xa00c
+
+#define RSSCONTROL_S    16
+#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
+
+#define QUEUENUMBER_S    0
+#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+
+#define TFINVERTMATCH_S    24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F    TFINVERTMATCH_V(1U)
+
+#define TFEN_S    22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F    TFEN_V(1U)
+
+#define TFPORT_S    18
+#define TFPORT_M    0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S    8
+#define TFLENGTH_M    0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S    0
+#define TFOFFSET_M    0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S    25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F    T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S    23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F    T5_TFEN_V(1U)
+
+#define T5_TFPORT_S    18
+#define T5_TFPORT_M    0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S    16
+#define TFMINPKTSIZE_M    0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S    0
+#define TFCAPTUREMAX_M    0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
+#define TP_RSS_CONFIG_A 0x7df0
+
+#define TNL4TUPENIPV6_S    31
+#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S)
+#define TNL4TUPENIPV6_F    TNL4TUPENIPV6_V(1U)
+
+#define TNL2TUPENIPV6_S    30
+#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S)
+#define TNL2TUPENIPV6_F    TNL2TUPENIPV6_V(1U)
+
+#define TNL4TUPENIPV4_S    29
+#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S)
+#define TNL4TUPENIPV4_F    TNL4TUPENIPV4_V(1U)
+
+#define TNL2TUPENIPV4_S    28
+#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S)
+#define TNL2TUPENIPV4_F    TNL2TUPENIPV4_V(1U)
+
+#define TNLTCPSEL_S    27
+#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S)
+#define TNLTCPSEL_F    TNLTCPSEL_V(1U)
+
+#define TNLIP6SEL_S    26
+#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S)
+#define TNLIP6SEL_F    TNLIP6SEL_V(1U)
+
+#define TNLVRTSEL_S    25
+#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S)
+#define TNLVRTSEL_F    TNLVRTSEL_V(1U)
+
+#define TNLMAPEN_S    24
+#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S)
+#define TNLMAPEN_F    TNLMAPEN_V(1U)
+
+#define OFDHASHSAVE_S    19
+#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S)
+#define OFDHASHSAVE_F    OFDHASHSAVE_V(1U)
+
+#define OFDVRTSEL_S    18
+#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S)
+#define OFDVRTSEL_F    OFDVRTSEL_V(1U)
+
+#define OFDMAPEN_S    17
+#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S)
+#define OFDMAPEN_F    OFDMAPEN_V(1U)
+
+#define OFDLKPEN_S    16
+#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S)
+#define OFDLKPEN_F    OFDLKPEN_V(1U)
+
+#define SYN4TUPENIPV6_S    15
+#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S)
+#define SYN4TUPENIPV6_F    SYN4TUPENIPV6_V(1U)
+
+#define SYN2TUPENIPV6_S    14
+#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S)
+#define SYN2TUPENIPV6_F    SYN2TUPENIPV6_V(1U)
+
+#define SYN4TUPENIPV4_S    13
+#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S)
+#define SYN4TUPENIPV4_F    SYN4TUPENIPV4_V(1U)
+
+#define SYN2TUPENIPV4_S    12
+#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S)
+#define SYN2TUPENIPV4_F    SYN2TUPENIPV4_V(1U)
+
+#define SYNIP6SEL_S    11
+#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S)
+#define SYNIP6SEL_F    SYNIP6SEL_V(1U)
+
+#define SYNVRTSEL_S    10
+#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S)
+#define SYNVRTSEL_F    SYNVRTSEL_V(1U)
+
+#define SYNMAPEN_S    9
+#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S)
+#define SYNMAPEN_F    SYNMAPEN_V(1U)
+
+#define SYNLKPEN_S    8
+#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S)
+#define SYNLKPEN_F    SYNLKPEN_V(1U)
+
+#define CHANNELENABLE_S    7
+#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S)
+#define CHANNELENABLE_F    CHANNELENABLE_V(1U)
+
+#define PORTENABLE_S    6
+#define PORTENABLE_V(x) ((x) << PORTENABLE_S)
+#define PORTENABLE_F    PORTENABLE_V(1U)
+
+#define TNLALLLOOKUP_S    5
+#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S)
+#define TNLALLLOOKUP_F    TNLALLLOOKUP_V(1U)
+
+#define VIRTENABLE_S    4
+#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S)
+#define VIRTENABLE_F    VIRTENABLE_V(1U)
+
+#define CONGESTIONENABLE_S    3
+#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S)
+#define CONGESTIONENABLE_F    CONGESTIONENABLE_V(1U)
+
+#define HASHTOEPLITZ_S    2
+#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S)
+#define HASHTOEPLITZ_F    HASHTOEPLITZ_V(1U)
+
+#define UDPENABLE_S    1
+#define UDPENABLE_V(x) ((x) << UDPENABLE_S)
+#define UDPENABLE_F    UDPENABLE_V(1U)
+
+#define DISABLE_S    0
+#define DISABLE_V(x) ((x) << DISABLE_S)
+#define DISABLE_F    DISABLE_V(1U)
+
+#define TP_RSS_CONFIG_TNL_A 0x7df4
+
+#define MASKSIZE_S    28
+#define MASKSIZE_M    0xfU
+#define MASKSIZE_V(x) ((x) << MASKSIZE_S)
+#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M)
+
+#define MASKFILTER_S    16
+#define MASKFILTER_M    0x7ffU
+#define MASKFILTER_V(x) ((x) << MASKFILTER_S)
+#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M)
+
+#define USEWIRECH_S    0
+#define USEWIRECH_V(x) ((x) << USEWIRECH_S)
+#define USEWIRECH_F    USEWIRECH_V(1U)
+
+#define HASHALL_S    2
+#define HASHALL_V(x) ((x) << HASHALL_S)
+#define HASHALL_F    HASHALL_V(1U)
+
+#define HASHETH_S    1
+#define HASHETH_V(x) ((x) << HASHETH_S)
+#define HASHETH_F    HASHETH_V(1U)
+
+#define TP_RSS_CONFIG_OFD_A 0x7df8
+
+#define RRCPLMAPEN_S    20
+#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S)
+#define RRCPLMAPEN_F    RRCPLMAPEN_V(1U)
+
+#define RRCPLQUEWIDTH_S    16
+#define RRCPLQUEWIDTH_M    0xfU
+#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S)
+#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M)
+
+#define TP_RSS_CONFIG_SYN_A 0x7dfc
+#define TP_RSS_CONFIG_VRT_A 0x7e00
+
+#define VFRDRG_S    25
+#define VFRDRG_V(x) ((x) << VFRDRG_S)
+#define VFRDRG_F    VFRDRG_V(1U)
+
+#define VFRDEN_S    24
+#define VFRDEN_V(x) ((x) << VFRDEN_S)
+#define VFRDEN_F    VFRDEN_V(1U)
+
+#define VFPERREN_S    23
+#define VFPERREN_V(x) ((x) << VFPERREN_S)
+#define VFPERREN_F    VFPERREN_V(1U)
+
+#define KEYPERREN_S    22
+#define KEYPERREN_V(x) ((x) << KEYPERREN_S)
+#define KEYPERREN_F    KEYPERREN_V(1U)
+
+#define DISABLEVLAN_S    21
+#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S)
+#define DISABLEVLAN_F    DISABLEVLAN_V(1U)
+
+#define ENABLEUP0_S    20
+#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S)
+#define ENABLEUP0_F    ENABLEUP0_V(1U)
+
+#define HASHDELAY_S    16
+#define HASHDELAY_M    0xfU
+#define HASHDELAY_V(x) ((x) << HASHDELAY_S)
+#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M)
+
+#define VFWRADDR_S    8
+#define VFWRADDR_M    0x7fU
+#define VFWRADDR_V(x) ((x) << VFWRADDR_S)
+#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M)
+
+#define KEYMODE_S    6
+#define KEYMODE_M    0x3U
+#define KEYMODE_V(x) ((x) << KEYMODE_S)
+#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M)
+
+#define VFWREN_S    5
+#define VFWREN_V(x) ((x) << VFWREN_S)
+#define VFWREN_F    VFWREN_V(1U)
+
+#define KEYWREN_S    4
+#define KEYWREN_V(x) ((x) << KEYWREN_S)
+#define KEYWREN_F    KEYWREN_V(1U)
+
+#define KEYWRADDR_S    0
+#define KEYWRADDR_M    0xfU
+#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S)
+#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M)
+
+#define KEYWRADDRX_S    30
+#define KEYWRADDRX_M    0x3U
+#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S)
+#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M)
+
+#define KEYEXTEND_S    26
+#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S)
+#define KEYEXTEND_F    KEYEXTEND_V(1U)
+
+#define LKPIDXSIZE_S    24
+#define LKPIDXSIZE_M    0x3U
+#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S)
+#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M)
+
+#define TP_RSS_VFL_CONFIG_A 0x3a
+#define TP_RSS_VFH_CONFIG_A 0x3b
+
+#define ENABLEUDPHASH_S    31
+#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S)
+#define ENABLEUDPHASH_F    ENABLEUDPHASH_V(1U)
+
+#define VFUPEN_S    30
+#define VFUPEN_V(x) ((x) << VFUPEN_S)
+#define VFUPEN_F    VFUPEN_V(1U)
+
+#define VFVLNEX_S    28
+#define VFVLNEX_V(x) ((x) << VFVLNEX_S)
+#define VFVLNEX_F    VFVLNEX_V(1U)
+
+#define VFPRTEN_S    27
+#define VFPRTEN_V(x) ((x) << VFPRTEN_S)
+#define VFPRTEN_F    VFPRTEN_V(1U)
+
+#define VFCHNEN_S    26
+#define VFCHNEN_V(x) ((x) << VFCHNEN_S)
+#define VFCHNEN_F    VFCHNEN_V(1U)
+
+#define DEFAULTQUEUE_S    16
+#define DEFAULTQUEUE_M    0x3ffU
+#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M)
+
+#define VFIP6TWOTUPEN_S    6
+#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S)
+#define VFIP6TWOTUPEN_F    VFIP6TWOTUPEN_V(1U)
+
+#define VFIP4FOURTUPEN_S    5
+#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S)
+#define VFIP4FOURTUPEN_F    VFIP4FOURTUPEN_V(1U)
+
+#define VFIP4TWOTUPEN_S    4
+#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S)
+#define VFIP4TWOTUPEN_F    VFIP4TWOTUPEN_V(1U)
+
+#define KEYINDEX_S    0
+#define KEYINDEX_M    0xfU
+#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M)
+
+#define MAPENABLE_S    31
+#define MAPENABLE_V(x) ((x) << MAPENABLE_S)
+#define MAPENABLE_F    MAPENABLE_V(1U)
+
+#define CHNENABLE_S    30
+#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
+#define CHNENABLE_F    CHNENABLE_V(1U)
+
+#define PRTENABLE_S    29
+#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
+#define PRTENABLE_F    PRTENABLE_V(1U)
+
+#define UDPFOURTUPEN_S    28
+#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S)
+#define UDPFOURTUPEN_F    UDPFOURTUPEN_V(1U)
+
+#define IP6FOURTUPEN_S    27
+#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S)
+#define IP6FOURTUPEN_F    IP6FOURTUPEN_V(1U)
+
+#define IP6TWOTUPEN_S    26
+#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S)
+#define IP6TWOTUPEN_F    IP6TWOTUPEN_V(1U)
+
+#define IP4FOURTUPEN_S    25
+#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S)
+#define IP4FOURTUPEN_F    IP4FOURTUPEN_V(1U)
+
+#define IP4TWOTUPEN_S    24
+#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S)
+#define IP4TWOTUPEN_F    IP4TWOTUPEN_V(1U)
+
+#define IVFWIDTH_S    20
+#define IVFWIDTH_M    0xfU
+#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S)
+#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M)
+
+#define CH1DEFAULTQUEUE_S    10
+#define CH1DEFAULTQUEUE_M    0x3ffU
+#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S)
+#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M)
+
+#define CH0DEFAULTQUEUE_S    0
+#define CH0DEFAULTQUEUE_M    0x3ffU
+#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S)
+#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M)
+
+#define VFLKPIDX_S    8
+#define VFLKPIDX_M    0xffU
+#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
+
+#define T6_VFWRADDR_S    8
+#define T6_VFWRADDR_M    0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
+#define TP_RSS_CONFIG_CNG_A 0x7e04
+#define TP_RSS_SECRET_KEY0_A 0x40
+#define TP_RSS_PF0_CONFIG_A 0x30
+#define TP_RSS_PF_MAP_A 0x38
+#define TP_RSS_PF_MSK_A 0x39
+
+#define PF1LKPIDX_S    3
+
+#define PF0LKPIDX_M    0x7U
+
+#define PF1MSKSIZE_S    4
+#define PF1MSKSIZE_M    0xfU
+
+#define CHNCOUNT3_S    31
+#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S)
+#define CHNCOUNT3_F    CHNCOUNT3_V(1U)
+
+#define CHNCOUNT2_S    30
+#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S)
+#define CHNCOUNT2_F    CHNCOUNT2_V(1U)
+
+#define CHNCOUNT1_S    29
+#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S)
+#define CHNCOUNT1_F    CHNCOUNT1_V(1U)
+
+#define CHNCOUNT0_S    28
+#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S)
+#define CHNCOUNT0_F    CHNCOUNT0_V(1U)
+
+#define CHNUNDFLOW3_S    27
+#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S)
+#define CHNUNDFLOW3_F    CHNUNDFLOW3_V(1U)
+
+#define CHNUNDFLOW2_S    26
+#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S)
+#define CHNUNDFLOW2_F    CHNUNDFLOW2_V(1U)
+
+#define CHNUNDFLOW1_S    25
+#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S)
+#define CHNUNDFLOW1_F    CHNUNDFLOW1_V(1U)
+
+#define CHNUNDFLOW0_S    24
+#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S)
+#define CHNUNDFLOW0_F    CHNUNDFLOW0_V(1U)
+
+#define RSTCHN3_S    19
+#define RSTCHN3_V(x) ((x) << RSTCHN3_S)
+#define RSTCHN3_F    RSTCHN3_V(1U)
+
+#define RSTCHN2_S    18
+#define RSTCHN2_V(x) ((x) << RSTCHN2_S)
+#define RSTCHN2_F    RSTCHN2_V(1U)
+
+#define RSTCHN1_S    17
+#define RSTCHN1_V(x) ((x) << RSTCHN1_S)
+#define RSTCHN1_F    RSTCHN1_V(1U)
+
+#define RSTCHN0_S    16
+#define RSTCHN0_V(x) ((x) << RSTCHN0_S)
+#define RSTCHN0_F    RSTCHN0_V(1U)
+
+#define UPDVLD_S    15
+#define UPDVLD_V(x) ((x) << UPDVLD_S)
+#define UPDVLD_F    UPDVLD_V(1U)
+
+#define XOFF_S    14
+#define XOFF_V(x) ((x) << XOFF_S)
+#define XOFF_F    XOFF_V(1U)
+
+#define UPDCHN3_S    13
+#define UPDCHN3_V(x) ((x) << UPDCHN3_S)
+#define UPDCHN3_F    UPDCHN3_V(1U)
+
+#define UPDCHN2_S    12
+#define UPDCHN2_V(x) ((x) << UPDCHN2_S)
+#define UPDCHN2_F    UPDCHN2_V(1U)
+
+#define UPDCHN1_S    11
+#define UPDCHN1_V(x) ((x) << UPDCHN1_S)
+#define UPDCHN1_F    UPDCHN1_V(1U)
+
+#define UPDCHN0_S    10
+#define UPDCHN0_V(x) ((x) << UPDCHN0_S)
+#define UPDCHN0_F    UPDCHN0_V(1U)
+
+#define QUEUE_S    0
+#define QUEUE_M    0x3ffU
+#define QUEUE_V(x) ((x) << QUEUE_S)
+#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M)
+
+#define MPS_TRC_INT_CAUSE_A	0x985c
+
+#define MISCPERR_S    8
+#define MISCPERR_V(x) ((x) << MISCPERR_S)
+#define MISCPERR_F    MISCPERR_V(1U)
+
+#define PKTFIFO_S    4
+#define PKTFIFO_M    0xfU
+#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
+
+#define FILTMEM_S    0
+#define FILTMEM_M    0xfU
+#define FILTMEM_V(x) ((x) << FILTMEM_S)
+
+#define MPS_CLS_INT_CAUSE_A 0xd028
+
+#define HASHSRAM_S    2
+#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
+#define HASHSRAM_F    HASHSRAM_V(1U)
+
+#define MATCHTCAM_S    1
+#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
+#define MATCHTCAM_F    MATCHTCAM_V(1U)
+
+#define MATCHSRAM_S    0
+#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
+#define MATCHSRAM_F    MATCHSRAM_V(1U)
+
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
+#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+
+#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define USED_S    16
+#define USED_M    0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S    0
+#define ALLOC_M    0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S    16
+#define T5_USED_M    0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S    0
+#define T5_ALLOC_M    0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
+#define DMACH_S    0
+#define DMACH_M    0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
+#define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S    31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F    CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S    25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S    17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S    16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
+
+#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S    26
+
+#define T6_SRAM_PRIO3_S    23
+#define T6_SRAM_PRIO3_M    0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S    20
+#define T6_SRAM_PRIO2_M    0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S    17
+#define T6_SRAM_PRIO1_M    0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S    14
+#define T6_SRAM_PRIO0_M    0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S    13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F    T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S    12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F    T6_REPLICATE_V(1U)
+
+#define T6_PF_S    9
+#define T6_PF_M    0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S    8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F    T6_VF_VALID_V(1U)
+
+#define T6_VF_S    0
+#define T6_VF_M    0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
+#define MPS_CLS_SRAM_H_A 0xe004
+
+#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MULTILISTEN0_S    25
+
+#define REPLICATE_S    11
+#define REPLICATE_V(x) ((x) << REPLICATE_S)
+#define REPLICATE_F    REPLICATE_V(1U)
+
+#define PF_S    8
+#define PF_M    0x7U
+#define PF_G(x) (((x) >> PF_S) & PF_M)
+
+#define VF_VALID_S    7
+#define VF_VALID_V(x) ((x) << VF_VALID_S)
+#define VF_VALID_F    VF_VALID_V(1U)
+
+#define VF_S    0
+#define VF_M    0x7fU
+#define VF_G(x) (((x) >> VF_S) & VF_M)
+
+#define SRAM_PRIO3_S    22
+#define SRAM_PRIO3_M    0x7U
+#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
+
+#define SRAM_PRIO2_S    19
+#define SRAM_PRIO2_M    0x7U
+#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
+
+#define SRAM_PRIO1_S    16
+#define SRAM_PRIO1_M    0x7U
+#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
+
+#define SRAM_PRIO0_S    13
+#define SRAM_PRIO0_M    0x7U
+#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
+
+#define SRAM_VLD_S    12
+#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
+#define SRAM_VLD_F    SRAM_VLD_V(1U)
+
+#define PORTMAP_S    0
+#define PORTMAP_M    0xfU
+#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
+
+#define CPL_INTR_CAUSE_A 0x19054
+
+#define CIM_OP_MAP_PERR_S    5
+#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
+#define CIM_OP_MAP_PERR_F    CIM_OP_MAP_PERR_V(1U)
+
+#define CIM_OVFL_ERROR_S    4
+#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
+#define CIM_OVFL_ERROR_F    CIM_OVFL_ERROR_V(1U)
+
+#define TP_FRAMING_ERROR_S    3
+#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
+#define TP_FRAMING_ERROR_F    TP_FRAMING_ERROR_V(1U)
+
+#define SGE_FRAMING_ERROR_S    2
+#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
+#define SGE_FRAMING_ERROR_F    SGE_FRAMING_ERROR_V(1U)
+
+#define CIM_FRAMING_ERROR_S    1
+#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
+#define CIM_FRAMING_ERROR_F    CIM_FRAMING_ERROR_V(1U)
+
+#define ZERO_SWITCH_ERROR_S    0
+#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
+#define ZERO_SWITCH_ERROR_F    ZERO_SWITCH_ERROR_V(1U)
+
+#define SMB_INT_CAUSE_A 0x19090
+
+#define MSTTXFIFOPARINT_S    21
+#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
+#define MSTTXFIFOPARINT_F    MSTTXFIFOPARINT_V(1U)
+
+#define MSTRXFIFOPARINT_S    20
+#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
+#define MSTRXFIFOPARINT_F    MSTRXFIFOPARINT_V(1U)
+
+#define SLVFIFOPARINT_S    19
+#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
+#define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
+
+#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
+#define ULP_RX_ISCSI_TAGMASK_A 0x19164
+#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
+#define ULP_RX_LA_CTL_A 0x1923c
+#define ULP_RX_LA_RDPTR_A 0x19240
+#define ULP_RX_LA_RDDATA_A 0x19244
+#define ULP_RX_LA_WRPTR_A 0x19248
+
+#define HPZ3_S    24
+#define HPZ3_V(x) ((x) << HPZ3_S)
+
+#define HPZ2_S    16
+#define HPZ2_V(x) ((x) << HPZ2_S)
+
+#define HPZ1_S    8
+#define HPZ1_V(x) ((x) << HPZ1_S)
+
+#define HPZ0_S    0
+#define HPZ0_V(x) ((x) << HPZ0_S)
+
+#define ULP_RX_TDDP_PSZ_A 0x19178
+
+/* registers for module SF */
+#define SF_DATA_A 0x193f8
+#define SF_OP_A 0x193fc
+
+#define SF_BUSY_S    31
+#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
+#define SF_BUSY_F    SF_BUSY_V(1U)
+
+#define SF_LOCK_S    4
+#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
+#define SF_LOCK_F    SF_LOCK_V(1U)
+
+#define SF_CONT_S    3
+#define SF_CONT_V(x) ((x) << SF_CONT_S)
+#define SF_CONT_F    SF_CONT_V(1U)
+
+#define BYTECNT_S    1
+#define BYTECNT_V(x) ((x) << BYTECNT_S)
+
+#define OP_S    0
+#define OP_V(x) ((x) << OP_S)
+#define OP_F    OP_V(1U)
+
+#define PL_PF_INT_CAUSE_A 0x3c0
+
+#define PFSW_S    3
+#define PFSW_V(x) ((x) << PFSW_S)
+#define PFSW_F    PFSW_V(1U)
+
+#define PFCIM_S    1
+#define PFCIM_V(x) ((x) << PFCIM_S)
+#define PFCIM_F    PFCIM_V(1U)
+
+#define PL_PF_INT_ENABLE_A 0x3c4
+#define PL_PF_CTL_A 0x3c8
+
+#define PL_WHOAMI_A 0x19400
+
+#define SOURCEPF_S    8
+#define SOURCEPF_M    0x7U
+#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+
+#define T6_SOURCEPF_S    9
+#define T6_SOURCEPF_M    0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
+#define PL_INT_CAUSE_A 0x1940c
+
+#define ULP_TX_S    27
+#define ULP_TX_V(x) ((x) << ULP_TX_S)
+#define ULP_TX_F    ULP_TX_V(1U)
+
+#define SGE_S    26
+#define SGE_V(x) ((x) << SGE_S)
+#define SGE_F    SGE_V(1U)
+
+#define CPL_SWITCH_S    24
+#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
+#define CPL_SWITCH_F    CPL_SWITCH_V(1U)
+
+#define ULP_RX_S    23
+#define ULP_RX_V(x) ((x) << ULP_RX_S)
+#define ULP_RX_F    ULP_RX_V(1U)
+
+#define PM_RX_S    22
+#define PM_RX_V(x) ((x) << PM_RX_S)
+#define PM_RX_F    PM_RX_V(1U)
+
+#define PM_TX_S    21
+#define PM_TX_V(x) ((x) << PM_TX_S)
+#define PM_TX_F    PM_TX_V(1U)
+
+#define MA_S    20
+#define MA_V(x) ((x) << MA_S)
+#define MA_F    MA_V(1U)
+
+#define TP_S    19
+#define TP_V(x) ((x) << TP_S)
+#define TP_F    TP_V(1U)
+
+#define LE_S    18
+#define LE_V(x) ((x) << LE_S)
+#define LE_F    LE_V(1U)
+
+#define EDC1_S    17
+#define EDC1_V(x) ((x) << EDC1_S)
+#define EDC1_F    EDC1_V(1U)
+
+#define EDC0_S    16
+#define EDC0_V(x) ((x) << EDC0_S)
+#define EDC0_F    EDC0_V(1U)
+
+#define MC_S    15
+#define MC_V(x) ((x) << MC_S)
+#define MC_F    MC_V(1U)
+
+#define PCIE_S    14
+#define PCIE_V(x) ((x) << PCIE_S)
+#define PCIE_F    PCIE_V(1U)
+
+#define XGMAC_KR1_S    12
+#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
+#define XGMAC_KR1_F    XGMAC_KR1_V(1U)
+
+#define XGMAC_KR0_S    11
+#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
+#define XGMAC_KR0_F    XGMAC_KR0_V(1U)
+
+#define XGMAC1_S    10
+#define XGMAC1_V(x) ((x) << XGMAC1_S)
+#define XGMAC1_F    XGMAC1_V(1U)
+
+#define XGMAC0_S    9
+#define XGMAC0_V(x) ((x) << XGMAC0_S)
+#define XGMAC0_F    XGMAC0_V(1U)
+
+#define SMB_S    8
+#define SMB_V(x) ((x) << SMB_S)
+#define SMB_F    SMB_V(1U)
+
+#define SF_S    7
+#define SF_V(x) ((x) << SF_S)
+#define SF_F    SF_V(1U)
+
+#define PL_S    6
+#define PL_V(x) ((x) << PL_S)
+#define PL_F    PL_V(1U)
+
+#define NCSI_S    5
+#define NCSI_V(x) ((x) << NCSI_S)
+#define NCSI_F    NCSI_V(1U)
+
+#define MPS_S    4
+#define MPS_V(x) ((x) << MPS_S)
+#define MPS_F    MPS_V(1U)
+
+#define CIM_S    0
+#define CIM_V(x) ((x) << CIM_S)
+#define CIM_F    CIM_V(1U)
+
+#define MC1_S    31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F    MC1_V(1U)
+
+#define PL_INT_ENABLE_A 0x19410
+#define PL_INT_MAP0_A 0x19414
+#define PL_RST_A 0x19428
+
+#define PIORST_S    1
+#define PIORST_V(x) ((x) << PIORST_S)
+#define PIORST_F    PIORST_V(1U)
+
+#define PIORSTMODE_S    0
+#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
+#define PIORSTMODE_F    PIORSTMODE_V(1U)
+
+#define PL_PL_INT_CAUSE_A 0x19430
+
+#define FATALPERR_S    4
+#define FATALPERR_V(x) ((x) << FATALPERR_S)
+#define FATALPERR_F    FATALPERR_V(1U)
+
+#define PERRVFID_S    0
+#define PERRVFID_V(x) ((x) << PERRVFID_S)
+#define PERRVFID_F    PERRVFID_V(1U)
+
+#define PL_REV_A 0x1943c
+
+#define REV_S    0
+#define REV_M    0xfU
+#define REV_V(x) ((x) << REV_S)
+#define REV_G(x) (((x) >> REV_S) & REV_M)
+
+#define T6_UNKNOWNCMD_S    3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F    T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S    2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F    T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S    1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F    T6_LIPMISS_V(1U)
+
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
+#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S    20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F    HASHEN_V(1U)
+
+#define REQQPARERR_S    16
+#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
+#define REQQPARERR_F    REQQPARERR_V(1U)
+
+#define UNKNOWNCMD_S    15
+#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
+#define UNKNOWNCMD_F    UNKNOWNCMD_V(1U)
+
+#define PARITYERR_S    6
+#define PARITYERR_V(x) ((x) << PARITYERR_S)
+#define PARITYERR_F    PARITYERR_V(1U)
+
+#define LIPMISS_S    5
+#define LIPMISS_V(x) ((x) << LIPMISS_S)
+#define LIPMISS_F    LIPMISS_V(1U)
+
+#define LIP0_S    4
+#define LIP0_V(x) ((x) << LIP0_S)
+#define LIP0_F    LIP0_V(1U)
+
+#define BASEADDR_S    3
+#define BASEADDR_M    0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
+#define TCAMINTPERR_S    13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F    TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S    10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F    SSRAMINTPERR_V(1U)
+
+#define NCSI_INT_CAUSE_A 0x1a0d8
+
+#define CIM_DM_PRTY_ERR_S    8
+#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
+#define CIM_DM_PRTY_ERR_F    CIM_DM_PRTY_ERR_V(1U)
+
+#define MPS_DM_PRTY_ERR_S    7
+#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
+#define MPS_DM_PRTY_ERR_F    MPS_DM_PRTY_ERR_V(1U)
+
+#define TXFIFO_PRTY_ERR_S    1
+#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
+#define TXFIFO_PRTY_ERR_F    TXFIFO_PRTY_ERR_V(1U)
+
+#define RXFIFO_PRTY_ERR_S    0
+#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
+#define RXFIFO_PRTY_ERR_F    RXFIFO_PRTY_ERR_V(1U)
+
+#define XGMAC_PORT_CFG2_A 0x1018
+
+#define PATEN_S    18
+#define PATEN_V(x) ((x) << PATEN_S)
+#define PATEN_F    PATEN_V(1U)
+
+#define MAGICEN_S    17
+#define MAGICEN_V(x) ((x) << MAGICEN_S)
+#define MAGICEN_F    MAGICEN_V(1U)
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
+#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
+#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
+#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
+#define XGMAC_PORT_EPIO_OP_A 0x10d0
+
+#define EPIOWR_S    8
+#define EPIOWR_V(x) ((x) << EPIOWR_S)
+#define EPIOWR_F    EPIOWR_V(1U)
+
+#define ADDRESS_S    0
+#define ADDRESS_V(x) ((x) << ADDRESS_S)
+
+#define MAC_PORT_INT_CAUSE_A 0x8dc
+#define XGMAC_PORT_INT_CAUSE_A 0x10dc
+
+#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
+
+#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
+#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
+
+#define TX_MOD_QUEUE_REQ_MAP_S    0
+#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
+
+#define TX_MODQ_WEIGHT3_S    24
+#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
+
+#define TX_MODQ_WEIGHT2_S    16
+#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
+
+#define TX_MODQ_WEIGHT1_S    8
+#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
+
+#define TX_MODQ_WEIGHT0_S    0
+#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
+
+#define TP_TX_SCHED_HDR_A 0x23
+#define TP_TX_SCHED_FIFO_A 0x24
+#define TP_TX_SCHED_PCMD_A 0x25
+
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MC_0_BASE_ADDR 0x40000
+#define MC_1_BASE_ADDR 0x48000
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) ((reg) + MC_STRIDE * (idx))
+
+#define MC_P_BIST_CMD_A			0x41400
+#define MC_P_BIST_CMD_ADDR_A		0x41404
+#define MC_P_BIST_CMD_LEN_A		0x41408
+#define MC_P_BIST_DATA_PATTERN_A	0x4140c
+#define MC_P_BIST_STATUS_RDATA_A	0x41488
+
+#define EDC_T50_BASE_ADDR		0x50000
+
+#define EDC_H_BIST_CMD_A		0x50004
+#define EDC_H_BIST_CMD_ADDR_A		0x50008
+#define EDC_H_BIST_CMD_LEN_A		0x5000c
+#define EDC_H_BIST_DATA_PATTERN_A	0x50010
+#define EDC_H_BIST_STATUS_RDATA_A	0x50028
+
+#define EDC_H_ECC_ERR_ADDR_A		0x50084
+#define EDC_T51_BASE_ADDR		0x50800
+
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) ((reg) + EDC_T5_STRIDE * (idx))
+
+#define PL_VF_REV_A 0x4
+#define PL_VF_WHOAMI_A 0x0
+#define PL_VF_REVISION_A 0x8
+
+/* registers for module CIM */
+#define CIM_HOST_ACC_CTRL_A	0x7b50
+#define CIM_HOST_ACC_DATA_A	0x7b54
+#define UP_UP_DBG_LA_CFG_A	0x140
+#define UP_UP_DBG_LA_DATA_A	0x144
+
+#define HOSTBUSY_S	17
+#define HOSTBUSY_V(x)	((x) << HOSTBUSY_S)
+#define HOSTBUSY_F	HOSTBUSY_V(1U)
+
+#define HOSTWRITE_S	16
+#define HOSTWRITE_V(x)	((x) << HOSTWRITE_S)
+#define HOSTWRITE_F	HOSTWRITE_V(1U)
+
+#define CIM_IBQ_DBG_CFG_A 0x7b60
+
+#define IBQDBGADDR_S    16
+#define IBQDBGADDR_M    0xfffU
+#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S)
+#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)
+
+#define IBQDBGBUSY_S    1
+#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S)
+#define IBQDBGBUSY_F    IBQDBGBUSY_V(1U)
+
+#define IBQDBGEN_S    0
+#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S)
+#define IBQDBGEN_F    IBQDBGEN_V(1U)
+
+#define CIM_OBQ_DBG_CFG_A 0x7b64
+
+#define OBQDBGADDR_S    16
+#define OBQDBGADDR_M    0xfffU
+#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S)
+#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M)
+
+#define OBQDBGBUSY_S    1
+#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S)
+#define OBQDBGBUSY_F    OBQDBGBUSY_V(1U)
+
+#define OBQDBGEN_S    0
+#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S)
+#define OBQDBGEN_F    OBQDBGEN_V(1U)
+
+#define CIM_IBQ_DBG_DATA_A 0x7b68
+#define CIM_OBQ_DBG_DATA_A 0x7b6c
+#define CIM_DEBUGCFG_A 0x7b70
+#define CIM_DEBUGSTS_A 0x7b74
+
+#define POLADBGRDPTR_S		23
+#define POLADBGRDPTR_M		0x1ffU
+#define POLADBGRDPTR_V(x)	((x) << POLADBGRDPTR_S)
+
+#define POLADBGWRPTR_S		16
+#define POLADBGWRPTR_M		0x1ffU
+#define POLADBGWRPTR_G(x)	(((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M)
+
+#define PILADBGRDPTR_S		14
+#define PILADBGRDPTR_M		0x1ffU
+#define PILADBGRDPTR_V(x)	((x) << PILADBGRDPTR_S)
+
+#define PILADBGWRPTR_S		0
+#define PILADBGWRPTR_M		0x1ffU
+#define PILADBGWRPTR_G(x)	(((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M)
+
+#define LADBGEN_S	12
+#define LADBGEN_V(x)	((x) << LADBGEN_S)
+#define LADBGEN_F	LADBGEN_V(1U)
+
+#define CIM_PO_LA_DEBUGDATA_A 0x7b78
+#define CIM_PI_LA_DEBUGDATA_A 0x7b7c
+#define CIM_PO_LA_MADEBUGDATA_A	0x7b80
+#define CIM_PI_LA_MADEBUGDATA_A	0x7b84
+
+#define UPDBGLARDEN_S		1
+#define UPDBGLARDEN_V(x)	((x) << UPDBGLARDEN_S)
+#define UPDBGLARDEN_F		UPDBGLARDEN_V(1U)
+
+#define UPDBGLAEN_S	0
+#define UPDBGLAEN_V(x)	((x) << UPDBGLAEN_S)
+#define UPDBGLAEN_F	UPDBGLAEN_V(1U)
+
+#define UPDBGLARDPTR_S		2
+#define UPDBGLARDPTR_M		0xfffU
+#define UPDBGLARDPTR_V(x)	((x) << UPDBGLARDPTR_S)
+
+#define UPDBGLAWRPTR_S    16
+#define UPDBGLAWRPTR_M    0xfffU
+#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
+
+#define UPDBGLACAPTPCONLY_S	30
+#define UPDBGLACAPTPCONLY_V(x)	((x) << UPDBGLACAPTPCONLY_S)
+#define UPDBGLACAPTPCONLY_F	UPDBGLACAPTPCONLY_V(1U)
+
+#define CIM_QUEUE_CONFIG_REF_A 0x7b48
+#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
+
+#define CIMQSIZE_S    24
+#define CIMQSIZE_M    0x3fU
+#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
+
+#define CIMQBASE_S    16
+#define CIMQBASE_M    0x3fU
+#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
+
+#define QUEFULLTHRSH_S    0
+#define QUEFULLTHRSH_M    0x1ffU
+#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
+
+#define UP_IBQ_0_RDADDR_A 0x10
+#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
+#define UP_OBQ_0_REALADDR_A 0x104
+#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
+
+#define IBQRDADDR_S    0
+#define IBQRDADDR_M    0x1fffU
+#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
+
+#define IBQWRADDR_S    0
+#define IBQWRADDR_M    0x1fffU
+#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
+
+#define QUERDADDR_S    0
+#define QUERDADDR_M    0x7fffU
+#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
+
+#define QUEREMFLITS_S    0
+#define QUEREMFLITS_M    0x7ffU
+#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
+
+#define QUEEOPCNT_S    16
+#define QUEEOPCNT_M    0xfffU
+#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
+
+#define QUESOPCNT_S    0
+#define QUESOPCNT_M    0xfffU
+#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
+
+#define OBQSELECT_S    4
+#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
+#define OBQSELECT_F    OBQSELECT_V(1U)
+
+#define IBQSELECT_S    3
+#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
+#define IBQSELECT_F    IBQSELECT_V(1U)
+
+#define QUENUMSELECT_S    0
+#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
+
+#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644
index 0000000..7bdee3b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -0,0 +1,148 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for various T4 register value hardware
+ * constants.  The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior.  For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X		1
+
+#define INGPCIEBOUNDARY_SHIFT_X		5
+#define INGPCIEBOUNDARY_32B_X		0
+
+#define INGPADBOUNDARY_SHIFT_X		5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X		5
+#define INGPACKBOUNDARY_16B_X		0
+
+/* GTS register */
+#define SGE_TIMERREGS			6
+#define TIMERREG_COUNTER0_X		0
+
+#define FETCHBURSTMIN_64B_X		2
+
+#define FETCHBURSTMAX_256B_X		2
+#define FETCHBURSTMAX_512B_X		3
+
+#define HOSTFCMODE_STATUS_PAGE_X	2
+
+#define CIDXFLUSHTHRESH_32_X		5
+
+#define UPDATEDELIVERY_INTERRUPT_X	1
+
+#define RSPD_TYPE_FLBUF_X		0
+#define RSPD_TYPE_CPL_X			1
+#define RSPD_TYPE_INTR_X		2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S		19
+#define CONMCTXT_CNGTPMODE_V(x)		((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S		0
+#define CONMCTXT_CNGCHMAP_V(x)		((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X	2
+#define CONMCTXT_CNGTPMODE_QUEUE_X	1
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64.  For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE		128
+#define SGE_UDB_KDOORBELL	8
+#define SGE_UDB_GTS		20
+#define SGE_UDB_WCDOORBELL	64
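+
+/* Illustrative sketch only (not part of the hardware definition): the Simple
+ * Doorbell, Going To Sleep and Write Combining registers of a given User
+ * Doorbell page would typically be located by scaling the page index by
+ * SGE_UDB_SIZE and adding one of the register offsets above, e.g.
+ * example_udb_offset(qpage, SGE_UDB_GTS).  The helper below is an example,
+ * not a driver API.
+ */
+static inline unsigned int example_udb_offset(unsigned int udb_page,
+					      unsigned int reg_offset)
+{
+	return udb_page * SGE_UDB_SIZE + reg_offset;
+}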
+
+/* CIM register field values.
+ */
+#define X_MBOWNER_FW			1
+#define X_MBOWNER_PL			2
+
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X		10
+#define PCIEOFST_SHIFT_X	10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W                       1
+#define FT_PORT_W                       3
+#define FT_VNIC_ID_W                    17
+#define FT_VLAN_W                       17
+#define FT_TOS_W                        8
+#define FT_PROTOCOL_W                   8
+#define FT_ETHERTYPE_W                  16
+#define FT_MACMATCH_W                   9
+#define FT_MPSHITTYPE_W                 3
+#define FT_FRAGMENTATION_W              1
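+
+/* Illustrative sketch only: assuming the TP_VLAN_PRI_MAP selector bits follow
+ * the same LSB-first order as the width definitions above, a field's starting
+ * bit position within the Compressed Filter Tuple can be computed by summing
+ * the widths of the enabled lower-order fields.  The helper below is an
+ * example under that assumption, not part of the firmware interface.
+ */
+static inline unsigned int example_ft_field_shift(unsigned int filter_mode,
+						  unsigned int field_bit)
+{
+	static const unsigned int ft_width[] = {
+		FT_FCOE_W, FT_PORT_W, FT_VNIC_ID_W, FT_VLAN_W, FT_TOS_W,
+		FT_PROTOCOL_W, FT_ETHERTYPE_W, FT_MACMATCH_W,
+		FT_MPSHITTYPE_W, FT_FRAGMENTATION_W,
+	};
+	unsigned int shift = 0, bit;
+
+	for (bit = 0; bit < field_bit; bit++)
+		if (filter_mode & (1U << bit))
+			shift += ft_width[bit];
+	return shift;
+}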
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define FT_VLAN_VLD_S                   16
+#define FT_VLAN_VLD_V(x)                ((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F                   FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S                 0
+#define FT_VNID_ID_VF_V(x)              ((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S                 7
+#define FT_VNID_ID_PF_V(x)              ((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S                16
+#define FT_VNID_ID_VLD_V(x)             ((x) << FT_VNID_ID_VLD_S)
+
+#endif /* __T4_VALUES_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
new file mode 100644
index 0000000..a32de30
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -0,0 +1,3234 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+enum fw_retval {
+	FW_SUCCESS		= 0,	/* completed successfully */
+	FW_EPERM		= 1,	/* operation not permitted */
+	FW_ENOENT		= 2,	/* no such file or directory */
+	FW_EIO			= 5,	/* input/output error; hw bad */
+	FW_ENOEXEC		= 8,	/* exec format error; inv microcode */
+	FW_EAGAIN		= 11,	/* try again */
+	FW_ENOMEM		= 12,	/* out of memory */
+	FW_EFAULT		= 14,	/* bad address; fw bad */
+	FW_EBUSY		= 16,	/* resource busy */
+	FW_EEXIST		= 17,	/* file exists */
+	FW_ENODEV		= 19,	/* no such device */
+	FW_EINVAL		= 22,	/* invalid argument */
+	FW_ENOSPC		= 28,	/* no space left on device */
+	FW_ENOSYS		= 38,	/* functionality not implemented */
+	FW_ENODATA		= 61,	/* no data available */
+	FW_EPROTO		= 71,	/* protocol error */
+	FW_EADDRINUSE		= 98,	/* address already in use */
+	FW_EADDRNOTAVAIL	= 99,	/* cannot assign requested address */
+	FW_ENETDOWN		= 100,	/* network is down */
+	FW_ENETUNREACH		= 101,	/* network is unreachable */
+	FW_ENOBUFS		= 105,	/* no buffer space available */
+	FW_ETIMEDOUT		= 110,	/* timeout */
+	FW_EINPROGRESS		= 115,	/* fw internal */
+	FW_SCSI_ABORT_REQUESTED	= 128,	/* */
+	FW_SCSI_ABORT_TIMEDOUT	= 129,	/* */
+	FW_SCSI_ABORTED		= 130,	/* */
+	FW_SCSI_CLOSE_REQUESTED	= 131,	/* */
+	FW_ERR_LINK_DOWN	= 132,	/* */
+	FW_RDEV_NOT_READY	= 133,	/* */
+	FW_ERR_RDEV_LOST	= 134,	/* */
+	FW_ERR_RDEV_LOGO	= 135,	/* */
+	FW_FCOE_NO_XCHG		= 136,	/* */
+	FW_SCSI_RSP_ERR		= 137,	/* */
+	FW_ERR_RDEV_IMPL_LOGO	= 138,	/* */
+	FW_SCSI_UNDER_FLOW_ERR  = 139,	/* */
+	FW_SCSI_OVER_FLOW_ERR   = 140,	/* */
+	FW_SCSI_DDP_ERR		= 141,	/* DDP error*/
+	FW_SCSI_TASK_ERR	= 142,	/* No SCSI tasks available */
+};
+
+#define FW_T4VF_SGE_BASE_ADDR      0x0000
+#define FW_T4VF_MPS_BASE_ADDR      0x0100
+#define FW_T4VF_PL_BASE_ADDR       0x0200
+#define FW_T4VF_MBDATA_BASE_ADDR   0x0240
+#define FW_T4VF_CIM_BASE_ADDR      0x0300
+
+enum fw_wr_opcodes {
+	FW_FILTER_WR                   = 0x02,
+	FW_ULPTX_WR                    = 0x04,
+	FW_TP_WR                       = 0x05,
+	FW_ETH_TX_PKT_WR               = 0x08,
+	FW_OFLD_CONNECTION_WR          = 0x2f,
+	FW_FLOWC_WR                    = 0x0a,
+	FW_OFLD_TX_DATA_WR             = 0x0b,
+	FW_CMD_WR                      = 0x10,
+	FW_ETH_TX_PKT_VM_WR            = 0x11,
+	FW_RI_RES_WR                   = 0x0c,
+	FW_RI_INIT_WR                  = 0x0d,
+	FW_RI_RDMA_WRITE_WR            = 0x14,
+	FW_RI_SEND_WR                  = 0x15,
+	FW_RI_RDMA_READ_WR             = 0x16,
+	FW_RI_RECV_WR                  = 0x17,
+	FW_RI_BIND_MW_WR               = 0x18,
+	FW_RI_FR_NSMR_WR               = 0x19,
+	FW_RI_INV_LSTAG_WR             = 0x1a,
+	FW_LASTC2E_WR                  = 0x70
+};
+
+struct fw_wr_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+/* work request opcode (hi) */
+#define FW_WR_OP_S	24
+#define FW_WR_OP_M      0xff
+#define FW_WR_OP_V(x)   ((x) << FW_WR_OP_S)
+#define FW_WR_OP_G(x)   (((x) >> FW_WR_OP_S) & FW_WR_OP_M)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */
+#define FW_WR_ATOMIC_S		23
+#define FW_WR_ATOMIC_V(x)	((x) << FW_WR_ATOMIC_S)
+
+/* flush flag (hi) - firmware flushes any flushable work requests buffered
+ * in the flow context.
+ */
+#define FW_WR_FLUSH_S     22
+#define FW_WR_FLUSH_V(x)  ((x) << FW_WR_FLUSH_S)
+
+/* completion flag (hi) - firmware generates a cpl_fw6_ack */
+#define FW_WR_COMPL_S     21
+#define FW_WR_COMPL_V(x)  ((x) << FW_WR_COMPL_S)
+#define FW_WR_COMPL_F     FW_WR_COMPL_V(1U)
+
+/* work request immediate data length (hi) */
+#define FW_WR_IMMDLEN_S 0
+#define FW_WR_IMMDLEN_M 0xff
+#define FW_WR_IMMDLEN_V(x)      ((x) << FW_WR_IMMDLEN_S)
+
+/* egress queue status update to associated ingress queue entry (lo) */
+#define FW_WR_EQUIQ_S           31
+#define FW_WR_EQUIQ_V(x)        ((x) << FW_WR_EQUIQ_S)
+#define FW_WR_EQUIQ_F           FW_WR_EQUIQ_V(1U)
+
+/* egress queue status update to egress queue status entry (lo) */
+#define FW_WR_EQUEQ_S           30
+#define FW_WR_EQUEQ_V(x)        ((x) << FW_WR_EQUEQ_S)
+#define FW_WR_EQUEQ_F           FW_WR_EQUEQ_V(1U)
+
+/* flow context identifier (lo) */
+#define FW_WR_FLOWID_S          8
+#define FW_WR_FLOWID_V(x)       ((x) << FW_WR_FLOWID_S)
+
+/* length in units of 16-bytes (lo) */
+#define FW_WR_LEN16_S           0
+#define FW_WR_LEN16_V(x)        ((x) << FW_WR_LEN16_S)
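+
+/* Illustrative sketch only: the field macros above are typically OR'ed
+ * together to form the two big-endian words of struct fw_wr_hdr.  The opcode,
+ * immediate-data length, flow id and LEN16 values passed in here are
+ * placeholders; real work requests use their specific fw_*_wr structures.
+ */
+static inline void example_init_wr_hdr(struct fw_wr_hdr *wr, unsigned int op,
+				       unsigned int immdlen,
+				       unsigned int flowid, unsigned int len16)
+{
+	wr->hi = cpu_to_be32(FW_WR_OP_V(op) | FW_WR_COMPL_F |
+			     FW_WR_IMMDLEN_V(immdlen));
+	wr->lo = cpu_to_be32(FW_WR_FLOWID_V(flowid) | FW_WR_LEN16_V(len16));
+}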
+
+#define HW_TPL_FR_MT_PR_IV_P_FC         0X32B
+#define HW_TPL_FR_MT_PR_OV_P_FC         0X327
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+	FW_FILTER_WR_SUCCESS,
+	FW_FILTER_WR_FLT_ADDED,
+	FW_FILTER_WR_FLT_DELETED,
+	FW_FILTER_WR_SMT_TBL_FULL,
+	FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+	__be32 op_pkd;
+	__be32 len16_pkd;
+	__be64 r3;
+	__be32 tid_to_iq;
+	__be32 del_filter_to_l2tix;
+	__be16 ethtype;
+	__be16 ethtypem;
+	__u8   frag_to_ovlan_vldm;
+	__u8   smac_sel;
+	__be16 rx_chan_rx_rpl_iq;
+	__be32 maci_to_matchtypem;
+	__u8   ptcl;
+	__u8   ptclm;
+	__u8   ttyp;
+	__u8   ttypm;
+	__be16 ivlan;
+	__be16 ivlanm;
+	__be16 ovlan;
+	__be16 ovlanm;
+	__u8   lip[16];
+	__u8   lipm[16];
+	__u8   fip[16];
+	__u8   fipm[16];
+	__be16 lp;
+	__be16 lpm;
+	__be16 fp;
+	__be16 fpm;
+	__be16 r7;
+	__u8   sma[6];
+};
+
+#define FW_FILTER_WR_TID_S      12
+#define FW_FILTER_WR_TID_M      0xfffff
+#define FW_FILTER_WR_TID_V(x)   ((x) << FW_FILTER_WR_TID_S)
+#define FW_FILTER_WR_TID_G(x)   \
+	(((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M)
+
+#define FW_FILTER_WR_RQTYPE_S           11
+#define FW_FILTER_WR_RQTYPE_M           0x1
+#define FW_FILTER_WR_RQTYPE_V(x)        ((x) << FW_FILTER_WR_RQTYPE_S)
+#define FW_FILTER_WR_RQTYPE_G(x)        \
+	(((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M)
+#define FW_FILTER_WR_RQTYPE_F   FW_FILTER_WR_RQTYPE_V(1U)
+
+#define FW_FILTER_WR_NOREPLY_S          10
+#define FW_FILTER_WR_NOREPLY_M          0x1
+#define FW_FILTER_WR_NOREPLY_V(x)       ((x) << FW_FILTER_WR_NOREPLY_S)
+#define FW_FILTER_WR_NOREPLY_G(x)       \
+	(((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M)
+#define FW_FILTER_WR_NOREPLY_F  FW_FILTER_WR_NOREPLY_V(1U)
+
+#define FW_FILTER_WR_IQ_S       0
+#define FW_FILTER_WR_IQ_M       0x3ff
+#define FW_FILTER_WR_IQ_V(x)    ((x) << FW_FILTER_WR_IQ_S)
+#define FW_FILTER_WR_IQ_G(x)    \
+	(((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M)
+
+#define FW_FILTER_WR_DEL_FILTER_S       31
+#define FW_FILTER_WR_DEL_FILTER_M       0x1
+#define FW_FILTER_WR_DEL_FILTER_V(x)    ((x) << FW_FILTER_WR_DEL_FILTER_S)
+#define FW_FILTER_WR_DEL_FILTER_G(x)    \
+	(((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M)
+#define FW_FILTER_WR_DEL_FILTER_F       FW_FILTER_WR_DEL_FILTER_V(1U)
+
+#define FW_FILTER_WR_RPTTID_S           25
+#define FW_FILTER_WR_RPTTID_M           0x1
+#define FW_FILTER_WR_RPTTID_V(x)        ((x) << FW_FILTER_WR_RPTTID_S)
+#define FW_FILTER_WR_RPTTID_G(x)        \
+	(((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M)
+#define FW_FILTER_WR_RPTTID_F   FW_FILTER_WR_RPTTID_V(1U)
+
+#define FW_FILTER_WR_DROP_S     24
+#define FW_FILTER_WR_DROP_M     0x1
+#define FW_FILTER_WR_DROP_V(x)  ((x) << FW_FILTER_WR_DROP_S)
+#define FW_FILTER_WR_DROP_G(x)  \
+	(((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M)
+#define FW_FILTER_WR_DROP_F     FW_FILTER_WR_DROP_V(1U)
+
+#define FW_FILTER_WR_DIRSTEER_S         23
+#define FW_FILTER_WR_DIRSTEER_M         0x1
+#define FW_FILTER_WR_DIRSTEER_V(x)      ((x) << FW_FILTER_WR_DIRSTEER_S)
+#define FW_FILTER_WR_DIRSTEER_G(x)      \
+	(((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M)
+#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U)
+
+#define FW_FILTER_WR_MASKHASH_S         22
+#define FW_FILTER_WR_MASKHASH_M         0x1
+#define FW_FILTER_WR_MASKHASH_V(x)      ((x) << FW_FILTER_WR_MASKHASH_S)
+#define FW_FILTER_WR_MASKHASH_G(x)      \
+	(((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M)
+#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U)
+
+#define FW_FILTER_WR_DIRSTEERHASH_S     21
+#define FW_FILTER_WR_DIRSTEERHASH_M     0x1
+#define FW_FILTER_WR_DIRSTEERHASH_V(x)  ((x) << FW_FILTER_WR_DIRSTEERHASH_S)
+#define FW_FILTER_WR_DIRSTEERHASH_G(x)  \
+	(((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M)
+#define FW_FILTER_WR_DIRSTEERHASH_F     FW_FILTER_WR_DIRSTEERHASH_V(1U)
+
+#define FW_FILTER_WR_LPBK_S     20
+#define FW_FILTER_WR_LPBK_M     0x1
+#define FW_FILTER_WR_LPBK_V(x)  ((x) << FW_FILTER_WR_LPBK_S)
+#define FW_FILTER_WR_LPBK_G(x)  \
+	(((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M)
+#define FW_FILTER_WR_LPBK_F     FW_FILTER_WR_LPBK_V(1U)
+
+#define FW_FILTER_WR_DMAC_S     19
+#define FW_FILTER_WR_DMAC_M     0x1
+#define FW_FILTER_WR_DMAC_V(x)  ((x) << FW_FILTER_WR_DMAC_S)
+#define FW_FILTER_WR_DMAC_G(x)  \
+	(((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M)
+#define FW_FILTER_WR_DMAC_F     FW_FILTER_WR_DMAC_V(1U)
+
+#define FW_FILTER_WR_SMAC_S     18
+#define FW_FILTER_WR_SMAC_M     0x1
+#define FW_FILTER_WR_SMAC_V(x)  ((x) << FW_FILTER_WR_SMAC_S)
+#define FW_FILTER_WR_SMAC_G(x)  \
+	(((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M)
+#define FW_FILTER_WR_SMAC_F     FW_FILTER_WR_SMAC_V(1U)
+
+#define FW_FILTER_WR_INSVLAN_S          17
+#define FW_FILTER_WR_INSVLAN_M          0x1
+#define FW_FILTER_WR_INSVLAN_V(x)       ((x) << FW_FILTER_WR_INSVLAN_S)
+#define FW_FILTER_WR_INSVLAN_G(x)       \
+	(((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M)
+#define FW_FILTER_WR_INSVLAN_F  FW_FILTER_WR_INSVLAN_V(1U)
+
+#define FW_FILTER_WR_RMVLAN_S           16
+#define FW_FILTER_WR_RMVLAN_M           0x1
+#define FW_FILTER_WR_RMVLAN_V(x)        ((x) << FW_FILTER_WR_RMVLAN_S)
+#define FW_FILTER_WR_RMVLAN_G(x)        \
+	(((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M)
+#define FW_FILTER_WR_RMVLAN_F   FW_FILTER_WR_RMVLAN_V(1U)
+
+#define FW_FILTER_WR_HITCNTS_S          15
+#define FW_FILTER_WR_HITCNTS_M          0x1
+#define FW_FILTER_WR_HITCNTS_V(x)       ((x) << FW_FILTER_WR_HITCNTS_S)
+#define FW_FILTER_WR_HITCNTS_G(x)       \
+	(((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M)
+#define FW_FILTER_WR_HITCNTS_F  FW_FILTER_WR_HITCNTS_V(1U)
+
+#define FW_FILTER_WR_TXCHAN_S           13
+#define FW_FILTER_WR_TXCHAN_M           0x3
+#define FW_FILTER_WR_TXCHAN_V(x)        ((x) << FW_FILTER_WR_TXCHAN_S)
+#define FW_FILTER_WR_TXCHAN_G(x)        \
+	(((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M)
+
+#define FW_FILTER_WR_PRIO_S     12
+#define FW_FILTER_WR_PRIO_M     0x1
+#define FW_FILTER_WR_PRIO_V(x)  ((x) << FW_FILTER_WR_PRIO_S)
+#define FW_FILTER_WR_PRIO_G(x)  \
+	(((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M)
+#define FW_FILTER_WR_PRIO_F     FW_FILTER_WR_PRIO_V(1U)
+
+#define FW_FILTER_WR_L2TIX_S    0
+#define FW_FILTER_WR_L2TIX_M    0xfff
+#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S)
+#define FW_FILTER_WR_L2TIX_G(x) \
+	(((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M)
+
+#define FW_FILTER_WR_FRAG_S     7
+#define FW_FILTER_WR_FRAG_M     0x1
+#define FW_FILTER_WR_FRAG_V(x)  ((x) << FW_FILTER_WR_FRAG_S)
+#define FW_FILTER_WR_FRAG_G(x)  \
+	(((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M)
+#define FW_FILTER_WR_FRAG_F     FW_FILTER_WR_FRAG_V(1U)
+
+#define FW_FILTER_WR_FRAGM_S    6
+#define FW_FILTER_WR_FRAGM_M    0x1
+#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S)
+#define FW_FILTER_WR_FRAGM_G(x) \
+	(((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M)
+#define FW_FILTER_WR_FRAGM_F    FW_FILTER_WR_FRAGM_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLD_S        5
+#define FW_FILTER_WR_IVLAN_VLD_M        0x1
+#define FW_FILTER_WR_IVLAN_VLD_V(x)     ((x) << FW_FILTER_WR_IVLAN_VLD_S)
+#define FW_FILTER_WR_IVLAN_VLD_G(x)     \
+	(((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M)
+#define FW_FILTER_WR_IVLAN_VLD_F        FW_FILTER_WR_IVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLD_S        4
+#define FW_FILTER_WR_OVLAN_VLD_M        0x1
+#define FW_FILTER_WR_OVLAN_VLD_V(x)     ((x) << FW_FILTER_WR_OVLAN_VLD_S)
+#define FW_FILTER_WR_OVLAN_VLD_G(x)     \
+	(((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M)
+#define FW_FILTER_WR_OVLAN_VLD_F        FW_FILTER_WR_OVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLDM_S       3
+#define FW_FILTER_WR_IVLAN_VLDM_M       0x1
+#define FW_FILTER_WR_IVLAN_VLDM_V(x)    ((x) << FW_FILTER_WR_IVLAN_VLDM_S)
+#define FW_FILTER_WR_IVLAN_VLDM_G(x)    \
+	(((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M)
+#define FW_FILTER_WR_IVLAN_VLDM_F       FW_FILTER_WR_IVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLDM_S       2
+#define FW_FILTER_WR_OVLAN_VLDM_M       0x1
+#define FW_FILTER_WR_OVLAN_VLDM_V(x)    ((x) << FW_FILTER_WR_OVLAN_VLDM_S)
+#define FW_FILTER_WR_OVLAN_VLDM_G(x)    \
+	(((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M)
+#define FW_FILTER_WR_OVLAN_VLDM_F       FW_FILTER_WR_OVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_RX_CHAN_S          15
+#define FW_FILTER_WR_RX_CHAN_M          0x1
+#define FW_FILTER_WR_RX_CHAN_V(x)       ((x) << FW_FILTER_WR_RX_CHAN_S)
+#define FW_FILTER_WR_RX_CHAN_G(x)       \
+	(((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M)
+#define FW_FILTER_WR_RX_CHAN_F  FW_FILTER_WR_RX_CHAN_V(1U)
+
+#define FW_FILTER_WR_RX_RPL_IQ_S        0
+#define FW_FILTER_WR_RX_RPL_IQ_M        0x3ff
+#define FW_FILTER_WR_RX_RPL_IQ_V(x)     ((x) << FW_FILTER_WR_RX_RPL_IQ_S)
+#define FW_FILTER_WR_RX_RPL_IQ_G(x)     \
+	(((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M)
+
+#define FW_FILTER_WR_MACI_S     23
+#define FW_FILTER_WR_MACI_M     0x1ff
+#define FW_FILTER_WR_MACI_V(x)  ((x) << FW_FILTER_WR_MACI_S)
+#define FW_FILTER_WR_MACI_G(x)  \
+	(((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M)
+
+#define FW_FILTER_WR_MACIM_S    14
+#define FW_FILTER_WR_MACIM_M    0x1ff
+#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S)
+#define FW_FILTER_WR_MACIM_G(x) \
+	(((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M)
+
+#define FW_FILTER_WR_FCOE_S     13
+#define FW_FILTER_WR_FCOE_M     0x1
+#define FW_FILTER_WR_FCOE_V(x)  ((x) << FW_FILTER_WR_FCOE_S)
+#define FW_FILTER_WR_FCOE_G(x)  \
+	(((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M)
+#define FW_FILTER_WR_FCOE_F     FW_FILTER_WR_FCOE_V(1U)
+
+#define FW_FILTER_WR_FCOEM_S    12
+#define FW_FILTER_WR_FCOEM_M    0x1
+#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S)
+#define FW_FILTER_WR_FCOEM_G(x) \
+	(((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M)
+#define FW_FILTER_WR_FCOEM_F    FW_FILTER_WR_FCOEM_V(1U)
+
+#define FW_FILTER_WR_PORT_S     9
+#define FW_FILTER_WR_PORT_M     0x7
+#define FW_FILTER_WR_PORT_V(x)  ((x) << FW_FILTER_WR_PORT_S)
+#define FW_FILTER_WR_PORT_G(x)  \
+	(((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M)
+
+#define FW_FILTER_WR_PORTM_S    6
+#define FW_FILTER_WR_PORTM_M    0x7
+#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S)
+#define FW_FILTER_WR_PORTM_G(x) \
+	(((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M)
+
+#define FW_FILTER_WR_MATCHTYPE_S        3
+#define FW_FILTER_WR_MATCHTYPE_M        0x7
+#define FW_FILTER_WR_MATCHTYPE_V(x)     ((x) << FW_FILTER_WR_MATCHTYPE_S)
+#define FW_FILTER_WR_MATCHTYPE_G(x)     \
+	(((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M)
+
+#define FW_FILTER_WR_MATCHTYPEM_S       0
+#define FW_FILTER_WR_MATCHTYPEM_M       0x7
+#define FW_FILTER_WR_MATCHTYPEM_V(x)    ((x) << FW_FILTER_WR_MATCHTYPEM_S)
+#define FW_FILTER_WR_MATCHTYPEM_G(x)    \
+	(((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M)
+
+struct fw_ulptx_wr {
+	__be32 op_to_compl;
+	__be32 flowid_len16;
+	u64 cookie;
+};
+
+struct fw_tp_wr {
+	__be32 op_to_immdlen;
+	__be32 flowid_len16;
+	u64 cookie;
+};
+
+struct fw_eth_tx_pkt_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be64 r3;
+};
+
+struct fw_ofld_connection_wr {
+	__be32 op_compl;
+	__be32 len16_pkd;
+	__u64  cookie;
+	__be64 r2;
+	__be64 r3;
+	struct fw_ofld_connection_le {
+		__be32 version_cpl;
+		__be32 filter;
+		__be32 r1;
+		__be16 lport;
+		__be16 pport;
+		union fw_ofld_connection_leip {
+			struct fw_ofld_connection_le_ipv4 {
+				__be32 pip;
+				__be32 lip;
+				__be64 r0;
+				__be64 r1;
+				__be64 r2;
+			} ipv4;
+			struct fw_ofld_connection_le_ipv6 {
+				__be64 pip_hi;
+				__be64 pip_lo;
+				__be64 lip_hi;
+				__be64 lip_lo;
+			} ipv6;
+		} u;
+	} le;
+	struct fw_ofld_connection_tcb {
+		__be32 t_state_to_astid;
+		__be16 cplrxdataack_cplpassacceptrpl;
+		__be16 rcv_adv;
+		__be32 rcv_nxt;
+		__be32 tx_max;
+		__be64 opt0;
+		__be32 opt2;
+		__be32 r1;
+		__be64 r2;
+		__be64 r3;
+	} tcb;
+};
+
+#define FW_OFLD_CONNECTION_WR_VERSION_S                31
+#define FW_OFLD_CONNECTION_WR_VERSION_M                0x1
+#define FW_OFLD_CONNECTION_WR_VERSION_V(x)     \
+	((x) << FW_OFLD_CONNECTION_WR_VERSION_S)
+#define FW_OFLD_CONNECTION_WR_VERSION_G(x)     \
+	(((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \
+	FW_OFLD_CONNECTION_WR_VERSION_M)
+#define FW_OFLD_CONNECTION_WR_VERSION_F        \
+	FW_OFLD_CONNECTION_WR_VERSION_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPL_S    30
+#define FW_OFLD_CONNECTION_WR_CPL_M    0x1
+#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S)
+#define FW_OFLD_CONNECTION_WR_CPL_G(x) \
+	(((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M)
+#define FW_OFLD_CONNECTION_WR_CPL_F    FW_OFLD_CONNECTION_WR_CPL_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_T_STATE_S                28
+#define FW_OFLD_CONNECTION_WR_T_STATE_M                0xf
+#define FW_OFLD_CONNECTION_WR_T_STATE_V(x)     \
+	((x) << FW_OFLD_CONNECTION_WR_T_STATE_S)
+#define FW_OFLD_CONNECTION_WR_T_STATE_G(x)     \
+	(((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \
+	FW_OFLD_CONNECTION_WR_T_STATE_M)
+
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S      24
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M      0xf
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x)   \
+	((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S)
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x)   \
+	(((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \
+	FW_OFLD_CONNECTION_WR_RCV_SCALE_M)
+
+#define FW_OFLD_CONNECTION_WR_ASTID_S          0
+#define FW_OFLD_CONNECTION_WR_ASTID_M          0xffffff
+#define FW_OFLD_CONNECTION_WR_ASTID_V(x)       \
+	((x) << FW_OFLD_CONNECTION_WR_ASTID_S)
+#define FW_OFLD_CONNECTION_WR_ASTID_G(x)       \
+	(((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M)
+
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S   15
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M   0x1
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x)        \
+	((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x)        \
+	(((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \
+	FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F   \
+	FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S       14
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M       0x1
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x)    \
+	((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x)    \
+	(((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \
+	FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F       \
+	FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U)
+
+enum fw_flowc_mnem {
+	FW_FLOWC_MNEM_PFNVFN,		/* PFN [15:8] VFN [7:0] */
+	FW_FLOWC_MNEM_CH,
+	FW_FLOWC_MNEM_PORT,
+	FW_FLOWC_MNEM_IQID,
+	FW_FLOWC_MNEM_SNDNXT,
+	FW_FLOWC_MNEM_RCVNXT,
+	FW_FLOWC_MNEM_SNDBUF,
+	FW_FLOWC_MNEM_MSS,
+	FW_FLOWC_MNEM_TXDATAPLEN_MAX,
+};
+
+struct fw_flowc_mnemval {
+	u8 mnemonic;
+	u8 r4[3];
+	__be32 val;
+};
+
+struct fw_flowc_wr {
+	__be32 op_to_nparams;
+	__be32 flowid_len16;
+	struct fw_flowc_mnemval mnemval[0];
+};
+
+#define FW_FLOWC_WR_NPARAMS_S           0
+#define FW_FLOWC_WR_NPARAMS_V(x)        ((x) << FW_FLOWC_WR_NPARAMS_S)
+
+struct fw_ofld_tx_data_wr {
+	__be32 op_to_immdlen;
+	__be32 flowid_len16;
+	__be32 plen;
+	__be32 tunnel_to_proxy;
+};
+
+#define FW_OFLD_TX_DATA_WR_TUNNEL_S     19
+#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x)  ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S)
+
+#define FW_OFLD_TX_DATA_WR_SAVE_S       18
+#define FW_OFLD_TX_DATA_WR_SAVE_V(x)    ((x) << FW_OFLD_TX_DATA_WR_SAVE_S)
+
+#define FW_OFLD_TX_DATA_WR_FLUSH_S      17
+#define FW_OFLD_TX_DATA_WR_FLUSH_V(x)   ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S)
+#define FW_OFLD_TX_DATA_WR_FLUSH_F      FW_OFLD_TX_DATA_WR_FLUSH_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_URGENT_S     16
+#define FW_OFLD_TX_DATA_WR_URGENT_V(x)  ((x) << FW_OFLD_TX_DATA_WR_URGENT_S)
+
+#define FW_OFLD_TX_DATA_WR_MORE_S       15
+#define FW_OFLD_TX_DATA_WR_MORE_V(x)    ((x) << FW_OFLD_TX_DATA_WR_MORE_S)
+
+#define FW_OFLD_TX_DATA_WR_SHOVE_S      14
+#define FW_OFLD_TX_DATA_WR_SHOVE_V(x)   ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S)
+#define FW_OFLD_TX_DATA_WR_SHOVE_F      FW_OFLD_TX_DATA_WR_SHOVE_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_ULPMODE_S    10
+#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S)
+
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S         6
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x)      \
+	((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S)
+
+struct fw_cmd_wr {
+	__be32 op_dma;
+	__be32 len16_pkd;
+	__be64 cookie_daddr;
+};
+
+#define FW_CMD_WR_DMA_S         17
+#define FW_CMD_WR_DMA_V(x)      ((x) << FW_CMD_WR_DMA_S)
+
+struct fw_eth_tx_pkt_vm_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be32 r3[2];
+	u8 ethmacdst[6];
+	u8 ethmacsrc[6];
+	__be16 ethtype;
+	__be16 vlantci;
+};
+
+#define FW_CMD_MAX_TIMEOUT 10000
+
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands.  Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT	(3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES	3
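+
+/* Illustrative sketch only: a caller would typically bound its HELLO attempts
+ * by FW_CMD_HELLO_RETRIES, giving each attempt the extended
+ * FW_CMD_HELLO_TIMEOUT.  The issue_hello() callback is a placeholder for
+ * whatever mailbox routine actually sends the command; it is not part of this
+ * interface.
+ */
+static inline int example_fw_hello_retry(void *adapter,
+					 int (*issue_hello)(void *adap,
+							    int timeout))
+{
+	int ret = -1, try;
+
+	for (try = 0; try < FW_CMD_HELLO_RETRIES; try++) {
+		ret = issue_hello(adapter, FW_CMD_HELLO_TIMEOUT);
+		if (ret == 0)		/* HELLO acknowledged */
+			break;
+	}
+	return ret;
+}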
+
+
+enum fw_cmd_opcodes {
+	FW_LDST_CMD                    = 0x01,
+	FW_RESET_CMD                   = 0x03,
+	FW_HELLO_CMD                   = 0x04,
+	FW_BYE_CMD                     = 0x05,
+	FW_INITIALIZE_CMD              = 0x06,
+	FW_CAPS_CONFIG_CMD             = 0x07,
+	FW_PARAMS_CMD                  = 0x08,
+	FW_PFVF_CMD                    = 0x09,
+	FW_IQ_CMD                      = 0x10,
+	FW_EQ_MNGT_CMD                 = 0x11,
+	FW_EQ_ETH_CMD                  = 0x12,
+	FW_EQ_CTRL_CMD                 = 0x13,
+	FW_EQ_OFLD_CMD                 = 0x21,
+	FW_VI_CMD                      = 0x14,
+	FW_VI_MAC_CMD                  = 0x15,
+	FW_VI_RXMODE_CMD               = 0x16,
+	FW_VI_ENABLE_CMD               = 0x17,
+	FW_ACL_MAC_CMD                 = 0x18,
+	FW_ACL_VLAN_CMD                = 0x19,
+	FW_VI_STATS_CMD                = 0x1a,
+	FW_PORT_CMD                    = 0x1b,
+	FW_PORT_STATS_CMD              = 0x1c,
+	FW_PORT_LB_STATS_CMD           = 0x1d,
+	FW_PORT_TRACE_CMD              = 0x1e,
+	FW_PORT_TRACE_MMAP_CMD         = 0x1f,
+	FW_RSS_IND_TBL_CMD             = 0x20,
+	FW_RSS_GLB_CONFIG_CMD          = 0x22,
+	FW_RSS_VI_CONFIG_CMD           = 0x23,
+	FW_DEVLOG_CMD                  = 0x25,
+	FW_CLIP_CMD                    = 0x28,
+	FW_LASTC2E_CMD                 = 0x40,
+	FW_ERROR_CMD                   = 0x80,
+	FW_DEBUG_CMD                   = 0x81,
+};
+
+enum fw_cmd_cap {
+	FW_CMD_CAP_PF                  = 0x01,
+	FW_CMD_CAP_DMAQ                = 0x02,
+	FW_CMD_CAP_PORT                = 0x04,
+	FW_CMD_CAP_PORTPROMISC         = 0x08,
+	FW_CMD_CAP_PORTSTATS           = 0x10,
+	FW_CMD_CAP_VF                  = 0x80,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+#define FW_CMD_OP_S             24
+#define FW_CMD_OP_M             0xff
+#define FW_CMD_OP_V(x)          ((x) << FW_CMD_OP_S)
+#define FW_CMD_OP_G(x)          (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M)
+
+#define FW_CMD_REQUEST_S        23
+#define FW_CMD_REQUEST_V(x)     ((x) << FW_CMD_REQUEST_S)
+#define FW_CMD_REQUEST_F        FW_CMD_REQUEST_V(1U)
+
+#define FW_CMD_READ_S           22
+#define FW_CMD_READ_V(x)        ((x) << FW_CMD_READ_S)
+#define FW_CMD_READ_F           FW_CMD_READ_V(1U)
+
+#define FW_CMD_WRITE_S          21
+#define FW_CMD_WRITE_V(x)       ((x) << FW_CMD_WRITE_S)
+#define FW_CMD_WRITE_F          FW_CMD_WRITE_V(1U)
+
+#define FW_CMD_EXEC_S           20
+#define FW_CMD_EXEC_V(x)        ((x) << FW_CMD_EXEC_S)
+#define FW_CMD_EXEC_F           FW_CMD_EXEC_V(1U)
+
+#define FW_CMD_RAMASK_S         20
+#define FW_CMD_RAMASK_V(x)      ((x) << FW_CMD_RAMASK_S)
+
+#define FW_CMD_RETVAL_S         8
+#define FW_CMD_RETVAL_M         0xff
+#define FW_CMD_RETVAL_V(x)      ((x) << FW_CMD_RETVAL_S)
+#define FW_CMD_RETVAL_G(x)      (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M)
+
+#define FW_CMD_LEN16_S          0
+#define FW_CMD_LEN16_V(x)       ((x) << FW_CMD_LEN16_S)
+
+#define FW_LEN16(fw_struct)	FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
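+
+/* Illustrative sketch only: a command's first flit is typically built by
+ * OR'ing the macros above into the two big-endian words of the generic
+ * header, with FW_LEN16() (or FW_CMD_LEN16_V()) encoding the command length.
+ * The opcode and len16 values here are placeholders; real commands use their
+ * specific fw_*_cmd structures rather than the bare header.
+ */
+static inline void example_init_cmd_hdr(struct fw_cmd_hdr *c, unsigned int op,
+					unsigned int len16)
+{
+	c->hi = cpu_to_be32(FW_CMD_OP_V(op) | FW_CMD_REQUEST_F |
+			    FW_CMD_WRITE_F);
+	c->lo = cpu_to_be32(FW_CMD_LEN16_V(len16));
+}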
+
+enum fw_ldst_addrspc {
+	FW_LDST_ADDRSPC_FIRMWARE  = 0x0001,
+	FW_LDST_ADDRSPC_SGE_EGRC  = 0x0008,
+	FW_LDST_ADDRSPC_SGE_INGC  = 0x0009,
+	FW_LDST_ADDRSPC_SGE_FLMC  = 0x000a,
+	FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
+	FW_LDST_ADDRSPC_TP_PIO    = 0x0010,
+	FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
+	FW_LDST_ADDRSPC_TP_MIB    = 0x0012,
+	FW_LDST_ADDRSPC_MDIO      = 0x0018,
+	FW_LDST_ADDRSPC_MPS       = 0x0020,
+	FW_LDST_ADDRSPC_FUNC      = 0x0028,
+	FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+};
+
+enum fw_ldst_mps_fid {
+	FW_LDST_MPS_ATRB,
+	FW_LDST_MPS_RPLC
+};
+
+enum fw_ldst_func_access_ctl {
+	FW_LDST_FUNC_ACC_CTL_VIID,
+	FW_LDST_FUNC_ACC_CTL_FID
+};
+
+enum fw_ldst_func_mod_index {
+	FW_LDST_FUNC_MPS
+};
+
+struct fw_ldst_cmd {
+	__be32 op_to_addrspace;
+	__be32 cycles_to_len16;
+	union fw_ldst {
+		struct fw_ldst_addrval {
+			__be32 addr;
+			__be32 val;
+		} addrval;
+		struct fw_ldst_idctxt {
+			__be32 physid;
+			__be32 msg_ctxtflush;
+			__be32 ctxt_data7;
+			__be32 ctxt_data6;
+			__be32 ctxt_data5;
+			__be32 ctxt_data4;
+			__be32 ctxt_data3;
+			__be32 ctxt_data2;
+			__be32 ctxt_data1;
+			__be32 ctxt_data0;
+		} idctxt;
+		struct fw_ldst_mdio {
+			__be16 paddr_mmd;
+			__be16 raddr;
+			__be16 vctl;
+			__be16 rval;
+		} mdio;
+		struct fw_ldst_cim_rq {
+			u8 req_first64[8];
+			u8 req_second64[8];
+			u8 resp_first64[8];
+			u8 resp_second64[8];
+			__be32 r3[2];
+		} cim_rq;
+		union fw_ldst_mps {
+			struct fw_ldst_mps_rplc {
+				__be16 fid_idx;
+				__be16 rplcpf_pkd;
+				__be32 rplc255_224;
+				__be32 rplc223_192;
+				__be32 rplc191_160;
+				__be32 rplc159_128;
+				__be32 rplc127_96;
+				__be32 rplc95_64;
+				__be32 rplc63_32;
+				__be32 rplc31_0;
+			} rplc;
+			struct fw_ldst_mps_atrb {
+				__be16 fid_mpsid;
+				__be16 r2[3];
+				__be32 r3[2];
+				__be32 r4;
+				__be32 atrb;
+				__be16 vlan[16];
+			} atrb;
+		} mps;
+		struct fw_ldst_func {
+			u8 access_ctl;
+			u8 mod_index;
+			__be16 ctl_id;
+			__be32 offset;
+			__be64 data0;
+			__be64 data1;
+		} func;
+		struct fw_ldst_pcie {
+			u8 ctrl_to_fn;
+			u8 bnum;
+			u8 r;
+			u8 ext_r;
+			u8 select_naccess;
+			u8 pcie_fn;
+			__be16 nset_pkd;
+			__be32 data[12];
+		} pcie;
+		struct fw_ldst_i2c_deprecated {
+			u8 pid_pkd;
+			u8 base;
+			u8 boffset;
+			u8 data;
+			__be32 r9;
+		} i2c_deprecated;
+		struct fw_ldst_i2c {
+			u8 pid;
+			u8 did;
+			u8 boffset;
+			u8 blen;
+			__be32 r9;
+			__u8   data[48];
+		} i2c;
+		struct fw_ldst_le {
+			__be32 index;
+			__be32 r9;
+			u8 val[33];
+			u8 r11[7];
+		} le;
+	} u;
+};
+
+#define FW_LDST_CMD_ADDRSPACE_S		0
+#define FW_LDST_CMD_ADDRSPACE_V(x)	((x) << FW_LDST_CMD_ADDRSPACE_S)
+
+#define FW_LDST_CMD_MSG_S       31
+#define FW_LDST_CMD_MSG_V(x)	((x) << FW_LDST_CMD_MSG_S)
+
+#define FW_LDST_CMD_CTXTFLUSH_S		30
+#define FW_LDST_CMD_CTXTFLUSH_V(x)	((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F		FW_LDST_CMD_CTXTFLUSH_V(1U)
+
+#define FW_LDST_CMD_PADDR_S     8
+#define FW_LDST_CMD_PADDR_V(x)	((x) << FW_LDST_CMD_PADDR_S)
+
+#define FW_LDST_CMD_MMD_S       0
+#define FW_LDST_CMD_MMD_V(x)	((x) << FW_LDST_CMD_MMD_S)
+
+#define FW_LDST_CMD_FID_S       15
+#define FW_LDST_CMD_FID_V(x)	((x) << FW_LDST_CMD_FID_S)
+
+#define FW_LDST_CMD_IDX_S	0
+#define FW_LDST_CMD_IDX_V(x)	((x) << FW_LDST_CMD_IDX_S)
+
+#define FW_LDST_CMD_RPLCPF_S    0
+#define FW_LDST_CMD_RPLCPF_V(x)	((x) << FW_LDST_CMD_RPLCPF_S)
+
+#define FW_LDST_CMD_LC_S        4
+#define FW_LDST_CMD_LC_V(x)     ((x) << FW_LDST_CMD_LC_S)
+#define FW_LDST_CMD_LC_F	FW_LDST_CMD_LC_V(1U)
+
+#define FW_LDST_CMD_FN_S        0
+#define FW_LDST_CMD_FN_V(x)	((x) << FW_LDST_CMD_FN_S)
+
+#define FW_LDST_CMD_NACCESS_S           0
+#define FW_LDST_CMD_NACCESS_V(x)	((x) << FW_LDST_CMD_NACCESS_S)
+
+struct fw_reset_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 val;
+	__be32 halt_pkd;
+};
+
+#define FW_RESET_CMD_HALT_S	31
+#define FW_RESET_CMD_HALT_M     0x1
+#define FW_RESET_CMD_HALT_V(x)	((x) << FW_RESET_CMD_HALT_S)
+#define FW_RESET_CMD_HALT_G(x)  \
+	(((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M)
+#define FW_RESET_CMD_HALT_F	FW_RESET_CMD_HALT_V(1U)
+
+enum fw_hellow_cmd {
+	fw_hello_cmd_stage_os		= 0x0
+};
+
+struct fw_hello_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 err_to_clearinit;
+	__be32 fwrev;
+};
+
+#define FW_HELLO_CMD_ERR_S      31
+#define FW_HELLO_CMD_ERR_V(x)   ((x) << FW_HELLO_CMD_ERR_S)
+#define FW_HELLO_CMD_ERR_F	FW_HELLO_CMD_ERR_V(1U)
+
+#define FW_HELLO_CMD_INIT_S     30
+#define FW_HELLO_CMD_INIT_V(x)  ((x) << FW_HELLO_CMD_INIT_S)
+#define FW_HELLO_CMD_INIT_F	FW_HELLO_CMD_INIT_V(1U)
+
+#define FW_HELLO_CMD_MASTERDIS_S	29
+#define FW_HELLO_CMD_MASTERDIS_V(x)	((x) << FW_HELLO_CMD_MASTERDIS_S)
+
+#define FW_HELLO_CMD_MASTERFORCE_S      28
+#define FW_HELLO_CMD_MASTERFORCE_V(x)	((x) << FW_HELLO_CMD_MASTERFORCE_S)
+
+#define FW_HELLO_CMD_MBMASTER_S		24
+#define FW_HELLO_CMD_MBMASTER_M		0xfU
+#define FW_HELLO_CMD_MBMASTER_V(x)	((x) << FW_HELLO_CMD_MBMASTER_S)
+#define FW_HELLO_CMD_MBMASTER_G(x)	\
+	(((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M)
+
+#define FW_HELLO_CMD_MBASYNCNOTINT_S    23
+#define FW_HELLO_CMD_MBASYNCNOTINT_V(x)	((x) << FW_HELLO_CMD_MBASYNCNOTINT_S)
+
+#define FW_HELLO_CMD_MBASYNCNOT_S       20
+#define FW_HELLO_CMD_MBASYNCNOT_V(x)	((x) << FW_HELLO_CMD_MBASYNCNOT_S)
+
+#define FW_HELLO_CMD_STAGE_S		17
+#define FW_HELLO_CMD_STAGE_V(x)		((x) << FW_HELLO_CMD_STAGE_S)
+
+#define FW_HELLO_CMD_CLEARINIT_S        16
+#define FW_HELLO_CMD_CLEARINIT_V(x)     ((x) << FW_HELLO_CMD_CLEARINIT_S)
+#define FW_HELLO_CMD_CLEARINIT_F	FW_HELLO_CMD_CLEARINIT_V(1U)
+
+struct fw_bye_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be64 r3;
+};
+
+struct fw_initialize_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be64 r3;
+};
+
+enum fw_caps_config_hm {
+	FW_CAPS_CONFIG_HM_PCIE		= 0x00000001,
+	FW_CAPS_CONFIG_HM_PL		= 0x00000002,
+	FW_CAPS_CONFIG_HM_SGE		= 0x00000004,
+	FW_CAPS_CONFIG_HM_CIM		= 0x00000008,
+	FW_CAPS_CONFIG_HM_ULPTX		= 0x00000010,
+	FW_CAPS_CONFIG_HM_TP		= 0x00000020,
+	FW_CAPS_CONFIG_HM_ULPRX		= 0x00000040,
+	FW_CAPS_CONFIG_HM_PMRX		= 0x00000080,
+	FW_CAPS_CONFIG_HM_PMTX		= 0x00000100,
+	FW_CAPS_CONFIG_HM_MC		= 0x00000200,
+	FW_CAPS_CONFIG_HM_LE		= 0x00000400,
+	FW_CAPS_CONFIG_HM_MPS		= 0x00000800,
+	FW_CAPS_CONFIG_HM_XGMAC		= 0x00001000,
+	FW_CAPS_CONFIG_HM_CPLSWITCH	= 0x00002000,
+	FW_CAPS_CONFIG_HM_T4DBG		= 0x00004000,
+	FW_CAPS_CONFIG_HM_MI		= 0x00008000,
+	FW_CAPS_CONFIG_HM_I2CM		= 0x00010000,
+	FW_CAPS_CONFIG_HM_NCSI		= 0x00020000,
+	FW_CAPS_CONFIG_HM_SMB		= 0x00040000,
+	FW_CAPS_CONFIG_HM_MA		= 0x00080000,
+	FW_CAPS_CONFIG_HM_EDRAM		= 0x00100000,
+	FW_CAPS_CONFIG_HM_PMU		= 0x00200000,
+	FW_CAPS_CONFIG_HM_UART		= 0x00400000,
+	FW_CAPS_CONFIG_HM_SF		= 0x00800000,
+};
+
+enum fw_caps_config_nbm {
+	FW_CAPS_CONFIG_NBM_IPMI		= 0x00000001,
+	FW_CAPS_CONFIG_NBM_NCSI		= 0x00000002,
+};
+
+enum fw_caps_config_link {
+	FW_CAPS_CONFIG_LINK_PPP		= 0x00000001,
+	FW_CAPS_CONFIG_LINK_QFC		= 0x00000002,
+	FW_CAPS_CONFIG_LINK_DCBX	= 0x00000004,
+};
+
+enum fw_caps_config_switch {
+	FW_CAPS_CONFIG_SWITCH_INGRESS	= 0x00000001,
+	FW_CAPS_CONFIG_SWITCH_EGRESS	= 0x00000002,
+};
+
+enum fw_caps_config_nic {
+	FW_CAPS_CONFIG_NIC		= 0x00000001,
+	FW_CAPS_CONFIG_NIC_VM		= 0x00000002,
+};
+
+enum fw_caps_config_ofld {
+	FW_CAPS_CONFIG_OFLD		= 0x00000001,
+};
+
+enum fw_caps_config_rdma {
+	FW_CAPS_CONFIG_RDMA_RDDP	= 0x00000001,
+	FW_CAPS_CONFIG_RDMA_RDMAC	= 0x00000002,
+};
+
+enum fw_caps_config_iscsi {
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
+	FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
+	FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+};
+
+enum fw_caps_config_fcoe {
+	FW_CAPS_CONFIG_FCOE_INITIATOR	= 0x00000001,
+	FW_CAPS_CONFIG_FCOE_TARGET	= 0x00000002,
+	FW_CAPS_CONFIG_FCOE_CTRL_OFLD	= 0x00000004,
+};
+
+enum fw_memtype_cf {
+	FW_MEMTYPE_CF_EDC0		= 0x0,
+	FW_MEMTYPE_CF_EDC1		= 0x1,
+	FW_MEMTYPE_CF_EXTMEM		= 0x2,
+	FW_MEMTYPE_CF_FLASH		= 0x4,
+	FW_MEMTYPE_CF_INTERNAL		= 0x5,
+	FW_MEMTYPE_CF_EXTMEM1           = 0x6,
+};
+
+struct fw_caps_config_cmd {
+	__be32 op_to_write;
+	__be32 cfvalid_to_len16;
+	__be32 r2;
+	__be32 hwmbitmap;
+	__be16 nbmcaps;
+	__be16 linkcaps;
+	__be16 switchcaps;
+	__be16 r3;
+	__be16 niccaps;
+	__be16 ofldcaps;
+	__be16 rdmacaps;
+	__be16 r4;
+	__be16 iscsicaps;
+	__be16 fcoecaps;
+	__be32 cfcsum;
+	__be32 finiver;
+	__be32 finicsum;
+};
+
+#define FW_CAPS_CONFIG_CMD_CFVALID_S    27
+#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S)
+#define FW_CAPS_CONFIG_CMD_CFVALID_F    FW_CAPS_CONFIG_CMD_CFVALID_V(1U)
+
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S		24
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x)	\
+	((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S)
+
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S      16
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x)	\
+	((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S)
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+	FW_PARAMS_MNEM_DEV		= 1,	/* device params */
+	FW_PARAMS_MNEM_PFVF		= 2,	/* function params */
+	FW_PARAMS_MNEM_REG		= 3,	/* limited register access */
+	FW_PARAMS_MNEM_DMAQ		= 4,	/* dma queue params */
+	FW_PARAMS_MNEM_CHNET            = 5,    /* chnet params */
+	FW_PARAMS_MNEM_LAST
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+	FW_PARAMS_PARAM_DEV_CCLK	= 0x00, /* chip core clock in kHz */
+	FW_PARAMS_PARAM_DEV_PORTVEC	= 0x01, /* the port vector */
+	FW_PARAMS_PARAM_DEV_NTID	= 0x02, /* reads the number of TIDs
+						 * allocated by the device's
+						 * Lookup Engine
+						 */
+	FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
+	FW_PARAMS_PARAM_DEV_INTVER_NIC	= 0x04,
+	FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
+	FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
+	FW_PARAMS_PARAM_DEV_INTVER_RI	= 0x07,
+	FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
+	FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
+	FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
+	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
+	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+	FW_PARAMS_PARAM_DEV_CF = 0x0D,
+	FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
+	FW_PARAMS_PARAM_DEV_DIAG = 0x11,
+	FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+	FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
+	FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+	FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+	FW_PARAMS_PARAM_PFVF_RWXCAPS	= 0x00,
+	FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
+	FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
+	FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+	FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+	FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+	FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+	FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
+	FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
+	FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
+	FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
+	FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
+	FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+	FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
+	FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
+	FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
+	FW_PARAMS_PARAM_PFVF_RQ_END	= 0x10,
+	FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
+	FW_PARAMS_PARAM_PFVF_PBL_END	= 0x12,
+	FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+	FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+	FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+	FW_PARAMS_PARAM_PFVF_SQRQ_END	= 0x16,
+	FW_PARAMS_PARAM_PFVF_CQ_START	= 0x17,
+	FW_PARAMS_PARAM_PFVF_CQ_END	= 0x18,
+	FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+	FW_PARAMS_PARAM_PFVF_VIID       = 0x24,
+	FW_PARAMS_PARAM_PFVF_CPMASK     = 0x25,
+	FW_PARAMS_PARAM_PFVF_OCQ_START  = 0x26,
+	FW_PARAMS_PARAM_PFVF_OCQ_END    = 0x27,
+	FW_PARAMS_PARAM_PFVF_CONM_MAP   = 0x28,
+	FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+	FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+	FW_PARAMS_PARAM_PFVF_EQ_START	= 0x2B,
+	FW_PARAMS_PARAM_PFVF_EQ_END	= 0x2C,
+	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
+	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
+	FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
+	FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+	FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
+	FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
+	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+	FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+	FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+	FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+	FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+	FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
+};
+
+enum fw_params_param_dev_diag {
+	FW_PARAM_DEV_DIAG_TMP		= 0x00,
+	FW_PARAM_DEV_DIAG_VDD		= 0x01,
+};
+
+enum fw_params_param_dev_fwcache {
+	FW_PARAM_DEV_FWCACHE_FLUSH      = 0x00,
+	FW_PARAM_DEV_FWCACHE_FLUSHINV   = 0x01,
+};
+
+#define FW_PARAMS_MNEM_S	24
+#define FW_PARAMS_MNEM_V(x)	((x) << FW_PARAMS_MNEM_S)
+
+#define FW_PARAMS_PARAM_X_S     16
+#define FW_PARAMS_PARAM_X_V(x)	((x) << FW_PARAMS_PARAM_X_S)
+
+#define FW_PARAMS_PARAM_Y_S	8
+#define FW_PARAMS_PARAM_Y_M	0xffU
+#define FW_PARAMS_PARAM_Y_V(x)	((x) << FW_PARAMS_PARAM_Y_S)
+#define FW_PARAMS_PARAM_Y_G(x)	(((x) >> FW_PARAMS_PARAM_Y_S) &\
+		FW_PARAMS_PARAM_Y_M)
+
+#define FW_PARAMS_PARAM_Z_S	0
+#define FW_PARAMS_PARAM_Z_M	0xffu
+#define FW_PARAMS_PARAM_Z_V(x)	((x) << FW_PARAMS_PARAM_Z_S)
+#define FW_PARAMS_PARAM_Z_G(x)	(((x) >> FW_PARAMS_PARAM_Z_S) &\
+		FW_PARAMS_PARAM_Z_M)
+
+#define FW_PARAMS_PARAM_XYZ_S		0
+#define FW_PARAMS_PARAM_XYZ_V(x)	((x) << FW_PARAMS_PARAM_XYZ_S)
+
+#define FW_PARAMS_PARAM_YZ_S		0
+#define FW_PARAMS_PARAM_YZ_V(x)		((x) << FW_PARAMS_PARAM_YZ_S)
+
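+/* A parameter is identified by packing the mnemonic and the X/Y/Z indices
+ * above into the 32-bit mnem word of a fw_params_cmd entry.  For example,
+ * addressing the core-clock device parameter might look like (illustrative
+ * sketch only):
+ *
+ *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK);
+ */
+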
+struct fw_params_cmd {
+	__be32 op_to_vfn;
+	__be32 retval_len16;
+	struct fw_params_param {
+		__be32 mnem;
+		__be32 val;
+	} param[7];
+};
+
+#define FW_PARAMS_CMD_PFN_S     8
+#define FW_PARAMS_CMD_PFN_V(x)	((x) << FW_PARAMS_CMD_PFN_S)
+
+#define FW_PARAMS_CMD_VFN_S     0
+#define FW_PARAMS_CMD_VFN_V(x)	((x) << FW_PARAMS_CMD_VFN_S)
+
+struct fw_pfvf_cmd {
+	__be32 op_to_vfn;
+	__be32 retval_len16;
+	__be32 niqflint_niq;
+	__be32 type_to_neq;
+	__be32 tc_to_nexactf;
+	__be32 r_caps_to_nethctrl;
+	__be16 nricq;
+	__be16 nriqp;
+	__be32 r4;
+};
+
+#define FW_PFVF_CMD_PFN_S	8
+#define FW_PFVF_CMD_PFN_V(x)	((x) << FW_PFVF_CMD_PFN_S)
+
+#define FW_PFVF_CMD_VFN_S       0
+#define FW_PFVF_CMD_VFN_V(x)	((x) << FW_PFVF_CMD_VFN_S)
+
+#define FW_PFVF_CMD_NIQFLINT_S          20
+#define FW_PFVF_CMD_NIQFLINT_M          0xfff
+#define FW_PFVF_CMD_NIQFLINT_V(x)	((x) << FW_PFVF_CMD_NIQFLINT_S)
+#define FW_PFVF_CMD_NIQFLINT_G(x)	\
+	(((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M)
+
+#define FW_PFVF_CMD_NIQ_S       0
+#define FW_PFVF_CMD_NIQ_M       0xfffff
+#define FW_PFVF_CMD_NIQ_V(x)	((x) << FW_PFVF_CMD_NIQ_S)
+#define FW_PFVF_CMD_NIQ_G(x)	\
+	(((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M)
+
+#define FW_PFVF_CMD_TYPE_S      31
+#define FW_PFVF_CMD_TYPE_M      0x1
+#define FW_PFVF_CMD_TYPE_V(x)   ((x) << FW_PFVF_CMD_TYPE_S)
+#define FW_PFVF_CMD_TYPE_G(x)	\
+	(((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M)
+#define FW_PFVF_CMD_TYPE_F      FW_PFVF_CMD_TYPE_V(1U)
+
+#define FW_PFVF_CMD_CMASK_S     24
+#define FW_PFVF_CMD_CMASK_M	0xf
+#define FW_PFVF_CMD_CMASK_V(x)	((x) << FW_PFVF_CMD_CMASK_S)
+#define FW_PFVF_CMD_CMASK_G(x)	\
+	(((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M)
+
+#define FW_PFVF_CMD_PMASK_S     20
+#define FW_PFVF_CMD_PMASK_M	0xf
+#define FW_PFVF_CMD_PMASK_V(x)	((x) << FW_PFVF_CMD_PMASK_S)
+#define FW_PFVF_CMD_PMASK_G(x) \
+	(((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M)
+
+#define FW_PFVF_CMD_NEQ_S       0
+#define FW_PFVF_CMD_NEQ_M       0xfffff
+#define FW_PFVF_CMD_NEQ_V(x)	((x) << FW_PFVF_CMD_NEQ_S)
+#define FW_PFVF_CMD_NEQ_G(x)	\
+	(((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M)
+
+#define FW_PFVF_CMD_TC_S        24
+#define FW_PFVF_CMD_TC_M        0xff
+#define FW_PFVF_CMD_TC_V(x)	((x) << FW_PFVF_CMD_TC_S)
+#define FW_PFVF_CMD_TC_G(x)	(((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M)
+
+#define FW_PFVF_CMD_NVI_S       16
+#define FW_PFVF_CMD_NVI_M       0xff
+#define FW_PFVF_CMD_NVI_V(x)	((x) << FW_PFVF_CMD_NVI_S)
+#define FW_PFVF_CMD_NVI_G(x)	(((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M)
+
+#define FW_PFVF_CMD_NEXACTF_S           0
+#define FW_PFVF_CMD_NEXACTF_M           0xffff
+#define FW_PFVF_CMD_NEXACTF_V(x)	((x) << FW_PFVF_CMD_NEXACTF_S)
+#define FW_PFVF_CMD_NEXACTF_G(x)	\
+	(((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M)
+
+#define FW_PFVF_CMD_R_CAPS_S    24
+#define FW_PFVF_CMD_R_CAPS_M    0xff
+#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S)
+#define FW_PFVF_CMD_R_CAPS_G(x) \
+	(((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M)
+
+#define FW_PFVF_CMD_WX_CAPS_S           16
+#define FW_PFVF_CMD_WX_CAPS_M           0xff
+#define FW_PFVF_CMD_WX_CAPS_V(x)	((x) << FW_PFVF_CMD_WX_CAPS_S)
+#define FW_PFVF_CMD_WX_CAPS_G(x)	\
+	(((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M)
+
+#define FW_PFVF_CMD_NETHCTRL_S          0
+#define FW_PFVF_CMD_NETHCTRL_M          0xffff
+#define FW_PFVF_CMD_NETHCTRL_V(x)	((x) << FW_PFVF_CMD_NETHCTRL_S)
+#define FW_PFVF_CMD_NETHCTRL_G(x)	\
+	(((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M)
+
+enum fw_iq_type {
+	FW_IQ_TYPE_FL_INT_CAP,
+	FW_IQ_TYPE_NO_FL_INT_CAP
+};
+
+struct fw_iq_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be16 physiqid;
+	__be16 iqid;
+	__be16 fl0id;
+	__be16 fl1id;
+	__be32 type_to_iqandstindex;
+	__be16 iqdroprss_to_iqesize;
+	__be16 iqsize;
+	__be64 iqaddr;
+	__be32 iqns_to_fl0congen;
+	__be16 fl0dcaen_to_fl0cidxfthresh;
+	__be16 fl0size;
+	__be64 fl0addr;
+	__be32 fl1cngchmap_to_fl1congen;
+	__be16 fl1dcaen_to_fl1cidxfthresh;
+	__be16 fl1size;
+	__be64 fl1addr;
+};
+
+#define FW_IQ_CMD_PFN_S		8
+#define FW_IQ_CMD_PFN_V(x)	((x) << FW_IQ_CMD_PFN_S)
+
+#define FW_IQ_CMD_VFN_S		0
+#define FW_IQ_CMD_VFN_V(x)	((x) << FW_IQ_CMD_VFN_S)
+
+#define FW_IQ_CMD_ALLOC_S	31
+#define FW_IQ_CMD_ALLOC_V(x)	((x) << FW_IQ_CMD_ALLOC_S)
+#define FW_IQ_CMD_ALLOC_F	FW_IQ_CMD_ALLOC_V(1U)
+
+#define FW_IQ_CMD_FREE_S	30
+#define FW_IQ_CMD_FREE_V(x)	((x) << FW_IQ_CMD_FREE_S)
+#define FW_IQ_CMD_FREE_F	FW_IQ_CMD_FREE_V(1U)
+
+#define FW_IQ_CMD_MODIFY_S	29
+#define FW_IQ_CMD_MODIFY_V(x)	((x) << FW_IQ_CMD_MODIFY_S)
+#define FW_IQ_CMD_MODIFY_F	FW_IQ_CMD_MODIFY_V(1U)
+
+#define FW_IQ_CMD_IQSTART_S	28
+#define FW_IQ_CMD_IQSTART_V(x)	((x) << FW_IQ_CMD_IQSTART_S)
+#define FW_IQ_CMD_IQSTART_F	FW_IQ_CMD_IQSTART_V(1U)
+
+#define FW_IQ_CMD_IQSTOP_S	27
+#define FW_IQ_CMD_IQSTOP_V(x)	((x) << FW_IQ_CMD_IQSTOP_S)
+#define FW_IQ_CMD_IQSTOP_F	FW_IQ_CMD_IQSTOP_V(1U)
+
+#define FW_IQ_CMD_TYPE_S	29
+#define FW_IQ_CMD_TYPE_V(x)	((x) << FW_IQ_CMD_TYPE_S)
+
+#define FW_IQ_CMD_IQASYNCH_S	28
+#define FW_IQ_CMD_IQASYNCH_V(x)	((x) << FW_IQ_CMD_IQASYNCH_S)
+
+#define FW_IQ_CMD_VIID_S	16
+#define FW_IQ_CMD_VIID_V(x)	((x) << FW_IQ_CMD_VIID_S)
+
+#define FW_IQ_CMD_IQANDST_S	15
+#define FW_IQ_CMD_IQANDST_V(x)	((x) << FW_IQ_CMD_IQANDST_S)
+
+#define FW_IQ_CMD_IQANUS_S	14
+#define FW_IQ_CMD_IQANUS_V(x)	((x) << FW_IQ_CMD_IQANUS_S)
+
+#define FW_IQ_CMD_IQANUD_S	12
+#define FW_IQ_CMD_IQANUD_V(x)	((x) << FW_IQ_CMD_IQANUD_S)
+
+#define FW_IQ_CMD_IQANDSTINDEX_S	0
+#define FW_IQ_CMD_IQANDSTINDEX_V(x)	((x) << FW_IQ_CMD_IQANDSTINDEX_S)
+
+#define FW_IQ_CMD_IQDROPRSS_S		15
+#define FW_IQ_CMD_IQDROPRSS_V(x)	((x) << FW_IQ_CMD_IQDROPRSS_S)
+#define FW_IQ_CMD_IQDROPRSS_F	FW_IQ_CMD_IQDROPRSS_V(1U)
+
+#define FW_IQ_CMD_IQGTSMODE_S		14
+#define FW_IQ_CMD_IQGTSMODE_V(x)	((x) << FW_IQ_CMD_IQGTSMODE_S)
+#define FW_IQ_CMD_IQGTSMODE_F		FW_IQ_CMD_IQGTSMODE_V(1U)
+
+#define FW_IQ_CMD_IQPCIECH_S	12
+#define FW_IQ_CMD_IQPCIECH_V(x)	((x) << FW_IQ_CMD_IQPCIECH_S)
+
+#define FW_IQ_CMD_IQDCAEN_S	11
+#define FW_IQ_CMD_IQDCAEN_V(x)	((x) << FW_IQ_CMD_IQDCAEN_S)
+
+#define FW_IQ_CMD_IQDCACPU_S	6
+#define FW_IQ_CMD_IQDCACPU_V(x)	((x) << FW_IQ_CMD_IQDCACPU_S)
+
+#define FW_IQ_CMD_IQINTCNTTHRESH_S	4
+#define FW_IQ_CMD_IQINTCNTTHRESH_V(x)	((x) << FW_IQ_CMD_IQINTCNTTHRESH_S)
+
+#define FW_IQ_CMD_IQO_S		3
+#define FW_IQ_CMD_IQO_V(x)	((x) << FW_IQ_CMD_IQO_S)
+#define FW_IQ_CMD_IQO_F		FW_IQ_CMD_IQO_V(1U)
+
+#define FW_IQ_CMD_IQCPRIO_S	2
+#define FW_IQ_CMD_IQCPRIO_V(x)	((x) << FW_IQ_CMD_IQCPRIO_S)
+
+#define FW_IQ_CMD_IQESIZE_S	0
+#define FW_IQ_CMD_IQESIZE_V(x)	((x) << FW_IQ_CMD_IQESIZE_S)
+
+#define FW_IQ_CMD_IQNS_S	31
+#define FW_IQ_CMD_IQNS_V(x)	((x) << FW_IQ_CMD_IQNS_S)
+
+#define FW_IQ_CMD_IQRO_S	30
+#define FW_IQ_CMD_IQRO_V(x)	((x) << FW_IQ_CMD_IQRO_S)
+
+#define FW_IQ_CMD_IQFLINTIQHSEN_S	28
+#define FW_IQ_CMD_IQFLINTIQHSEN_V(x)	((x) << FW_IQ_CMD_IQFLINTIQHSEN_S)
+
+#define FW_IQ_CMD_IQFLINTCONGEN_S	27
+#define FW_IQ_CMD_IQFLINTCONGEN_V(x)	((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F	FW_IQ_CMD_IQFLINTCONGEN_V(1U)
+
+#define FW_IQ_CMD_IQFLINTISCSIC_S	26
+#define FW_IQ_CMD_IQFLINTISCSIC_V(x)	((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+
+#define FW_IQ_CMD_FL0CNGCHMAP_S		20
+#define FW_IQ_CMD_FL0CNGCHMAP_V(x)	((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL0CACHELOCK_S	15
+#define FW_IQ_CMD_FL0CACHELOCK_V(x)	((x) << FW_IQ_CMD_FL0CACHELOCK_S)
+
+#define FW_IQ_CMD_FL0DBP_S	14
+#define FW_IQ_CMD_FL0DBP_V(x)	((x) << FW_IQ_CMD_FL0DBP_S)
+
+#define FW_IQ_CMD_FL0DATANS_S		13
+#define FW_IQ_CMD_FL0DATANS_V(x)	((x) << FW_IQ_CMD_FL0DATANS_S)
+
+#define FW_IQ_CMD_FL0DATARO_S		12
+#define FW_IQ_CMD_FL0DATARO_V(x)	((x) << FW_IQ_CMD_FL0DATARO_S)
+#define FW_IQ_CMD_FL0DATARO_F		FW_IQ_CMD_FL0DATARO_V(1U)
+
+#define FW_IQ_CMD_FL0CONGCIF_S		11
+#define FW_IQ_CMD_FL0CONGCIF_V(x)	((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F		FW_IQ_CMD_FL0CONGCIF_V(1U)
+
+#define FW_IQ_CMD_FL0ONCHIP_S		10
+#define FW_IQ_CMD_FL0ONCHIP_V(x)	((x) << FW_IQ_CMD_FL0ONCHIP_S)
+
+#define FW_IQ_CMD_FL0STATUSPGNS_S	9
+#define FW_IQ_CMD_FL0STATUSPGNS_V(x)	((x) << FW_IQ_CMD_FL0STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL0STATUSPGRO_S	8
+#define FW_IQ_CMD_FL0STATUSPGRO_V(x)	((x) << FW_IQ_CMD_FL0STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL0FETCHNS_S		7
+#define FW_IQ_CMD_FL0FETCHNS_V(x)	((x) << FW_IQ_CMD_FL0FETCHNS_S)
+
+#define FW_IQ_CMD_FL0FETCHRO_S		6
+#define FW_IQ_CMD_FL0FETCHRO_V(x)	((x) << FW_IQ_CMD_FL0FETCHRO_S)
+#define FW_IQ_CMD_FL0FETCHRO_F		FW_IQ_CMD_FL0FETCHRO_V(1U)
+
+#define FW_IQ_CMD_FL0HOSTFCMODE_S	4
+#define FW_IQ_CMD_FL0HOSTFCMODE_V(x)	((x) << FW_IQ_CMD_FL0HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL0CPRIO_S	3
+#define FW_IQ_CMD_FL0CPRIO_V(x)	((x) << FW_IQ_CMD_FL0CPRIO_S)
+
+#define FW_IQ_CMD_FL0PADEN_S	2
+#define FW_IQ_CMD_FL0PADEN_V(x)	((x) << FW_IQ_CMD_FL0PADEN_S)
+#define FW_IQ_CMD_FL0PADEN_F	FW_IQ_CMD_FL0PADEN_V(1U)
+
+#define FW_IQ_CMD_FL0PACKEN_S		1
+#define FW_IQ_CMD_FL0PACKEN_V(x)	((x) << FW_IQ_CMD_FL0PACKEN_S)
+#define FW_IQ_CMD_FL0PACKEN_F		FW_IQ_CMD_FL0PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL0CONGEN_S		0
+#define FW_IQ_CMD_FL0CONGEN_V(x)	((x) << FW_IQ_CMD_FL0CONGEN_S)
+#define FW_IQ_CMD_FL0CONGEN_F		FW_IQ_CMD_FL0CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL0DCAEN_S	15
+#define FW_IQ_CMD_FL0DCAEN_V(x)	((x) << FW_IQ_CMD_FL0DCAEN_S)
+
+#define FW_IQ_CMD_FL0DCACPU_S		10
+#define FW_IQ_CMD_FL0DCACPU_V(x)	((x) << FW_IQ_CMD_FL0DCACPU_S)
+
+#define FW_IQ_CMD_FL0FBMIN_S	7
+#define FW_IQ_CMD_FL0FBMIN_V(x)	((x) << FW_IQ_CMD_FL0FBMIN_S)
+
+#define FW_IQ_CMD_FL0FBMAX_S	4
+#define FW_IQ_CMD_FL0FBMAX_V(x)	((x) << FW_IQ_CMD_FL0FBMAX_S)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_S	3
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x)	((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_F	FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESH_S	0
+#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x)	((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S)
+
+#define FW_IQ_CMD_FL1CNGCHMAP_S		20
+#define FW_IQ_CMD_FL1CNGCHMAP_V(x)	((x) << FW_IQ_CMD_FL1CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL1CACHELOCK_S	15
+#define FW_IQ_CMD_FL1CACHELOCK_V(x)	((x) << FW_IQ_CMD_FL1CACHELOCK_S)
+
+#define FW_IQ_CMD_FL1DBP_S	14
+#define FW_IQ_CMD_FL1DBP_V(x)	((x) << FW_IQ_CMD_FL1DBP_S)
+
+#define FW_IQ_CMD_FL1DATANS_S		13
+#define FW_IQ_CMD_FL1DATANS_V(x)	((x) << FW_IQ_CMD_FL1DATANS_S)
+
+#define FW_IQ_CMD_FL1DATARO_S		12
+#define FW_IQ_CMD_FL1DATARO_V(x)	((x) << FW_IQ_CMD_FL1DATARO_S)
+
+#define FW_IQ_CMD_FL1CONGCIF_S		11
+#define FW_IQ_CMD_FL1CONGCIF_V(x)	((x) << FW_IQ_CMD_FL1CONGCIF_S)
+
+#define FW_IQ_CMD_FL1ONCHIP_S		10
+#define FW_IQ_CMD_FL1ONCHIP_V(x)	((x) << FW_IQ_CMD_FL1ONCHIP_S)
+
+#define FW_IQ_CMD_FL1STATUSPGNS_S	9
+#define FW_IQ_CMD_FL1STATUSPGNS_V(x)	((x) << FW_IQ_CMD_FL1STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL1STATUSPGRO_S	8
+#define FW_IQ_CMD_FL1STATUSPGRO_V(x)	((x) << FW_IQ_CMD_FL1STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL1FETCHNS_S		7
+#define FW_IQ_CMD_FL1FETCHNS_V(x)	((x) << FW_IQ_CMD_FL1FETCHNS_S)
+
+#define FW_IQ_CMD_FL1FETCHRO_S		6
+#define FW_IQ_CMD_FL1FETCHRO_V(x)	((x) << FW_IQ_CMD_FL1FETCHRO_S)
+
+#define FW_IQ_CMD_FL1HOSTFCMODE_S	4
+#define FW_IQ_CMD_FL1HOSTFCMODE_V(x)	((x) << FW_IQ_CMD_FL1HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL1CPRIO_S	3
+#define FW_IQ_CMD_FL1CPRIO_V(x)	((x) << FW_IQ_CMD_FL1CPRIO_S)
+
+#define FW_IQ_CMD_FL1PADEN_S	2
+#define FW_IQ_CMD_FL1PADEN_V(x)	((x) << FW_IQ_CMD_FL1PADEN_S)
+#define FW_IQ_CMD_FL1PADEN_F	FW_IQ_CMD_FL1PADEN_V(1U)
+
+#define FW_IQ_CMD_FL1PACKEN_S		1
+#define FW_IQ_CMD_FL1PACKEN_V(x)	((x) << FW_IQ_CMD_FL1PACKEN_S)
+#define FW_IQ_CMD_FL1PACKEN_F	FW_IQ_CMD_FL1PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL1CONGEN_S		0
+#define FW_IQ_CMD_FL1CONGEN_V(x)	((x) << FW_IQ_CMD_FL1CONGEN_S)
+#define FW_IQ_CMD_FL1CONGEN_F	FW_IQ_CMD_FL1CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL1DCAEN_S	15
+#define FW_IQ_CMD_FL1DCAEN_V(x)	((x) << FW_IQ_CMD_FL1DCAEN_S)
+
+#define FW_IQ_CMD_FL1DCACPU_S		10
+#define FW_IQ_CMD_FL1DCACPU_V(x)	((x) << FW_IQ_CMD_FL1DCACPU_S)
+
+#define FW_IQ_CMD_FL1FBMIN_S	7
+#define FW_IQ_CMD_FL1FBMIN_V(x)	((x) << FW_IQ_CMD_FL1FBMIN_S)
+
+#define FW_IQ_CMD_FL1FBMAX_S	4
+#define FW_IQ_CMD_FL1FBMAX_V(x)	((x) << FW_IQ_CMD_FL1FBMAX_S)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_S	3
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x)	((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_F	FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESH_S	0
+#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x)	((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S)
+
+struct fw_eq_eth_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 eqid_pkd;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+	__be32 viid_pkd;
+	__be32 r8_lo;
+	__be64 r9;
+};
+
+#define FW_EQ_ETH_CMD_PFN_S	8
+#define FW_EQ_ETH_CMD_PFN_V(x)	((x) << FW_EQ_ETH_CMD_PFN_S)
+
+#define FW_EQ_ETH_CMD_VFN_S	0
+#define FW_EQ_ETH_CMD_VFN_V(x)	((x) << FW_EQ_ETH_CMD_VFN_S)
+
+#define FW_EQ_ETH_CMD_ALLOC_S		31
+#define FW_EQ_ETH_CMD_ALLOC_V(x)	((x) << FW_EQ_ETH_CMD_ALLOC_S)
+#define FW_EQ_ETH_CMD_ALLOC_F	FW_EQ_ETH_CMD_ALLOC_V(1U)
+
+#define FW_EQ_ETH_CMD_FREE_S	30
+#define FW_EQ_ETH_CMD_FREE_V(x)	((x) << FW_EQ_ETH_CMD_FREE_S)
+#define FW_EQ_ETH_CMD_FREE_F	FW_EQ_ETH_CMD_FREE_V(1U)
+
+#define FW_EQ_ETH_CMD_MODIFY_S		29
+#define FW_EQ_ETH_CMD_MODIFY_V(x)	((x) << FW_EQ_ETH_CMD_MODIFY_S)
+#define FW_EQ_ETH_CMD_MODIFY_F	FW_EQ_ETH_CMD_MODIFY_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTART_S		28
+#define FW_EQ_ETH_CMD_EQSTART_V(x)	((x) << FW_EQ_ETH_CMD_EQSTART_S)
+#define FW_EQ_ETH_CMD_EQSTART_F	FW_EQ_ETH_CMD_EQSTART_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTOP_S		27
+#define FW_EQ_ETH_CMD_EQSTOP_V(x)	((x) << FW_EQ_ETH_CMD_EQSTOP_S)
+#define FW_EQ_ETH_CMD_EQSTOP_F	FW_EQ_ETH_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_ETH_CMD_EQID_S	0
+#define FW_EQ_ETH_CMD_EQID_M	0xfffff
+#define FW_EQ_ETH_CMD_EQID_V(x)	((x) << FW_EQ_ETH_CMD_EQID_S)
+#define FW_EQ_ETH_CMD_EQID_G(x)	\
+	(((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M)
+
+#define FW_EQ_ETH_CMD_PHYSEQID_S	0
+#define FW_EQ_ETH_CMD_PHYSEQID_M	0xfffff
+#define FW_EQ_ETH_CMD_PHYSEQID_V(x)	((x) << FW_EQ_ETH_CMD_PHYSEQID_S)
+#define FW_EQ_ETH_CMD_PHYSEQID_G(x)	\
+	(((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M)
+
+#define FW_EQ_ETH_CMD_FETCHSZM_S	26
+#define FW_EQ_ETH_CMD_FETCHSZM_V(x)	((x) << FW_EQ_ETH_CMD_FETCHSZM_S)
+#define FW_EQ_ETH_CMD_FETCHSZM_F	FW_EQ_ETH_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_ETH_CMD_STATUSPGNS_S	25
+#define FW_EQ_ETH_CMD_STATUSPGNS_V(x)	((x) << FW_EQ_ETH_CMD_STATUSPGNS_S)
+
+#define FW_EQ_ETH_CMD_STATUSPGRO_S	24
+#define FW_EQ_ETH_CMD_STATUSPGRO_V(x)	((x) << FW_EQ_ETH_CMD_STATUSPGRO_S)
+
+#define FW_EQ_ETH_CMD_FETCHNS_S		23
+#define FW_EQ_ETH_CMD_FETCHNS_V(x)	((x) << FW_EQ_ETH_CMD_FETCHNS_S)
+
+#define FW_EQ_ETH_CMD_FETCHRO_S		22
+#define FW_EQ_ETH_CMD_FETCHRO_V(x)	((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F		FW_EQ_ETH_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_ETH_CMD_HOSTFCMODE_S	20
+#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x)	((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_ETH_CMD_CPRIO_S		19
+#define FW_EQ_ETH_CMD_CPRIO_V(x)	((x) << FW_EQ_ETH_CMD_CPRIO_S)
+
+#define FW_EQ_ETH_CMD_ONCHIP_S		18
+#define FW_EQ_ETH_CMD_ONCHIP_V(x)	((x) << FW_EQ_ETH_CMD_ONCHIP_S)
+
+#define FW_EQ_ETH_CMD_PCIECHN_S		16
+#define FW_EQ_ETH_CMD_PCIECHN_V(x)	((x) << FW_EQ_ETH_CMD_PCIECHN_S)
+
+#define FW_EQ_ETH_CMD_IQID_S	0
+#define FW_EQ_ETH_CMD_IQID_V(x)	((x) << FW_EQ_ETH_CMD_IQID_S)
+
+#define FW_EQ_ETH_CMD_DCAEN_S		31
+#define FW_EQ_ETH_CMD_DCAEN_V(x)	((x) << FW_EQ_ETH_CMD_DCAEN_S)
+
+#define FW_EQ_ETH_CMD_DCACPU_S		26
+#define FW_EQ_ETH_CMD_DCACPU_V(x)	((x) << FW_EQ_ETH_CMD_DCACPU_S)
+
+#define FW_EQ_ETH_CMD_FBMIN_S		23
+#define FW_EQ_ETH_CMD_FBMIN_V(x)	((x) << FW_EQ_ETH_CMD_FBMIN_S)
+
+#define FW_EQ_ETH_CMD_FBMAX_S		20
+#define FW_EQ_ETH_CMD_FBMAX_V(x)	((x) << FW_EQ_ETH_CMD_FBMAX_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S	19
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x)	((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_S	16
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x)	((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_ETH_CMD_EQSIZE_S		0
+#define FW_EQ_ETH_CMD_EQSIZE_V(x)	((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_S	30
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x)	((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_F	FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
+
+#define FW_EQ_ETH_CMD_VIID_S	16
+#define FW_EQ_ETH_CMD_VIID_V(x)	((x) << FW_EQ_ETH_CMD_VIID_S)
+
+struct fw_eq_ctrl_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 cmpliqid_eqid;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+};
+
+#define FW_EQ_CTRL_CMD_PFN_S	8
+#define FW_EQ_CTRL_CMD_PFN_V(x)	((x) << FW_EQ_CTRL_CMD_PFN_S)
+
+#define FW_EQ_CTRL_CMD_VFN_S	0
+#define FW_EQ_CTRL_CMD_VFN_V(x)	((x) << FW_EQ_CTRL_CMD_VFN_S)
+
+#define FW_EQ_CTRL_CMD_ALLOC_S		31
+#define FW_EQ_CTRL_CMD_ALLOC_V(x)	((x) << FW_EQ_CTRL_CMD_ALLOC_S)
+#define FW_EQ_CTRL_CMD_ALLOC_F		FW_EQ_CTRL_CMD_ALLOC_V(1U)
+
+#define FW_EQ_CTRL_CMD_FREE_S		30
+#define FW_EQ_CTRL_CMD_FREE_V(x)	((x) << FW_EQ_CTRL_CMD_FREE_S)
+#define FW_EQ_CTRL_CMD_FREE_F		FW_EQ_CTRL_CMD_FREE_V(1U)
+
+#define FW_EQ_CTRL_CMD_MODIFY_S		29
+#define FW_EQ_CTRL_CMD_MODIFY_V(x)	((x) << FW_EQ_CTRL_CMD_MODIFY_S)
+#define FW_EQ_CTRL_CMD_MODIFY_F		FW_EQ_CTRL_CMD_MODIFY_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTART_S	28
+#define FW_EQ_CTRL_CMD_EQSTART_V(x)	((x) << FW_EQ_CTRL_CMD_EQSTART_S)
+#define FW_EQ_CTRL_CMD_EQSTART_F	FW_EQ_CTRL_CMD_EQSTART_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTOP_S		27
+#define FW_EQ_CTRL_CMD_EQSTOP_V(x)	((x) << FW_EQ_CTRL_CMD_EQSTOP_S)
+#define FW_EQ_CTRL_CMD_EQSTOP_F		FW_EQ_CTRL_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID_S	20
+#define FW_EQ_CTRL_CMD_CMPLIQID_V(x)	((x) << FW_EQ_CTRL_CMD_CMPLIQID_S)
+
+#define FW_EQ_CTRL_CMD_EQID_S		0
+#define FW_EQ_CTRL_CMD_EQID_M		0xfffff
+#define FW_EQ_CTRL_CMD_EQID_V(x)	((x) << FW_EQ_CTRL_CMD_EQID_S)
+#define FW_EQ_CTRL_CMD_EQID_G(x)	\
+	(((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M)
+
+#define FW_EQ_CTRL_CMD_PHYSEQID_S	0
+#define FW_EQ_CTRL_CMD_PHYSEQID_M	0xfffff
+#define FW_EQ_CTRL_CMD_PHYSEQID_G(x)	\
+	(((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM_S	26
+#define FW_EQ_CTRL_CMD_FETCHSZM_V(x)	((x) << FW_EQ_CTRL_CMD_FETCHSZM_S)
+#define FW_EQ_CTRL_CMD_FETCHSZM_F	FW_EQ_CTRL_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGNS_S	25
+#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x)	((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S)
+#define FW_EQ_CTRL_CMD_STATUSPGNS_F	FW_EQ_CTRL_CMD_STATUSPGNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGRO_S	24
+#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x)	((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S)
+#define FW_EQ_CTRL_CMD_STATUSPGRO_F	FW_EQ_CTRL_CMD_STATUSPGRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHNS_S	23
+#define FW_EQ_CTRL_CMD_FETCHNS_V(x)	((x) << FW_EQ_CTRL_CMD_FETCHNS_S)
+#define FW_EQ_CTRL_CMD_FETCHNS_F	FW_EQ_CTRL_CMD_FETCHNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHRO_S	22
+#define FW_EQ_CTRL_CMD_FETCHRO_V(x)	((x) << FW_EQ_CTRL_CMD_FETCHRO_S)
+#define FW_EQ_CTRL_CMD_FETCHRO_F	FW_EQ_CTRL_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_S	20
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x)	((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_CTRL_CMD_CPRIO_S		19
+#define FW_EQ_CTRL_CMD_CPRIO_V(x)	((x) << FW_EQ_CTRL_CMD_CPRIO_S)
+
+#define FW_EQ_CTRL_CMD_ONCHIP_S		18
+#define FW_EQ_CTRL_CMD_ONCHIP_V(x)	((x) << FW_EQ_CTRL_CMD_ONCHIP_S)
+
+#define FW_EQ_CTRL_CMD_PCIECHN_S	16
+#define FW_EQ_CTRL_CMD_PCIECHN_V(x)	((x) << FW_EQ_CTRL_CMD_PCIECHN_S)
+
+#define FW_EQ_CTRL_CMD_IQID_S		0
+#define FW_EQ_CTRL_CMD_IQID_V(x)	((x) << FW_EQ_CTRL_CMD_IQID_S)
+
+#define FW_EQ_CTRL_CMD_DCAEN_S		31
+#define FW_EQ_CTRL_CMD_DCAEN_V(x)	((x) << FW_EQ_CTRL_CMD_DCAEN_S)
+
+#define FW_EQ_CTRL_CMD_DCACPU_S		26
+#define FW_EQ_CTRL_CMD_DCACPU_V(x)	((x) << FW_EQ_CTRL_CMD_DCACPU_S)
+
+#define FW_EQ_CTRL_CMD_FBMIN_S		23
+#define FW_EQ_CTRL_CMD_FBMIN_V(x)	((x) << FW_EQ_CTRL_CMD_FBMIN_S)
+
+#define FW_EQ_CTRL_CMD_FBMAX_S		20
+#define FW_EQ_CTRL_CMD_FBMAX_V(x)	((x) << FW_EQ_CTRL_CMD_FBMAX_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S		19
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x)	\
+	((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S	16
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x)	((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_CTRL_CMD_EQSIZE_S		0
+#define FW_EQ_CTRL_CMD_EQSIZE_V(x)	((x) << FW_EQ_CTRL_CMD_EQSIZE_S)
+
+struct fw_eq_ofld_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 eqid_pkd;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+};
+
+#define FW_EQ_OFLD_CMD_PFN_S	8
+#define FW_EQ_OFLD_CMD_PFN_V(x)	((x) << FW_EQ_OFLD_CMD_PFN_S)
+
+#define FW_EQ_OFLD_CMD_VFN_S	0
+#define FW_EQ_OFLD_CMD_VFN_V(x)	((x) << FW_EQ_OFLD_CMD_VFN_S)
+
+#define FW_EQ_OFLD_CMD_ALLOC_S		31
+#define FW_EQ_OFLD_CMD_ALLOC_V(x)	((x) << FW_EQ_OFLD_CMD_ALLOC_S)
+#define FW_EQ_OFLD_CMD_ALLOC_F		FW_EQ_OFLD_CMD_ALLOC_V(1U)
+
+#define FW_EQ_OFLD_CMD_FREE_S		30
+#define FW_EQ_OFLD_CMD_FREE_V(x)	((x) << FW_EQ_OFLD_CMD_FREE_S)
+#define FW_EQ_OFLD_CMD_FREE_F		FW_EQ_OFLD_CMD_FREE_V(1U)
+
+#define FW_EQ_OFLD_CMD_MODIFY_S		29
+#define FW_EQ_OFLD_CMD_MODIFY_V(x)	((x) << FW_EQ_OFLD_CMD_MODIFY_S)
+#define FW_EQ_OFLD_CMD_MODIFY_F		FW_EQ_OFLD_CMD_MODIFY_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTART_S	28
+#define FW_EQ_OFLD_CMD_EQSTART_V(x)	((x) << FW_EQ_OFLD_CMD_EQSTART_S)
+#define FW_EQ_OFLD_CMD_EQSTART_F	FW_EQ_OFLD_CMD_EQSTART_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTOP_S		27
+#define FW_EQ_OFLD_CMD_EQSTOP_V(x)	((x) << FW_EQ_OFLD_CMD_EQSTOP_S)
+#define FW_EQ_OFLD_CMD_EQSTOP_F		FW_EQ_OFLD_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQID_S		0
+#define FW_EQ_OFLD_CMD_EQID_M		0xfffff
+#define FW_EQ_OFLD_CMD_EQID_V(x)	((x) << FW_EQ_OFLD_CMD_EQID_S)
+#define FW_EQ_OFLD_CMD_EQID_G(x)	\
+	(((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M)
+
+#define FW_EQ_OFLD_CMD_PHYSEQID_S	0
+#define FW_EQ_OFLD_CMD_PHYSEQID_M	0xfffff
+#define FW_EQ_OFLD_CMD_PHYSEQID_G(x)	\
+	(((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM_S	26
+#define FW_EQ_OFLD_CMD_FETCHSZM_V(x)	((x) << FW_EQ_OFLD_CMD_FETCHSZM_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGNS_S	25
+#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x)	((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGRO_S	24
+#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x)	((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S)
+
+#define FW_EQ_OFLD_CMD_FETCHNS_S	23
+#define FW_EQ_OFLD_CMD_FETCHNS_V(x)	((x) << FW_EQ_OFLD_CMD_FETCHNS_S)
+
+#define FW_EQ_OFLD_CMD_FETCHRO_S	22
+#define FW_EQ_OFLD_CMD_FETCHRO_V(x)	((x) << FW_EQ_OFLD_CMD_FETCHRO_S)
+#define FW_EQ_OFLD_CMD_FETCHRO_F	FW_EQ_OFLD_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_S	20
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x)	((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_OFLD_CMD_CPRIO_S		19
+#define FW_EQ_OFLD_CMD_CPRIO_V(x)	((x) << FW_EQ_OFLD_CMD_CPRIO_S)
+
+#define FW_EQ_OFLD_CMD_ONCHIP_S		18
+#define FW_EQ_OFLD_CMD_ONCHIP_V(x)	((x) << FW_EQ_OFLD_CMD_ONCHIP_S)
+
+#define FW_EQ_OFLD_CMD_PCIECHN_S	16
+#define FW_EQ_OFLD_CMD_PCIECHN_V(x)	((x) << FW_EQ_OFLD_CMD_PCIECHN_S)
+
+#define FW_EQ_OFLD_CMD_IQID_S		0
+#define FW_EQ_OFLD_CMD_IQID_V(x)	((x) << FW_EQ_OFLD_CMD_IQID_S)
+
+#define FW_EQ_OFLD_CMD_DCAEN_S		31
+#define FW_EQ_OFLD_CMD_DCAEN_V(x)	((x) << FW_EQ_OFLD_CMD_DCAEN_S)
+
+#define FW_EQ_OFLD_CMD_DCACPU_S		26
+#define FW_EQ_OFLD_CMD_DCACPU_V(x)	((x) << FW_EQ_OFLD_CMD_DCACPU_S)
+
+#define FW_EQ_OFLD_CMD_FBMIN_S		23
+#define FW_EQ_OFLD_CMD_FBMIN_V(x)	((x) << FW_EQ_OFLD_CMD_FBMIN_S)
+
+#define FW_EQ_OFLD_CMD_FBMAX_S		20
+#define FW_EQ_OFLD_CMD_FBMAX_V(x)	((x) << FW_EQ_OFLD_CMD_FBMAX_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S		19
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x)	\
+	((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S	16
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x)	((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_OFLD_CMD_EQSIZE_S		0
+#define FW_EQ_OFLD_CMD_EQSIZE_V(x)	((x) << FW_EQ_OFLD_CMD_EQSIZE_S)
+
+/*
+ * Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+
+#define FW_VIID_PFN_S           8
+#define FW_VIID_PFN_M           0x7
+#define FW_VIID_PFN_G(x)        (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M)
+
+#define FW_VIID_VIVLD_S		7
+#define FW_VIID_VIVLD_M		0x1
+#define FW_VIID_VIVLD_G(x)	(((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M)
+
+#define FW_VIID_VIN_S		0
+#define FW_VIID_VIN_M		0x7F
+#define FW_VIID_VIN_G(x)	(((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M)
+
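+/* Decomposing a VI identifier with the macros above (illustrative only,
+ * where viid is a 16-bit VIID value handed back by the firmware):
+ *
+ *	unsigned int pf    = FW_VIID_PFN_G(viid);
+ *	unsigned int vivld = FW_VIID_VIVLD_G(viid);
+ *	unsigned int vin   = FW_VIID_VIN_G(viid);
+ */
+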
+struct fw_vi_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be16 type_viid;
+	u8 mac[6];
+	u8 portid_pkd;
+	u8 nmac;
+	u8 nmac0[6];
+	__be16 rsssize_pkd;
+	u8 nmac1[6];
+	__be16 idsiiq_pkd;
+	u8 nmac2[6];
+	__be16 idseiq_pkd;
+	u8 nmac3[6];
+	__be64 r9;
+	__be64 r10;
+};
+
+#define FW_VI_CMD_PFN_S		8
+#define FW_VI_CMD_PFN_V(x)	((x) << FW_VI_CMD_PFN_S)
+
+#define FW_VI_CMD_VFN_S		0
+#define FW_VI_CMD_VFN_V(x)	((x) << FW_VI_CMD_VFN_S)
+
+#define FW_VI_CMD_ALLOC_S	31
+#define FW_VI_CMD_ALLOC_V(x)	((x) << FW_VI_CMD_ALLOC_S)
+#define FW_VI_CMD_ALLOC_F	FW_VI_CMD_ALLOC_V(1U)
+
+#define FW_VI_CMD_FREE_S	30
+#define FW_VI_CMD_FREE_V(x)	((x) << FW_VI_CMD_FREE_S)
+#define FW_VI_CMD_FREE_F	FW_VI_CMD_FREE_V(1U)
+
+#define FW_VI_CMD_VIID_S	0
+#define FW_VI_CMD_VIID_M	0xfff
+#define FW_VI_CMD_VIID_V(x)	((x) << FW_VI_CMD_VIID_S)
+#define FW_VI_CMD_VIID_G(x)	(((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M)
+
+#define FW_VI_CMD_PORTID_S	4
+#define FW_VI_CMD_PORTID_M	0xf
+#define FW_VI_CMD_PORTID_V(x)	((x) << FW_VI_CMD_PORTID_S)
+#define FW_VI_CMD_PORTID_G(x)	\
+	(((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M)
+
+#define FW_VI_CMD_RSSSIZE_S	0
+#define FW_VI_CMD_RSSSIZE_M	0x7ff
+#define FW_VI_CMD_RSSSIZE_G(x)	\
+	(((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC		0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC	0x3FE
+#define FW_VI_MAC_MAC_BASED_FREE	0x3FD
+#define FW_CLS_TCAM_NUM_ENTRIES		336
+
+enum fw_vi_mac_smac {
+	FW_VI_MAC_MPS_TCAM_ENTRY,
+	FW_VI_MAC_MPS_TCAM_ONLY,
+	FW_VI_MAC_SMT_ONLY,
+	FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+enum fw_vi_mac_result {
+	FW_VI_MAC_R_SUCCESS,
+	FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
+	FW_VI_MAC_R_SMAC_FAIL,
+	FW_VI_MAC_R_F_ACL_CHECK
+};
+
+struct fw_vi_mac_cmd {
+	__be32 op_to_viid;
+	__be32 freemacs_to_len16;
+	union fw_vi_mac {
+		struct fw_vi_mac_exact {
+			__be16 valid_to_idx;
+			u8 macaddr[6];
+		} exact[7];
+		struct fw_vi_mac_hash {
+			__be64 hashvec;
+		} hash;
+	} u;
+};
+
+#define FW_VI_MAC_CMD_VIID_S	0
+#define FW_VI_MAC_CMD_VIID_V(x)	((x) << FW_VI_MAC_CMD_VIID_S)
+
+#define FW_VI_MAC_CMD_FREEMACS_S	31
+#define FW_VI_MAC_CMD_FREEMACS_V(x)	((x) << FW_VI_MAC_CMD_FREEMACS_S)
+
+#define FW_VI_MAC_CMD_HASHVECEN_S	23
+#define FW_VI_MAC_CMD_HASHVECEN_V(x)	((x) << FW_VI_MAC_CMD_HASHVECEN_S)
+#define FW_VI_MAC_CMD_HASHVECEN_F	FW_VI_MAC_CMD_HASHVECEN_V(1U)
+
+#define FW_VI_MAC_CMD_HASHUNIEN_S	22
+#define FW_VI_MAC_CMD_HASHUNIEN_V(x)	((x) << FW_VI_MAC_CMD_HASHUNIEN_S)
+
+#define FW_VI_MAC_CMD_VALID_S		15
+#define FW_VI_MAC_CMD_VALID_V(x)	((x) << FW_VI_MAC_CMD_VALID_S)
+#define FW_VI_MAC_CMD_VALID_F	FW_VI_MAC_CMD_VALID_V(1U)
+
+#define FW_VI_MAC_CMD_PRIO_S	12
+#define FW_VI_MAC_CMD_PRIO_V(x)	((x) << FW_VI_MAC_CMD_PRIO_S)
+
+#define FW_VI_MAC_CMD_SMAC_RESULT_S	10
+#define FW_VI_MAC_CMD_SMAC_RESULT_M	0x3
+#define FW_VI_MAC_CMD_SMAC_RESULT_V(x)	((x) << FW_VI_MAC_CMD_SMAC_RESULT_S)
+#define FW_VI_MAC_CMD_SMAC_RESULT_G(x)	\
+	(((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M)
+
+#define FW_VI_MAC_CMD_IDX_S	0
+#define FW_VI_MAC_CMD_IDX_M	0x3ff
+#define FW_VI_MAC_CMD_IDX_V(x)	((x) << FW_VI_MAC_CMD_IDX_S)
+#define FW_VI_MAC_CMD_IDX_G(x)	\
+	(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+
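+/* When adding an entry, one of the special indices above may be used in
+ * place of a real TCAM index, e.g. FW_VI_MAC_ADD_MAC to let the firmware
+ * pick a free exact-match slot.  An illustrative sketch, with p pointing at
+ * one u.exact[] entry of a fw_vi_mac_cmd:
+ *
+ *	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ *				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
+ */
+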
+#define FW_RXMODE_MTU_NO_CHG	65535
+
+struct fw_vi_rxmode_cmd {
+	__be32 op_to_viid;
+	__be32 retval_len16;
+	__be32 mtu_to_vlanexen;
+	__be32 r4_lo;
+};
+
+#define FW_VI_RXMODE_CMD_VIID_S		0
+#define FW_VI_RXMODE_CMD_VIID_V(x)	((x) << FW_VI_RXMODE_CMD_VIID_S)
+
+#define FW_VI_RXMODE_CMD_MTU_S		16
+#define FW_VI_RXMODE_CMD_MTU_M		0xffff
+#define FW_VI_RXMODE_CMD_MTU_V(x)	((x) << FW_VI_RXMODE_CMD_MTU_S)
+
+#define FW_VI_RXMODE_CMD_PROMISCEN_S	14
+#define FW_VI_RXMODE_CMD_PROMISCEN_M	0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN_V(x)	((x) << FW_VI_RXMODE_CMD_PROMISCEN_S)
+
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_S		12
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_M		0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x)	\
+	((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S)
+
+#define FW_VI_RXMODE_CMD_BROADCASTEN_S		10
+#define FW_VI_RXMODE_CMD_BROADCASTEN_M		0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x)	\
+	((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S)
+
+#define FW_VI_RXMODE_CMD_VLANEXEN_S	8
+#define FW_VI_RXMODE_CMD_VLANEXEN_M	0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN_V(x)	((x) << FW_VI_RXMODE_CMD_VLANEXEN_S)
+
+struct fw_vi_enable_cmd {
+	__be32 op_to_viid;
+	__be32 ien_to_len16;
+	__be16 blinkdur;
+	__be16 r3;
+	__be32 r4;
+};
+
+#define FW_VI_ENABLE_CMD_VIID_S         0
+#define FW_VI_ENABLE_CMD_VIID_V(x)      ((x) << FW_VI_ENABLE_CMD_VIID_S)
+
+#define FW_VI_ENABLE_CMD_IEN_S		31
+#define FW_VI_ENABLE_CMD_IEN_V(x)	((x) << FW_VI_ENABLE_CMD_IEN_S)
+
+#define FW_VI_ENABLE_CMD_EEN_S		30
+#define FW_VI_ENABLE_CMD_EEN_V(x)	((x) << FW_VI_ENABLE_CMD_EEN_S)
+
+#define FW_VI_ENABLE_CMD_LED_S		29
+#define FW_VI_ENABLE_CMD_LED_V(x)	((x) << FW_VI_ENABLE_CMD_LED_S)
+#define FW_VI_ENABLE_CMD_LED_F	FW_VI_ENABLE_CMD_LED_V(1U)
+
+#define FW_VI_ENABLE_CMD_DCB_INFO_S	28
+#define FW_VI_ENABLE_CMD_DCB_INFO_V(x)	((x) << FW_VI_ENABLE_CMD_DCB_INFO_S)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS	16
+enum fw_vi_stats_vf_index {
+	FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
+	FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
+	FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
+	FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_ERR_FRAMES_IX
+};
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS	17
+enum fw_vi_stats_pf_index {
+	FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+	FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+	FW_VI_PF_STAT_RX_BYTES_IX,
+	FW_VI_PF_STAT_RX_FRAMES_IX,
+	FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+	__be32 op_to_viid;
+	__be32 retval_len16;
+	union fw_vi_stats {
+		struct fw_vi_stats_ctl {
+			__be16 nstats_ix;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_vi_stats_pf {
+			__be64 tx_bcast_bytes;
+			__be64 tx_bcast_frames;
+			__be64 tx_mcast_bytes;
+			__be64 tx_mcast_frames;
+			__be64 tx_ucast_bytes;
+			__be64 tx_ucast_frames;
+			__be64 tx_offload_bytes;
+			__be64 tx_offload_frames;
+			__be64 rx_pf_bytes;
+			__be64 rx_pf_frames;
+			__be64 rx_bcast_bytes;
+			__be64 rx_bcast_frames;
+			__be64 rx_mcast_bytes;
+			__be64 rx_mcast_frames;
+			__be64 rx_ucast_bytes;
+			__be64 rx_ucast_frames;
+			__be64 rx_err_frames;
+		} pf;
+		struct fw_vi_stats_vf {
+			__be64 tx_bcast_bytes;
+			__be64 tx_bcast_frames;
+			__be64 tx_mcast_bytes;
+			__be64 tx_mcast_frames;
+			__be64 tx_ucast_bytes;
+			__be64 tx_ucast_frames;
+			__be64 tx_drop_frames;
+			__be64 tx_offload_bytes;
+			__be64 tx_offload_frames;
+			__be64 rx_bcast_bytes;
+			__be64 rx_bcast_frames;
+			__be64 rx_mcast_bytes;
+			__be64 rx_mcast_frames;
+			__be64 rx_ucast_bytes;
+			__be64 rx_ucast_frames;
+			__be64 rx_err_frames;
+		} vf;
+	} u;
+};
+
+#define FW_VI_STATS_CMD_VIID_S		0
+#define FW_VI_STATS_CMD_VIID_V(x)	((x) << FW_VI_STATS_CMD_VIID_S)
+
+#define FW_VI_STATS_CMD_NSTATS_S	12
+#define FW_VI_STATS_CMD_NSTATS_V(x)	((x) << FW_VI_STATS_CMD_NSTATS_S)
+
+#define FW_VI_STATS_CMD_IX_S	0
+#define FW_VI_STATS_CMD_IX_V(x)	((x) << FW_VI_STATS_CMD_IX_S)
+
+struct fw_acl_mac_cmd {
+	__be32 op_to_vfn;
+	__be32 en_to_len16;
+	u8 nmac;
+	u8 r3[7];
+	__be16 r4;
+	u8 macaddr0[6];
+	__be16 r5;
+	u8 macaddr1[6];
+	__be16 r6;
+	u8 macaddr2[6];
+	__be16 r7;
+	u8 macaddr3[6];
+};
+
+#define FW_ACL_MAC_CMD_PFN_S	8
+#define FW_ACL_MAC_CMD_PFN_V(x)	((x) << FW_ACL_MAC_CMD_PFN_S)
+
+#define FW_ACL_MAC_CMD_VFN_S	0
+#define FW_ACL_MAC_CMD_VFN_V(x)	((x) << FW_ACL_MAC_CMD_VFN_S)
+
+#define FW_ACL_MAC_CMD_EN_S	31
+#define FW_ACL_MAC_CMD_EN_V(x)	((x) << FW_ACL_MAC_CMD_EN_S)
+
+struct fw_acl_vlan_cmd {
+	__be32 op_to_vfn;
+	__be32 en_to_len16;
+	u8 nvlan;
+	u8 dropnovlan_fm;
+	u8 r3_lo[6];
+	__be16 vlanid[16];
+};
+
+#define FW_ACL_VLAN_CMD_PFN_S		8
+#define FW_ACL_VLAN_CMD_PFN_V(x)	((x) << FW_ACL_VLAN_CMD_PFN_S)
+
+#define FW_ACL_VLAN_CMD_VFN_S		0
+#define FW_ACL_VLAN_CMD_VFN_V(x)	((x) << FW_ACL_VLAN_CMD_VFN_S)
+
+#define FW_ACL_VLAN_CMD_EN_S	31
+#define FW_ACL_VLAN_CMD_EN_V(x)	((x) << FW_ACL_VLAN_CMD_EN_S)
+
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_S	7
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x)	((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
+
+#define FW_ACL_VLAN_CMD_FM_S	6
+#define FW_ACL_VLAN_CMD_FM_V(x)	((x) << FW_ACL_VLAN_CMD_FM_S)
+
+enum fw_port_cap {
+	FW_PORT_CAP_SPEED_100M		= 0x0001,
+	FW_PORT_CAP_SPEED_1G		= 0x0002,
+	FW_PORT_CAP_SPEED_2_5G		= 0x0004,
+	FW_PORT_CAP_SPEED_10G		= 0x0008,
+	FW_PORT_CAP_SPEED_40G		= 0x0010,
+	FW_PORT_CAP_SPEED_100G		= 0x0020,
+	FW_PORT_CAP_FC_RX		= 0x0040,
+	FW_PORT_CAP_FC_TX		= 0x0080,
+	FW_PORT_CAP_ANEG		= 0x0100,
+	FW_PORT_CAP_MDI_0		= 0x0200,
+	FW_PORT_CAP_MDI_1		= 0x0400,
+	FW_PORT_CAP_BEAN		= 0x0800,
+	FW_PORT_CAP_PMA_LPBK		= 0x1000,
+	FW_PORT_CAP_PCS_LPBK		= 0x2000,
+	FW_PORT_CAP_PHYXS_LPBK		= 0x4000,
+	FW_PORT_CAP_FAR_END_LPBK	= 0x8000,
+};
+
+enum fw_port_mdi {
+	FW_PORT_CAP_MDI_UNCHANGED,
+	FW_PORT_CAP_MDI_AUTO,
+	FW_PORT_CAP_MDI_F_STRAIGHT,
+	FW_PORT_CAP_MDI_F_CROSSOVER
+};
+
+#define FW_PORT_CAP_MDI_S 9
+#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S)
+
+enum fw_port_action {
+	FW_PORT_ACTION_L1_CFG		= 0x0001,
+	FW_PORT_ACTION_L2_CFG		= 0x0002,
+	FW_PORT_ACTION_GET_PORT_INFO	= 0x0003,
+	FW_PORT_ACTION_L2_PPP_CFG	= 0x0004,
+	FW_PORT_ACTION_L2_DCB_CFG	= 0x0005,
+	FW_PORT_ACTION_DCB_READ_TRANS	= 0x0006,
+	FW_PORT_ACTION_DCB_READ_RECV	= 0x0007,
+	FW_PORT_ACTION_DCB_READ_DET	= 0x0008,
+	FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
+	FW_PORT_ACTION_L1_LOW_PWR_EN	= 0x0011,
+	FW_PORT_ACTION_L2_WOL_MODE_EN	= 0x0012,
+	FW_PORT_ACTION_LPBK_TO_NORMAL	= 0x0020,
+	FW_PORT_ACTION_L1_LPBK		= 0x0021,
+	FW_PORT_ACTION_L1_PMA_LPBK	= 0x0022,
+	FW_PORT_ACTION_L1_PCS_LPBK	= 0x0023,
+	FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
+	FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
+	FW_PORT_ACTION_PHY_RESET	= 0x0040,
+	FW_PORT_ACTION_PMA_RESET	= 0x0041,
+	FW_PORT_ACTION_PCS_RESET	= 0x0042,
+	FW_PORT_ACTION_PHYXS_RESET	= 0x0043,
+	FW_PORT_ACTION_DTEXS_REEST	= 0x0044,
+	FW_PORT_ACTION_AN_RESET		= 0x0045
+};
+
+enum fw_port_l2cfg_ctlbf {
+	FW_PORT_L2_CTLBF_OVLAN0	= 0x01,
+	FW_PORT_L2_CTLBF_OVLAN1	= 0x02,
+	FW_PORT_L2_CTLBF_OVLAN2	= 0x04,
+	FW_PORT_L2_CTLBF_OVLAN3	= 0x08,
+	FW_PORT_L2_CTLBF_IVLAN	= 0x10,
+	FW_PORT_L2_CTLBF_TXIPG	= 0x20
+};
+
+enum fw_port_dcb_versions {
+	FW_PORT_DCB_VER_UNKNOWN,
+	FW_PORT_DCB_VER_CEE1D0,
+	FW_PORT_DCB_VER_CEE1D01,
+	FW_PORT_DCB_VER_IEEE,
+	FW_PORT_DCB_VER_AUTO = 7
+};
+
+enum fw_port_dcb_cfg {
+	FW_PORT_DCB_CFG_PG	= 0x01,
+	FW_PORT_DCB_CFG_PFC	= 0x02,
+	FW_PORT_DCB_CFG_APPL	= 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+	FW_PORT_DCB_CFG_SUCCESS	= 0x0,
+	FW_PORT_DCB_CFG_ERROR	= 0x1
+};
+
+enum fw_port_dcb_type {
+	FW_PORT_DCB_TYPE_PGID		= 0x00,
+	FW_PORT_DCB_TYPE_PGRATE		= 0x01,
+	FW_PORT_DCB_TYPE_PRIORATE	= 0x02,
+	FW_PORT_DCB_TYPE_PFC		= 0x03,
+	FW_PORT_DCB_TYPE_APP_ID		= 0x04,
+	FW_PORT_DCB_TYPE_CONTROL	= 0x05,
+};
+
+enum fw_port_dcb_feature_state {
+	FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0,
+	FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1,
+	FW_PORT_DCB_FEATURE_STATE_ERROR	= 0x2,
+	FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3,
+};
+
+struct fw_port_cmd {
+	__be32 op_to_portid;
+	__be32 action_to_len16;
+	union fw_port {
+		struct fw_port_l1cfg {
+			__be32 rcap;
+			__be32 r;
+		} l1cfg;
+		struct fw_port_l2cfg {
+			__u8   ctlbf;
+			__u8   ovlan3_to_ivlan0;
+			__be16 ivlantype;
+			__be16 txipg_force_pinfo;
+			__be16 mtu;
+			__be16 ovlan0mask;
+			__be16 ovlan0type;
+			__be16 ovlan1mask;
+			__be16 ovlan1type;
+			__be16 ovlan2mask;
+			__be16 ovlan2type;
+			__be16 ovlan3mask;
+			__be16 ovlan3type;
+		} l2cfg;
+		struct fw_port_info {
+			__be32 lstatus_to_modtype;
+			__be16 pcap;
+			__be16 acap;
+			__be16 mtu;
+			__u8   cbllen;
+			__u8   auxlinfo;
+			__u8   dcbxdis_pkd;
+			__u8   r8_lo[3];
+			__be64 r9;
+		} info;
+		struct fw_port_diags {
+			__u8   diagop;
+			__u8   r[3];
+			__be32 diagval;
+		} diags;
+		union fw_port_dcb {
+			struct fw_port_dcb_pgid {
+				__u8   type;
+				__u8   apply_pkd;
+				__u8   r10_lo[2];
+				__be32 pgid;
+				__be64 r11;
+			} pgid;
+			struct fw_port_dcb_pgrate {
+				__u8   type;
+				__u8   apply_pkd;
+				__u8   r10_lo[5];
+				__u8   num_tcs_supported;
+				__u8   pgrate[8];
+				__u8   tsa[8];
+			} pgrate;
+			struct fw_port_dcb_priorate {
+				__u8   type;
+				__u8   apply_pkd;
+				__u8   r10_lo[6];
+				__u8   strict_priorate[8];
+			} priorate;
+			struct fw_port_dcb_pfc {
+				__u8   type;
+				__u8   pfcen;
+				__u8   r10[5];
+				__u8   max_pfc_tcs;
+				__be64 r11;
+			} pfc;
+			struct fw_port_app_priority {
+				__u8   type;
+				__u8   r10[2];
+				__u8   idx;
+				__u8   user_prio_map;
+				__u8   sel_field;
+				__be16 protocolid;
+				__be64 r12;
+			} app_priority;
+			struct fw_port_dcb_control {
+				__u8   type;
+				__u8   all_syncd_pkd;
+				__be16 dcb_version_to_app_state;
+				__be32 r11;
+				__be64 r12;
+			} control;
+		} dcb;
+	} u;
+};
+
+#define FW_PORT_CMD_READ_S	22
+#define FW_PORT_CMD_READ_V(x)	((x) << FW_PORT_CMD_READ_S)
+#define FW_PORT_CMD_READ_F	FW_PORT_CMD_READ_V(1U)
+
+#define FW_PORT_CMD_PORTID_S	0
+#define FW_PORT_CMD_PORTID_M	0xf
+#define FW_PORT_CMD_PORTID_V(x)	((x) << FW_PORT_CMD_PORTID_S)
+#define FW_PORT_CMD_PORTID_G(x)	\
+	(((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M)
+
+#define FW_PORT_CMD_ACTION_S	16
+#define FW_PORT_CMD_ACTION_M	0xffff
+#define FW_PORT_CMD_ACTION_V(x)	((x) << FW_PORT_CMD_ACTION_S)
+#define FW_PORT_CMD_ACTION_G(x)	\
+	(((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M)
+
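+/* The action field selects which member of the union in fw_port_cmd is
+ * meaningful.  Requesting link status via FW_PORT_ACTION_GET_PORT_INFO might
+ * be encoded as (illustrative sketch; FW_LEN16() is defined earlier in this
+ * file and supplies the command length):
+ *
+ *	c.action_to_len16 =
+ *		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ *			    FW_LEN16(c));
+ */
+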
+#define FW_PORT_CMD_OVLAN3_S	7
+#define FW_PORT_CMD_OVLAN3_V(x)	((x) << FW_PORT_CMD_OVLAN3_S)
+
+#define FW_PORT_CMD_OVLAN2_S	6
+#define FW_PORT_CMD_OVLAN2_V(x)	((x) << FW_PORT_CMD_OVLAN2_S)
+
+#define FW_PORT_CMD_OVLAN1_S	5
+#define FW_PORT_CMD_OVLAN1_V(x)	((x) << FW_PORT_CMD_OVLAN1_S)
+
+#define FW_PORT_CMD_OVLAN0_S	4
+#define FW_PORT_CMD_OVLAN0_V(x)	((x) << FW_PORT_CMD_OVLAN0_S)
+
+#define FW_PORT_CMD_IVLAN0_S	3
+#define FW_PORT_CMD_IVLAN0_V(x)	((x) << FW_PORT_CMD_IVLAN0_S)
+
+#define FW_PORT_CMD_TXIPG_S	3
+#define FW_PORT_CMD_TXIPG_V(x)	((x) << FW_PORT_CMD_TXIPG_S)
+
+#define FW_PORT_CMD_LSTATUS_S           31
+#define FW_PORT_CMD_LSTATUS_M           0x1
+#define FW_PORT_CMD_LSTATUS_V(x)        ((x) << FW_PORT_CMD_LSTATUS_S)
+#define FW_PORT_CMD_LSTATUS_G(x)        \
+	(((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M)
+#define FW_PORT_CMD_LSTATUS_F   FW_PORT_CMD_LSTATUS_V(1U)
+
+#define FW_PORT_CMD_LSPEED_S	24
+#define FW_PORT_CMD_LSPEED_M	0x3f
+#define FW_PORT_CMD_LSPEED_V(x)	((x) << FW_PORT_CMD_LSPEED_S)
+#define FW_PORT_CMD_LSPEED_G(x)	\
+	(((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M)
+
+#define FW_PORT_CMD_TXPAUSE_S		23
+#define FW_PORT_CMD_TXPAUSE_V(x)	((x) << FW_PORT_CMD_TXPAUSE_S)
+#define FW_PORT_CMD_TXPAUSE_F	FW_PORT_CMD_TXPAUSE_V(1U)
+
+#define FW_PORT_CMD_RXPAUSE_S		22
+#define FW_PORT_CMD_RXPAUSE_V(x)	((x) << FW_PORT_CMD_RXPAUSE_S)
+#define FW_PORT_CMD_RXPAUSE_F	FW_PORT_CMD_RXPAUSE_V(1U)
+
+#define FW_PORT_CMD_MDIOCAP_S		21
+#define FW_PORT_CMD_MDIOCAP_V(x)	((x) << FW_PORT_CMD_MDIOCAP_S)
+#define FW_PORT_CMD_MDIOCAP_F	FW_PORT_CMD_MDIOCAP_V(1U)
+
+#define FW_PORT_CMD_MDIOADDR_S		16
+#define FW_PORT_CMD_MDIOADDR_M		0x1f
+#define FW_PORT_CMD_MDIOADDR_G(x)	\
+	(((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M)
+
+#define FW_PORT_CMD_LPTXPAUSE_S		15
+#define FW_PORT_CMD_LPTXPAUSE_V(x)	((x) << FW_PORT_CMD_LPTXPAUSE_S)
+#define FW_PORT_CMD_LPTXPAUSE_F	FW_PORT_CMD_LPTXPAUSE_V(1U)
+
+#define FW_PORT_CMD_LPRXPAUSE_S		14
+#define FW_PORT_CMD_LPRXPAUSE_V(x)	((x) << FW_PORT_CMD_LPRXPAUSE_S)
+#define FW_PORT_CMD_LPRXPAUSE_F	FW_PORT_CMD_LPRXPAUSE_V(1U)
+
+#define FW_PORT_CMD_PTYPE_S	8
+#define FW_PORT_CMD_PTYPE_M	0x1f
+#define FW_PORT_CMD_PTYPE_G(x)	\
+	(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+
+#define FW_PORT_CMD_MODTYPE_S		0
+#define FW_PORT_CMD_MODTYPE_M		0x1f
+#define FW_PORT_CMD_MODTYPE_V(x)	((x) << FW_PORT_CMD_MODTYPE_S)
+#define FW_PORT_CMD_MODTYPE_G(x)	\
+	(((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M)
+
+#define FW_PORT_CMD_DCBXDIS_S		7
+#define FW_PORT_CMD_DCBXDIS_V(x)	((x) << FW_PORT_CMD_DCBXDIS_S)
+#define FW_PORT_CMD_DCBXDIS_F	FW_PORT_CMD_DCBXDIS_V(1U)
+
+#define FW_PORT_CMD_APPLY_S	7
+#define FW_PORT_CMD_APPLY_V(x)	((x) << FW_PORT_CMD_APPLY_S)
+#define FW_PORT_CMD_APPLY_F	FW_PORT_CMD_APPLY_V(1U)
+
+#define FW_PORT_CMD_ALL_SYNCD_S		7
+#define FW_PORT_CMD_ALL_SYNCD_V(x)	((x) << FW_PORT_CMD_ALL_SYNCD_S)
+#define FW_PORT_CMD_ALL_SYNCD_F	FW_PORT_CMD_ALL_SYNCD_V(1U)
+
+#define FW_PORT_CMD_DCB_VERSION_S	12
+#define FW_PORT_CMD_DCB_VERSION_M	0x7
+#define FW_PORT_CMD_DCB_VERSION_G(x)	\
+	(((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M)
+
+enum fw_port_type {
+	FW_PORT_TYPE_FIBER_XFI,
+	FW_PORT_TYPE_FIBER_XAUI,
+	FW_PORT_TYPE_BT_SGMII,
+	FW_PORT_TYPE_BT_XFI,
+	FW_PORT_TYPE_BT_XAUI,
+	FW_PORT_TYPE_KX4,
+	FW_PORT_TYPE_CX4,
+	FW_PORT_TYPE_KX,
+	FW_PORT_TYPE_KR,
+	FW_PORT_TYPE_SFP,
+	FW_PORT_TYPE_BP_AP,
+	FW_PORT_TYPE_BP4_AP,
+	FW_PORT_TYPE_QSFP_10G,
+	FW_PORT_TYPE_QSA,
+	FW_PORT_TYPE_QSFP,
+	FW_PORT_TYPE_BP40_BA,
+
+	FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
+};
+
+enum fw_port_module_type {
+	FW_PORT_MOD_TYPE_NA,
+	FW_PORT_MOD_TYPE_LR,
+	FW_PORT_MOD_TYPE_SR,
+	FW_PORT_MOD_TYPE_ER,
+	FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
+	FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
+	FW_PORT_MOD_TYPE_LRM,
+	FW_PORT_MOD_TYPE_ERROR		= FW_PORT_CMD_MODTYPE_M - 3,
+	FW_PORT_MOD_TYPE_UNKNOWN	= FW_PORT_CMD_MODTYPE_M - 2,
+	FW_PORT_MOD_TYPE_NOTSUPPORTED	= FW_PORT_CMD_MODTYPE_M - 1,
+
+	FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M
+};
+
+enum fw_port_mod_sub_type {
+	FW_PORT_MOD_SUB_TYPE_NA,
+	FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
+	FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
+	FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
+	FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
+	FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
+	FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
+
+	/* The following will never be in the VPD.  They are TWINAX cable
+	 * lengths decoded from SFP+ module i2c PROMs.  These should
+	 * almost certainly go somewhere else ...
+	 */
+	FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
+	FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
+	FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
+	FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
+};
+
+enum fw_port_stats_tx_index {
+	FW_STAT_TX_PORT_BYTES_IX = 0,
+	FW_STAT_TX_PORT_FRAMES_IX,
+	FW_STAT_TX_PORT_BCAST_IX,
+	FW_STAT_TX_PORT_MCAST_IX,
+	FW_STAT_TX_PORT_UCAST_IX,
+	FW_STAT_TX_PORT_ERROR_IX,
+	FW_STAT_TX_PORT_64B_IX,
+	FW_STAT_TX_PORT_65B_127B_IX,
+	FW_STAT_TX_PORT_128B_255B_IX,
+	FW_STAT_TX_PORT_256B_511B_IX,
+	FW_STAT_TX_PORT_512B_1023B_IX,
+	FW_STAT_TX_PORT_1024B_1518B_IX,
+	FW_STAT_TX_PORT_1519B_MAX_IX,
+	FW_STAT_TX_PORT_DROP_IX,
+	FW_STAT_TX_PORT_PAUSE_IX,
+	FW_STAT_TX_PORT_PPP0_IX,
+	FW_STAT_TX_PORT_PPP1_IX,
+	FW_STAT_TX_PORT_PPP2_IX,
+	FW_STAT_TX_PORT_PPP3_IX,
+	FW_STAT_TX_PORT_PPP4_IX,
+	FW_STAT_TX_PORT_PPP5_IX,
+	FW_STAT_TX_PORT_PPP6_IX,
+	FW_STAT_TX_PORT_PPP7_IX,
+	FW_NUM_PORT_TX_STATS
+};
+
+enum fw_port_stat_rx_index {
+	FW_STAT_RX_PORT_BYTES_IX = 0,
+	FW_STAT_RX_PORT_FRAMES_IX,
+	FW_STAT_RX_PORT_BCAST_IX,
+	FW_STAT_RX_PORT_MCAST_IX,
+	FW_STAT_RX_PORT_UCAST_IX,
+	FW_STAT_RX_PORT_MTU_ERROR_IX,
+	FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+	FW_STAT_RX_PORT_CRC_ERROR_IX,
+	FW_STAT_RX_PORT_LEN_ERROR_IX,
+	FW_STAT_RX_PORT_SYM_ERROR_IX,
+	FW_STAT_RX_PORT_64B_IX,
+	FW_STAT_RX_PORT_65B_127B_IX,
+	FW_STAT_RX_PORT_128B_255B_IX,
+	FW_STAT_RX_PORT_256B_511B_IX,
+	FW_STAT_RX_PORT_512B_1023B_IX,
+	FW_STAT_RX_PORT_1024B_1518B_IX,
+	FW_STAT_RX_PORT_1519B_MAX_IX,
+	FW_STAT_RX_PORT_PAUSE_IX,
+	FW_STAT_RX_PORT_PPP0_IX,
+	FW_STAT_RX_PORT_PPP1_IX,
+	FW_STAT_RX_PORT_PPP2_IX,
+	FW_STAT_RX_PORT_PPP3_IX,
+	FW_STAT_RX_PORT_PPP4_IX,
+	FW_STAT_RX_PORT_PPP5_IX,
+	FW_STAT_RX_PORT_PPP6_IX,
+	FW_STAT_RX_PORT_PPP7_IX,
+	FW_STAT_RX_PORT_LESS_64B_IX,
+	FW_STAT_RX_PORT_MAC_ERROR_IX,
+	FW_NUM_PORT_RX_STATS
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
+struct fw_port_stats_cmd {
+	__be32 op_to_portid;
+	__be32 retval_len16;
+	union fw_port_stats {
+		struct fw_port_stats_ctl {
+			u8 nstats_bg_bm;
+			u8 tx_ix;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_port_stats_all {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 tx_bcast;
+			__be64 tx_mcast;
+			__be64 tx_ucast;
+			__be64 tx_error;
+			__be64 tx_64b;
+			__be64 tx_65b_127b;
+			__be64 tx_128b_255b;
+			__be64 tx_256b_511b;
+			__be64 tx_512b_1023b;
+			__be64 tx_1024b_1518b;
+			__be64 tx_1519b_max;
+			__be64 tx_drop;
+			__be64 tx_pause;
+			__be64 tx_ppp0;
+			__be64 tx_ppp1;
+			__be64 tx_ppp2;
+			__be64 tx_ppp3;
+			__be64 tx_ppp4;
+			__be64 tx_ppp5;
+			__be64 tx_ppp6;
+			__be64 tx_ppp7;
+			__be64 rx_bytes;
+			__be64 rx_frames;
+			__be64 rx_bcast;
+			__be64 rx_mcast;
+			__be64 rx_ucast;
+			__be64 rx_mtu_error;
+			__be64 rx_mtu_crc_error;
+			__be64 rx_crc_error;
+			__be64 rx_len_error;
+			__be64 rx_sym_error;
+			__be64 rx_64b;
+			__be64 rx_65b_127b;
+			__be64 rx_128b_255b;
+			__be64 rx_256b_511b;
+			__be64 rx_512b_1023b;
+			__be64 rx_1024b_1518b;
+			__be64 rx_1519b_max;
+			__be64 rx_pause;
+			__be64 rx_ppp0;
+			__be64 rx_ppp1;
+			__be64 rx_ppp2;
+			__be64 rx_ppp3;
+			__be64 rx_ppp4;
+			__be64 rx_ppp5;
+			__be64 rx_ppp6;
+			__be64 rx_ppp7;
+			__be64 rx_less_64b;
+			__be64 rx_bg_drop;
+			__be64 rx_bg_trunc;
+		} all;
+	} u;
+};
+
+/* port loopback stats */
+#define FW_NUM_LB_STATS 16
+enum fw_port_lb_stats_index {
+	FW_STAT_LB_PORT_BYTES_IX,
+	FW_STAT_LB_PORT_FRAMES_IX,
+	FW_STAT_LB_PORT_BCAST_IX,
+	FW_STAT_LB_PORT_MCAST_IX,
+	FW_STAT_LB_PORT_UCAST_IX,
+	FW_STAT_LB_PORT_ERROR_IX,
+	FW_STAT_LB_PORT_64B_IX,
+	FW_STAT_LB_PORT_65B_127B_IX,
+	FW_STAT_LB_PORT_128B_255B_IX,
+	FW_STAT_LB_PORT_256B_511B_IX,
+	FW_STAT_LB_PORT_512B_1023B_IX,
+	FW_STAT_LB_PORT_1024B_1518B_IX,
+	FW_STAT_LB_PORT_1519B_MAX_IX,
+	FW_STAT_LB_PORT_DROP_FRAMES_IX
+};
+
+struct fw_port_lb_stats_cmd {
+	__be32 op_to_lbport;
+	__be32 retval_len16;
+	union fw_port_lb_stats {
+		struct fw_port_lb_stats_ctl {
+			u8 nstats_bg_bm;
+			u8 ix_pkd;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_port_lb_stats_all {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 tx_bcast;
+			__be64 tx_mcast;
+			__be64 tx_ucast;
+			__be64 tx_error;
+			__be64 tx_64b;
+			__be64 tx_65b_127b;
+			__be64 tx_128b_255b;
+			__be64 tx_256b_511b;
+			__be64 tx_512b_1023b;
+			__be64 tx_1024b_1518b;
+			__be64 tx_1519b_max;
+			__be64 rx_lb_drop;
+			__be64 rx_lb_trunc;
+		} all;
+	} u;
+};
+
+struct fw_rss_ind_tbl_cmd {
+	__be32 op_to_viid;
+	__be32 retval_len16;
+	__be16 niqid;
+	__be16 startidx;
+	__be32 r3;
+	__be32 iq0_to_iq2;
+	__be32 iq3_to_iq5;
+	__be32 iq6_to_iq8;
+	__be32 iq9_to_iq11;
+	__be32 iq12_to_iq14;
+	__be32 iq15_to_iq17;
+	__be32 iq18_to_iq20;
+	__be32 iq21_to_iq23;
+	__be32 iq24_to_iq26;
+	__be32 iq27_to_iq29;
+	__be32 iq30_iq31;
+	__be32 r15_lo;
+};
+
+#define FW_RSS_IND_TBL_CMD_VIID_S	0
+#define FW_RSS_IND_TBL_CMD_VIID_V(x)	((x) << FW_RSS_IND_TBL_CMD_VIID_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ0_S	20
+#define FW_RSS_IND_TBL_CMD_IQ0_V(x)	((x) << FW_RSS_IND_TBL_CMD_IQ0_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ1_S	10
+#define FW_RSS_IND_TBL_CMD_IQ1_V(x)	((x) << FW_RSS_IND_TBL_CMD_IQ1_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ2_S	0
+#define FW_RSS_IND_TBL_CMD_IQ2_V(x)	((x) << FW_RSS_IND_TBL_CMD_IQ2_S)
+
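+/* Illustrative sketch (not itself part of the firmware API): each iqN_to_iqM
+ * word of struct fw_rss_ind_tbl_cmd packs three 10-bit Ingress Queue IDs
+ * using the shifts above, e.g. with hypothetical qid0/qid1/qid2 values:
+ *
+ *	cmd.iq0_to_iq2 = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qid0) |
+ *				     FW_RSS_IND_TBL_CMD_IQ1_V(qid1) |
+ *				     FW_RSS_IND_TBL_CMD_IQ2_V(qid2));
+ */
+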
+struct fw_rss_glb_config_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	union fw_rss_glb_config {
+		struct fw_rss_glb_config_manual {
+			__be32 mode_pkd;
+			__be32 r3;
+			__be64 r4;
+			__be64 r5;
+		} manual;
+		struct fw_rss_glb_config_basicvirtual {
+			__be32 mode_pkd;
+			__be32 synmapen_to_hashtoeplitz;
+			__be64 r8;
+			__be64 r9;
+		} basicvirtual;
+	} u;
+};
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_S	28
+#define FW_RSS_GLB_CONFIG_CMD_MODE_M	0xf
+#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x)	((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x)	\
+	(((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL	0
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL	1
+
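+/* Illustrative sketch: a driver can recover the global RSS mode from the
+ * reply to this command (hypothetical "rpl" variable) with the _G helper
+ * above and then dispatch on MANUAL vs. BASICVIRTUAL:
+ *
+ *	mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(be32_to_cpu(rpl.u.manual.mode_pkd));
+ */
+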
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S	8
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F	\
+	FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S		7
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F	\
+	FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S		6
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F	\
+	FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S		5
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F	\
+	FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S		4
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F	\
+	FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S	3
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F	\
+	FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S	2
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F	\
+	FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S	1
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F	\
+	FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S	0
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x)	\
+	((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F	\
+	FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U)
+
+struct fw_rss_vi_config_cmd {
+	__be32 op_to_viid;
+#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
+	__be32 retval_len16;
+	union fw_rss_vi_config {
+		struct fw_rss_vi_config_manual {
+			__be64 r3;
+			__be64 r4;
+			__be64 r5;
+		} manual;
+		struct fw_rss_vi_config_basicvirtual {
+			__be32 r6;
+			__be32 defaultq_to_udpen;
+			__be64 r9;
+			__be64 r10;
+		} basicvirtual;
+	} u;
+};
+
+#define FW_RSS_VI_CONFIG_CMD_VIID_S	0
+#define FW_RSS_VI_CONFIG_CMD_VIID_V(x)	((x) << FW_RSS_VI_CONFIG_CMD_VIID_S)
+
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S		16
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M		0x3ff
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x)	\
+	((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x)	\
+	(((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \
+	 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S	4
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x)	\
+	((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F	\
+	FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S	3
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x)	\
+	((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F	\
+	FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S	2
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x)	\
+	((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F	\
+	FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S	1
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x)	\
+	((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F	\
+	FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_S	0
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x)	((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_F	FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U)
+
+struct fw_clip_cmd {
+	__be32 op_to_write;
+	__be32 alloc_to_len16;
+	__be64 ip_hi;
+	__be64 ip_lo;
+	__be32 r4[2];
+};
+
+#define FW_CLIP_CMD_ALLOC_S     31
+#define FW_CLIP_CMD_ALLOC_V(x)  ((x) << FW_CLIP_CMD_ALLOC_S)
+#define FW_CLIP_CMD_ALLOC_F     FW_CLIP_CMD_ALLOC_V(1U)
+
+#define FW_CLIP_CMD_FREE_S      30
+#define FW_CLIP_CMD_FREE_V(x)   ((x) << FW_CLIP_CMD_FREE_S)
+#define FW_CLIP_CMD_FREE_F      FW_CLIP_CMD_FREE_V(1U)
+
+enum fw_error_type {
+	FW_ERROR_TYPE_EXCEPTION		= 0x0,
+	FW_ERROR_TYPE_HWMODULE		= 0x1,
+	FW_ERROR_TYPE_WR		= 0x2,
+	FW_ERROR_TYPE_ACL		= 0x3,
+};
+
+struct fw_error_cmd {
+	__be32 op_to_type;
+	__be32 len16_pkd;
+	union fw_error {
+		struct fw_error_exception {
+			__be32 info[6];
+		} exception;
+		struct fw_error_hwmodule {
+			__be32 regaddr;
+			__be32 regval;
+		} hwmodule;
+		struct fw_error_wr {
+			__be16 cidx;
+			__be16 pfn_vfn;
+			__be32 eqid;
+			u8 wrhdr[16];
+		} wr;
+		struct fw_error_acl {
+			__be16 cidx;
+			__be16 pfn_vfn;
+			__be32 eqid;
+			__be16 mv_pkd;
+			u8 val[6];
+			__be64 r4;
+		} acl;
+	} u;
+};
+
+struct fw_debug_cmd {
+	__be32 op_type;
+	__be32 len16_pkd;
+	union fw_debug {
+		struct fw_debug_assert {
+			__be32 fcid;
+			__be32 line;
+			__be32 x;
+			__be32 y;
+			u8 filename_0_7[8];
+			u8 filename_8_15[8];
+			__be64 r3;
+		} assert;
+		struct fw_debug_prt {
+			__be16 dprtstridx;
+			__be16 r3[3];
+			__be32 dprtstrparam0;
+			__be32 dprtstrparam1;
+			__be32 dprtstrparam2;
+			__be32 dprtstrparam3;
+		} prt;
+	} u;
+};
+
+#define FW_DEBUG_CMD_TYPE_S	0
+#define FW_DEBUG_CMD_TYPE_M	0xff
+#define FW_DEBUG_CMD_TYPE_G(x)	\
+	(((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
+
+#define PCIE_FW_ERR_S		31
+#define PCIE_FW_ERR_V(x)	((x) << PCIE_FW_ERR_S)
+#define PCIE_FW_ERR_F		PCIE_FW_ERR_V(1U)
+
+#define PCIE_FW_INIT_S		30
+#define PCIE_FW_INIT_V(x)	((x) << PCIE_FW_INIT_S)
+#define PCIE_FW_INIT_F		PCIE_FW_INIT_V(1U)
+
+#define PCIE_FW_HALT_S          29
+#define PCIE_FW_HALT_V(x)       ((x) << PCIE_FW_HALT_S)
+#define PCIE_FW_HALT_F          PCIE_FW_HALT_V(1U)
+
+#define PCIE_FW_EVAL_S		24
+#define PCIE_FW_EVAL_M		0x7
+#define PCIE_FW_EVAL_G(x)	(((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M)
+
+#define PCIE_FW_MASTER_VLD_S	15
+#define PCIE_FW_MASTER_VLD_V(x)	((x) << PCIE_FW_MASTER_VLD_S)
+#define PCIE_FW_MASTER_VLD_F	PCIE_FW_MASTER_VLD_V(1U)
+
+#define PCIE_FW_MASTER_S	12
+#define PCIE_FW_MASTER_M	0x7
+#define PCIE_FW_MASTER_V(x)	((x) << PCIE_FW_MASTER_S)
+#define PCIE_FW_MASTER_G(x)	(((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M)
+
+struct fw_hdr {
+	u8 ver;
+	u8 chip;			/* terminator chip type */
+	__be16	len512;			/* bin length in units of 512-bytes */
+	__be32	fw_ver;			/* firmware version */
+	__be32	tp_microcode_ver;
+	u8 intfver_nic;
+	u8 intfver_vnic;
+	u8 intfver_ofld;
+	u8 intfver_ri;
+	u8 intfver_iscsipdu;
+	u8 intfver_iscsi;
+	u8 intfver_fcoepdu;
+	u8 intfver_fcoe;
+	__u32   reserved2;
+	__u32   reserved3;
+	__u32   reserved4;
+	__be32  flags;
+	__be32  reserved6[23];
+};
+
+enum fw_hdr_chip {
+	FW_HDR_CHIP_T4,
+	FW_HDR_CHIP_T5,
+	FW_HDR_CHIP_T6
+};
+
+#define FW_HDR_FW_VER_MAJOR_S	24
+#define FW_HDR_FW_VER_MAJOR_M	0xff
+#define FW_HDR_FW_VER_MAJOR_V(x) \
+	((x) << FW_HDR_FW_VER_MAJOR_S)
+#define FW_HDR_FW_VER_MAJOR_G(x) \
+	(((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
+
+#define FW_HDR_FW_VER_MINOR_S	16
+#define FW_HDR_FW_VER_MINOR_M	0xff
+#define FW_HDR_FW_VER_MINOR_V(x) \
+	((x) << FW_HDR_FW_VER_MINOR_S)
+#define FW_HDR_FW_VER_MINOR_G(x) \
+	(((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
+
+#define FW_HDR_FW_VER_MICRO_S	8
+#define FW_HDR_FW_VER_MICRO_M	0xff
+#define FW_HDR_FW_VER_MICRO_V(x) \
+	((x) << FW_HDR_FW_VER_MICRO_S)
+#define FW_HDR_FW_VER_MICRO_G(x) \
+	(((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
+
+#define FW_HDR_FW_VER_BUILD_S	0
+#define FW_HDR_FW_VER_BUILD_M	0xff
+#define FW_HDR_FW_VER_BUILD_V(x) \
+	((x) << FW_HDR_FW_VER_BUILD_S)
+#define FW_HDR_FW_VER_BUILD_G(x) \
+	(((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
+
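+/* Illustrative sketch: a firmware version word packs one byte each of
+ * MAJOR.MINOR.MICRO.BUILD, so (after be32_to_cpu() on fw_hdr.fw_ver) it can
+ * be built or taken apart with the helpers above:
+ *
+ *	ver   = FW_HDR_FW_VER_MAJOR_V(1) | FW_HDR_FW_VER_MINOR_V(14) |
+ *		FW_HDR_FW_VER_MICRO_V(4) | FW_HDR_FW_VER_BUILD_V(0);
+ *	major = FW_HDR_FW_VER_MAJOR_G(ver);
+ */
+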
+enum fw_hdr_intfver {
+	FW_HDR_INTFVER_NIC      = 0x00,
+	FW_HDR_INTFVER_VNIC     = 0x00,
+	FW_HDR_INTFVER_OFLD     = 0x00,
+	FW_HDR_INTFVER_RI       = 0x00,
+	FW_HDR_INTFVER_ISCSIPDU = 0x00,
+	FW_HDR_INTFVER_ISCSI    = 0x00,
+	FW_HDR_INTFVER_FCOEPDU  = 0x00,
+	FW_HDR_INTFVER_FCOE     = 0x00,
+};
+
+enum fw_hdr_flags {
+	FW_HDR_FLAGS_RESET_HALT = 0x00000001,
+};
+
+/* length of the formatting string  */
+#define FW_DEVLOG_FMT_LEN	192
+
+/* maximum number of the formatting string parameters */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/* priority levels */
+enum fw_devlog_level {
+	FW_DEVLOG_LEVEL_EMERG	= 0x0,
+	FW_DEVLOG_LEVEL_CRIT	= 0x1,
+	FW_DEVLOG_LEVEL_ERR	= 0x2,
+	FW_DEVLOG_LEVEL_NOTICE	= 0x3,
+	FW_DEVLOG_LEVEL_INFO	= 0x4,
+	FW_DEVLOG_LEVEL_DEBUG	= 0x5,
+	FW_DEVLOG_LEVEL_MAX	= 0x5,
+};
+
+/* facilities that may send a log message */
+enum fw_devlog_facility {
+	FW_DEVLOG_FACILITY_CORE		= 0x00,
+	FW_DEVLOG_FACILITY_CF		= 0x01,
+	FW_DEVLOG_FACILITY_SCHED	= 0x02,
+	FW_DEVLOG_FACILITY_TIMER	= 0x04,
+	FW_DEVLOG_FACILITY_RES		= 0x06,
+	FW_DEVLOG_FACILITY_HW		= 0x08,
+	FW_DEVLOG_FACILITY_FLR		= 0x10,
+	FW_DEVLOG_FACILITY_DMAQ		= 0x12,
+	FW_DEVLOG_FACILITY_PHY		= 0x14,
+	FW_DEVLOG_FACILITY_MAC		= 0x16,
+	FW_DEVLOG_FACILITY_PORT		= 0x18,
+	FW_DEVLOG_FACILITY_VI		= 0x1A,
+	FW_DEVLOG_FACILITY_FILTER	= 0x1C,
+	FW_DEVLOG_FACILITY_ACL		= 0x1E,
+	FW_DEVLOG_FACILITY_TM		= 0x20,
+	FW_DEVLOG_FACILITY_QFC		= 0x22,
+	FW_DEVLOG_FACILITY_DCB		= 0x24,
+	FW_DEVLOG_FACILITY_ETH		= 0x26,
+	FW_DEVLOG_FACILITY_OFLD		= 0x28,
+	FW_DEVLOG_FACILITY_RI		= 0x2A,
+	FW_DEVLOG_FACILITY_ISCSI	= 0x2C,
+	FW_DEVLOG_FACILITY_FCOE		= 0x2E,
+	FW_DEVLOG_FACILITY_FOISCSI	= 0x30,
+	FW_DEVLOG_FACILITY_FOFCOE	= 0x32,
+	FW_DEVLOG_FACILITY_CHNET        = 0x34,
+	FW_DEVLOG_FACILITY_MAX          = 0x34,
+};
+
+/* log message format */
+struct fw_devlog_e {
+	__be64	timestamp;
+	__be32	seqno;
+	__be16	reserved1;
+	__u8	level;
+	__u8	facility;
+	__u8	fmt[FW_DEVLOG_FMT_LEN];
+	__be32	params[FW_DEVLOG_FMT_PARAMS_NUM];
+	__be32	reserved3[4];
+};
+
+struct fw_devlog_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__u8   level;
+	__u8   r2[7];
+	__be32 memtype_devlog_memaddr16_devlog;
+	__be32 memsize_devlog;
+	__be32 r3[2];
+};
+
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S		28
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M		0xf
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x)	\
+	(((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
+	 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
+
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S	0
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M	0xfffffff
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x)	\
+	(((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
+	 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+
+/* P C I E   F W   P F 7   R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters which allows Host Drivers to
+ * access the "devlog" which needing to contact firmware.  The encoding is
+ * mostly the same as that returned by the DEVLOG command except for the size
+ * which is encoded as the number of entries in multiples-1 of 128 here rather
+ * than the memory size as is done in the DEVLOG command.  Thus, 0 means 128
+ * and 15 means 2048.  This of course in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG		7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S	28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M	0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+	((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+	(((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+	 PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S	4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M	0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x)	((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+	(((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S	0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M	0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x)	((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+	(((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
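+/* Illustrative sketch (hypothetical "pfdl" holds the raw PF7 register value):
+ * following the encoding described above, the devlog parameters decode as
+ *
+ *	nentries = 128 * (PCIE_FW_PF_DEVLOG_NENTRIES128_G(pfdl) + 1);
+ *	memtype  = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pfdl);
+ *	memaddr  = PCIE_FW_PF_DEVLOG_ADDR16_G(pfdl) << 4;   (16-byte units)
+ */
+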
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
new file mode 100644
index 0000000..c4b262c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -0,0 +1,64 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4FW_VERSION_H__
+#define __T4FW_VERSION_H__
+
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x0E
+#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_BUILD 0x00
+
+#define T4FW_MIN_VERSION_MAJOR 0x01
+#define T4FW_MIN_VERSION_MINOR 0x04
+#define T4FW_MIN_VERSION_MICRO 0x00
+
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x0E
+#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_BUILD 0x00
+
+#define T5FW_MIN_VERSION_MAJOR 0x00
+#define T5FW_MIN_VERSION_MINOR 0x00
+#define T5FW_MIN_VERSION_MICRO 0x00
+
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0E
+#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_BUILD 0x00
+
+#define T6FW_MIN_VERSION_MAJOR 0x00
+#define T6FW_MIN_VERSION_MINOR 0x00
+#define T6FW_MIN_VERSION_MICRO 0x00
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/Makefile b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
new file mode 100644
index 0000000..d72ee26
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 SR-IOV Virtual Function Driver
+#
+
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
+
+cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
new file mode 100644
index 0000000..6049f70
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -0,0 +1,552 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file should not be included directly.  Include t4vf_common.h instead.
+ */
+
+#ifndef __CXGB4VF_ADAPTER_H__
+#define __CXGB4VF_ADAPTER_H__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "../cxgb4/t4_hw.h"
+
+/*
+ * Constants of the implementation.
+ */
+enum {
+	MAX_NPORTS	= 1,		/* max # of "ports" */
+	MAX_PORT_QSETS	= 8,		/* max # of Queue Sets / "port" */
+	MAX_ETH_QSETS	= MAX_NPORTS*MAX_PORT_QSETS,
+
+	/*
+	 * MSI-X interrupt index usage.
+	 */
+	MSIX_FW		= 0,		/* MSI-X index for firmware Q */
+	MSIX_IQFLINT	= 1,		/* MSI-X index base for Ingress Qs */
+	MSIX_EXTRAS	= 1,
+	MSIX_ENTRIES	= MAX_ETH_QSETS + MSIX_EXTRAS,
+
+	/*
+	 * The maximum number of Ingress and Egress Queues is determined by
+	 * the maximum number of "Queue Sets" which we support plus any
+	 * ancillary queues.  Each "Queue Set" requires one Ingress Queue
+	 * for RX Packet Ingress Event notifications and two Egress Queues for
+	 * a Free List and an Ethernet TX list.
+	 */
+	INGQ_EXTRAS	= 2,		/* firmware event queue and */
+					/*   forwarded interrupts */
+	MAX_INGQ	= MAX_ETH_QSETS+INGQ_EXTRAS,
+	MAX_EGRQ	= MAX_ETH_QSETS*2,
+};
+
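+/* With the constants above this works out to MAX_ETH_QSETS = 8 Queue Sets,
+ * MSIX_ENTRIES = 9 (one firmware event vector plus eight queue vectors),
+ * MAX_INGQ = 10 Ingress Queues and MAX_EGRQ = 16 Egress Queues.
+ */
+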
+/*
+ * Forward structure definition references.
+ */
+struct adapter;
+struct sge_eth_rxq;
+struct sge_rspq;
+
+/*
+ * Per-"port" information.  This is really per-Virtual Interface information
+ * but the use of the "port" nomenclature makes it easier to go back and forth
+ * between the PF and VF drivers ...
+ */
+struct port_info {
+	struct adapter *adapter;	/* our adapter */
+	u16 viid;			/* virtual interface ID */
+	s16 xact_addr_filt;		/* index of our MAC address filter */
+	u16 rss_size;			/* size of VI's RSS table slice */
+	u8 pidx;			/* index into adapter port[] */
+	s8 mdio_addr;
+	u8 port_type;			/* firmware port type */
+	u8 mod_type;			/* firmware module type */
+	u8 port_id;			/* physical port ID */
+	u8 nqsets;			/* # of "Queue Sets" */
+	u8 first_qset;			/* index of first "Queue Set" */
+	struct link_config link_cfg;	/* physical port configuration */
+};
+
+/*
+ * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
+ * queues are organized into "Queue Sets" with one ingress and one egress
+ * queue per Queue Set.  These Queue Sets are apportionable between the "ports"
+ * (Virtual Interfaces).  One extra ingress queue is used to receive
+ * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
+ * use here are really "Relative Queue IDs" which are returned as part of the
+ * firmware command to allocate queues.  These queue IDs are relative to the
+ * absolute Queue ID base of the section of the Queue ID space allocated to
+ * the PF/VF.
+ */
+
+/*
+ * SGE free-list queue state.
+ */
+struct rx_sw_desc;
+struct sge_fl {
+	unsigned int avail;		/* # of available RX buffers */
+	unsigned int pend_cred;		/* new buffers since last FL DB ring */
+	unsigned int cidx;		/* consumer index */
+	unsigned int pidx;		/* producer index */
+	unsigned long alloc_failed;	/* # of buffer allocation failures */
+	unsigned long large_alloc_failed;
+	unsigned long starving;		/* # of times FL was found starving */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	unsigned int cntxt_id;		/* SGE relative QID for the free list */
+	unsigned int abs_id;		/* SGE absolute QID for the free list */
+	unsigned int size;		/* capacity of free list */
+	struct rx_sw_desc *sdesc;	/* address of SW RX descriptor ring */
+	__be64 *desc;			/* address of HW RX descriptor ring */
+	dma_addr_t addr;		/* PCI bus address of hardware ring */
+	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
+	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
+};
+
+/*
+ * An ingress packet gather list.
+ */
+struct pkt_gl {
+	struct page_frag frags[MAX_SKB_FRAGS];
+	void *va;			/* virtual address of first byte */
+	unsigned int nfrags;		/* # of fragments */
+	unsigned int tot_len;		/* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
+			      const struct pkt_gl *);
+
+/*
+ * State for an SGE Response Queue.
+ */
+struct sge_rspq {
+	struct napi_struct napi;	/* NAPI scheduling control */
+	const __be64 *cur_desc;		/* current descriptor in queue */
+	unsigned int cidx;		/* consumer index */
+	u8 gen;				/* current generation bit */
+	u8 next_intr_params;		/* holdoff params for next interrupt */
+	int offset;			/* offset into current FL buffer */
+
+	unsigned int unhandled_irqs;	/* bogus interrupts */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	u8 intr_params;			/* interrupt holdoff parameters */
+	u8 pktcnt_idx;			/* interrupt packet threshold */
+	u8 idx;				/* queue index within its group */
+	u16 cntxt_id;			/* SGE rel QID for the response Q */
+	u16 abs_id;			/* SGE abs QID for the response Q */
+	__be64 *desc;			/* address of hardware response ring */
+	dma_addr_t phys_addr;		/* PCI bus address of ring */
+	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
+	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
+	unsigned int iqe_len;		/* entry size */
+	unsigned int size;		/* capacity of response Q */
+	struct adapter *adapter;	/* our adapter */
+	struct net_device *netdev;	/* associated net device */
+	rspq_handler_t handler;		/* the handler for this response Q */
+};
+
+/*
+ * Ethernet queue statistics
+ */
+struct sge_eth_stats {
+	unsigned long pkts;		/* # of ethernet packets */
+	unsigned long lro_pkts;		/* # of LRO super packets */
+	unsigned long lro_merged;	/* # of wire packets merged by LRO */
+	unsigned long rx_cso;		/* # of Rx checksum offloads */
+	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
+	unsigned long rx_drops;		/* # of packets dropped due to no mem */
+};
+
+/*
+ * State for an Ethernet Receive Queue.
+ */
+struct sge_eth_rxq {
+	struct sge_rspq rspq;		/* Response Queue */
+	struct sge_fl fl;		/* Free List */
+	struct sge_eth_stats stats;	/* receive statistics */
+};
+
+/*
+ * SGE Transmit Queue state.  This contains all of the resources associated
+ * with the hardware status of a TX Queue which is a circular ring of hardware
+ * TX Descriptors.  For convenience, it also contains a pointer to a parallel
+ * "Software Descriptor" array but we don't know anything about it here other
+ * than its type name.
+ */
+struct tx_desc {
+	/*
+	 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
+	 * hardware: Sizes, Producer and Consumer indices, etc.
+	 */
+	__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
+};
+struct tx_sw_desc;
+struct sge_txq {
+	unsigned int in_use;		/* # of in-use TX descriptors */
+	unsigned int size;		/* # of descriptors */
+	unsigned int cidx;		/* SW consumer index */
+	unsigned int pidx;		/* producer index */
+	unsigned long stops;		/* # of times queue has been stopped */
+	unsigned long restarts;		/* # of queue restarts */
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
+	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
+	struct tx_desc *desc;		/* address of HW TX descriptor ring */
+	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
+	struct sge_qstat *stat;		/* queue status entry */
+	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
+	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
+	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
+};
+
+/*
+ * State for an Ethernet Transmit Queue.
+ */
+struct sge_eth_txq {
+	struct sge_txq q;		/* SGE TX Queue */
+	struct netdev_queue *txq;	/* associated netdev TX queue */
+	unsigned long tso;		/* # of TSO requests */
+	unsigned long tx_cso;		/* # of TX checksum offloads */
+	unsigned long vlan_ins;		/* # of TX VLAN insertions */
+	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
+};
+
+/*
+ * The complete set of Scatter/Gather Engine resources.
+ */
+struct sge {
+	/*
+	 * Our "Queue Sets" ...
+	 */
+	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+
+	/*
+	 * Extra ingress queues for asynchronous firmware events and
+	 * forwarded interrupts (when in MSI mode).
+	 */
+	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+	struct sge_rspq intrq ____cacheline_aligned_in_smp;
+	spinlock_t intrq_lock;
+
+	/*
+	 * State for managing "starving Free Lists" -- Free Lists which have
+	 * fallen below a certain threshold of buffers available to the
+	 * hardware and attempts to refill them up to that threshold have
+	 * failed.  We have a regular "slow tick" timer process which will
+	 * make periodic attempts to refill these starving Free Lists ...
+	 */
+	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+	struct timer_list rx_timer;
+
+	/*
+	 * State for cleaning up completed TX descriptors.
+	 */
+	struct timer_list tx_timer;
+
+	/*
+	 * Write-once/infrequently fields.
+	 * -------------------------------
+	 */
+
+	u16 max_ethqsets;		/* # of available Ethernet queue sets */
+	u16 ethqsets;			/* # of active Ethernet queue sets */
+	u16 ethtxq_rover;		/* Tx queue to clean up next */
+	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
+	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */
+
+	/* Decoded Adapter Parameters.
+	 */
+	u32 fl_pg_order;		/* large page allocation size */
+	u32 stat_len;			/* length of status page at ring end */
+	u32 pktshift;			/* padding between CPL & packet data */
+	u32 fl_align;			/* response queue message alignment */
+	u32 fl_starve_thres;		/* Free List starvation threshold */
+
+	/*
+	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
+	 * The absolute Queue IDs are in a compact range which start at a
+	 * [potentially large] Base Queue ID.  We perform the reverse map by
+	 * first converting the Absolute Queue ID into a Relative Queue ID by
+	 * subtracting off the Base Queue ID and then use a Relative Queue ID
+	 * indexed table to get the pointer to the corresponding software
+	 * queue structure.
+	 */
+	unsigned int egr_base;
+	unsigned int ingr_base;
+	void *egr_map[MAX_EGRQ];
+	struct sge_rspq *ingr_map[MAX_INGQ];
+};
+
+/*
+ * Utility macros to convert Absolute- to Relative-Queue indices and Egress-
+ * and Ingress-Queues.  The EQ_MAP() and IQ_MAP() macros which provide
+ * pointers to Ingress- and Egress-Queues can be used as both L- and R-values
+ */
+#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
+#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
+
+#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
+#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
+
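+/* For example, setup_sge_queues() in cxgb4vf_main.c records each queue under
+ * its Absolute Queue ID with "IQ_MAP(s, abs_id) = rspq" / "EQ_MAP(s, abs_id)
+ * = &txq->q" (L-value use), while the firmware event handler turns a
+ * hardware-supplied QID back into a software queue via EQ_IDX() and the
+ * egr_map[] array.
+ */
+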
+/*
+ * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
+ */
+#define for_each_ethrxq(sge, iter) \
+	for (iter = 0; iter < (sge)->ethqsets; iter++)
+
+/*
+ * Per-"adapter" (Virtual Function) information.
+ */
+struct adapter {
+	/* PCI resources */
+	void __iomem *regs;
+	void __iomem *bar2;
+	struct pci_dev *pdev;
+	struct device *pdev_dev;
+
+	/* "adapter" resources */
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+	struct adapter_params params;
+
+	/* queue and interrupt resources */
+	struct {
+		unsigned short vec;
+		char desc[22];
+	} msix_info[MSIX_ENTRIES];
+	struct sge sge;
+
+	/* Linux network device resources */
+	struct net_device *port[MAX_NPORTS];
+	const char *name;
+	unsigned int msg_enable;
+
+	/* debugfs resources */
+	struct dentry *debugfs_root;
+
+	/* various locks */
+	spinlock_t stats_lock;
+};
+
+enum { /* adapter flags */
+	FULL_INIT_DONE     = (1UL << 0),
+	USING_MSI          = (1UL << 1),
+	USING_MSIX         = (1UL << 2),
+	QUEUES_BOUND       = (1UL << 3),
+};
+
+/*
+ * The following register read/write routine definitions are required by
+ * the common code.
+ */
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+	return readl(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+	writel(val, adapter->regs + reg_addr);
+}
+
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#endif
+
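+/* Note that the fallbacks above issue two 32-bit accesses (low word first),
+ * so on platforms without native readq()/writeq() a 64-bit register access
+ * is not atomic.
+ */
+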
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+	return readq(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+				  u64 val)
+{
+	writeq(val, adapter->regs + reg_addr);
+}
+
+/**
+ * port_name - return the string name of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the string name of the selected port.
+ */
+static inline const char *port_name(struct adapter *adapter, int pidx)
+{
+	return adapter->port[pidx]->name;
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW.  Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
+				     u8 hw_addr[])
+{
+	memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the port_info structure for the port with the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
+{
+	return netdev_priv(adapter->port[pidx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->adapter;
+}
+
+/*
+ * OS "Callback" function declarations.  These are functions that the OS code
+ * is "contracted" to provide for the common code.
+ */
+void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
+
+/*
+ * SGE function prototype declarations.
+ */
+int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
+		       struct net_device *, int,
+		       struct sge_fl *, rspq_handler_t);
+int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
+			   struct net_device *, struct netdev_queue *,
+			   unsigned int);
+void t4vf_free_sge_resources(struct adapter *);
+
+int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
+		       const struct pkt_gl *);
+
+irq_handler_t t4vf_intr_handler(struct adapter *);
+irqreturn_t t4vf_sge_intr_msix(int, void *);
+
+int t4vf_sge_init(struct adapter *);
+void t4vf_sge_start(struct adapter *);
+void t4vf_sge_stop(struct adapter *);
+
+#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
new file mode 100644
index 0000000..0cfa5d7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -0,0 +1,3099 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Generic information about the driver.
+ */
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
+
+/*
+ * Module Parameters.
+ * ==================
+ */
+
+/*
+ * Default ethtool "message level" for adapters.
+ */
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable,
+		 "default adapter ethtool message level bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X then MSI.  This parameter determines which of these schemes the
+ * driver may consider as follows:
+ *
+ *     msi = 2: choose from among MSI-X and MSI
+ *     msi = 1: only consider MSI interrupts
+ *
+ * Note that unlike the Physical Function driver, this Virtual Function driver
+ * does _not_ support legacy INTx interrupts (this limitation is mandated by
+ * the PCI-E SR-IOV standard).
+ */
+#define MSI_MSIX	2
+#define MSI_MSI		1
+#define MSI_DEFAULT	MSI_MSIX
+
+static int msi = MSI_DEFAULT;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
+
+/*
+ * Fundamental constants.
+ * ======================
+ */
+
+enum {
+	MAX_TXQ_ENTRIES		= 16384,
+	MAX_RSPQ_ENTRIES	= 16384,
+	MAX_RX_BUFFERS		= 16384,
+
+	MIN_TXQ_ENTRIES		= 32,
+	MIN_RSPQ_ENTRIES	= 128,
+	MIN_FL_ENTRIES		= 16,
+
+	/*
+	 * For purposes of manipulating the Free List size we need to
+	 * recognize that Free Lists are actually Egress Queues (the host
+	 * produces free buffers which the hardware consumes), Egress Queues
+	 * indices are all in units of Egress Context Units bytes, and free
+	 * list entries are 64-bit PCI DMA addresses.  And since the state of
+	 * the Producer Index == the Consumer Index implies an EMPTY list, we
+	 * always have at least one Egress Unit's worth of Free List entries
+	 * unused.  See sge.c for more details ...
+	 */
+	EQ_UNIT = SGE_EQ_IDXSIZE,
+	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+	MIN_FL_RESID = FL_PER_EQ_UNIT,
+};
+
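+/* As a concrete illustration, assuming the usual Egress Context Unit size of
+ * 64 bytes (SGE_EQ_IDXSIZE): FL_PER_EQ_UNIT = 64 / sizeof(__be64) = 8, so
+ * MIN_FL_RESID leaves at least 8 Free List entries unused to tell a full
+ * ring apart from an empty one.
+ */
+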
+/*
+ * Global driver state.
+ * ====================
+ */
+
+static struct dentry *cxgb4vf_debugfs_root;
+
+/*
+ * OS "Callback" functions.
+ * ========================
+ */
+
+/*
+ * The link status has changed on the indicated "port" (Virtual Interface).
+ */
+void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
+{
+	struct net_device *dev = adapter->port[pidx];
+
+	/*
+	 * If the port is disabled or the current recorded "link up"
+	 * status matches the new status, just return.
+	 */
+	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
+		return;
+
+	/*
+	 * Tell the OS that the link status has changed and print a short
+	 * informative message on the console about the event.
+	 */
+	if (link_ok) {
+		const char *s;
+		const char *fc;
+		const struct port_info *pi = netdev_priv(dev);
+
+		netif_carrier_on(dev);
+
+		switch (pi->link_cfg.speed) {
+		case 40000:
+			s = "40Gbps";
+			break;
+
+		case 10000:
+			s = "10Gbps";
+			break;
+
+		case 1000:
+			s = "1000Mbps";
+			break;
+
+		case 100:
+			s = "100Mbps";
+			break;
+
+		default:
+			s = "unknown";
+			break;
+		}
+
+		switch (pi->link_cfg.fc) {
+		case PAUSE_RX:
+			fc = "RX";
+			break;
+
+		case PAUSE_TX:
+			fc = "TX";
+			break;
+
+		case PAUSE_RX|PAUSE_TX:
+			fc = "RX/TX";
+			break;
+
+		default:
+			fc = "no";
+			break;
+		}
+
+		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
+	} else {
+		netif_carrier_off(dev);
+		netdev_info(dev, "link down\n");
+	}
+}
+
+/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+	static const char * const mod_str[] = {
+		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+	};
+	const struct net_device *dev = adapter->port[pidx];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+			 dev->name);
+	else if (pi->mod_type < ARRAY_SIZE(mod_str))
+		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+			 dev->name, mod_str[pi->mod_type]);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+			 "module inserted\n", dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+		dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
+			 "forcing TWINAX\n", dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+			 dev->name);
+	else
+		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+			 "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
+ * Net device operations.
+ * ======================
+ */
+
+
+
+
+/*
+ * Perform the MAC and PHY actions needed to enable a "port" (Virtual
+ * Interface).
+ */
+static int link_start(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * We do not set address filters and promiscuity here; the stack does
+	 * that step explicitly.  Enable VLAN acceleration.
+	 */
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
+			      true);
+	if (ret == 0) {
+		ret = t4vf_change_mac(pi->adapter, pi->viid,
+				      pi->xact_addr_filt, dev->dev_addr, true);
+		if (ret >= 0) {
+			pi->xact_addr_filt = ret;
+			ret = 0;
+		}
+	}
+
+	/*
+	 * We don't need to actually "start the link" itself since the
+	 * firmware will do that for us when the first Virtual Interface
+	 * is enabled on a port.
+	 */
+	if (ret == 0)
+		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+	return ret;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adapter)
+{
+	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
+	int pidx;
+
+	/*
+	 * Firmware events.
+	 */
+	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
+		 "%s-FWeventq", adapter->name);
+	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
+
+	/*
+	 * Ethernet queues.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		const struct port_info *pi = netdev_priv(dev);
+		int qs, msi;
+
+		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
+			snprintf(adapter->msix_info[msi].desc, namelen,
+				 "%s-%d", dev->name, qs);
+			adapter->msix_info[msi].desc[namelen] = 0;
+		}
+	}
+}
+
+/*
+ * Request all of our MSI-X resources.
+ */
+static int request_msix_queue_irqs(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq, msi, err;
+
+	/*
+	 * Firmware events.
+	 */
+	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
+			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
+	if (err)
+		return err;
+
+	/*
+	 * Ethernet queues.
+	 */
+	msi = MSIX_IQFLINT;
+	for_each_ethrxq(s, rxq) {
+		err = request_irq(adapter->msix_info[msi].vec,
+				  t4vf_sge_intr_msix, 0,
+				  adapter->msix_info[msi].desc,
+				  &s->ethrxq[rxq].rspq);
+		if (err)
+			goto err_free_irqs;
+		msi++;
+	}
+	return 0;
+
+err_free_irqs:
+	while (--rxq >= 0)
+		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
+	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+	return err;
+}
+
+/*
+ * Free our MSI-X resources.
+ */
+static void free_msix_queue_irqs(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq, msi;
+
+	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+	msi = MSIX_IQFLINT;
+	for_each_ethrxq(s, rxq)
+		free_irq(adapter->msix_info[msi++].vec,
+			 &s->ethrxq[rxq].rspq);
+}
+
+/*
+ * Turn on NAPI and start up interrupts on a response queue.
+ */
+static void qenable(struct sge_rspq *rspq)
+{
+	napi_enable(&rspq->napi);
+
+	/*
+	 * 0-increment the Going To Sleep register to start the timer and
+	 * enable interrupts.
+	 */
+	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+		     CIDXINC_V(0) |
+		     SEINTARM_V(rspq->intr_params) |
+		     INGRESSQID_V(rspq->cntxt_id));
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Receive Queues.
+ */
+static void enable_rx(struct adapter *adapter)
+{
+	int rxq;
+	struct sge *s = &adapter->sge;
+
+	for_each_ethrxq(s, rxq)
+		qenable(&s->ethrxq[rxq].rspq);
+	qenable(&s->fw_evtq);
+
+	/*
+	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
+	 * its Going To Sleep register here to get it started.
+	 */
+	if (adapter->flags & USING_MSI)
+		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+			     CIDXINC_V(0) |
+			     SEINTARM_V(s->intrq.intr_params) |
+			     INGRESSQID_V(s->intrq.cntxt_id));
+
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int rxq;
+
+	for_each_ethrxq(s, rxq)
+		napi_disable(&s->ethrxq[rxq].rspq.napi);
+	napi_disable(&s->fw_evtq.napi);
+}
+
+/*
+ * Response queue handler for the firmware event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
+			  const struct pkt_gl *gl)
+{
+	/*
+	 * Extract response opcode and get pointer to CPL message body.
+	 */
+	struct adapter *adapter = rspq->adapter;
+	u8 opcode = ((const struct rss_header *)rsp)->opcode;
+	void *cpl = (void *)(rsp + 1);
+
+	switch (opcode) {
+	case CPL_FW6_MSG: {
+		/*
+		 * We've received an asynchronous message from the firmware.
+		 */
+		const struct cpl_fw6_msg *fw_msg = cpl;
+		if (fw_msg->type == FW6_TYPE_CMD_RPL)
+			t4vf_handle_fw_rpl(adapter, fw_msg->data);
+		break;
+	}
+
+	case CPL_FW4_MSG: {
+		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+		 */
+		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
+		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
+		if (opcode != CPL_SGE_EGR_UPDATE) {
+			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
+				, opcode);
+			break;
+		}
+		cpl = (void *)p;
+		/*FALLTHROUGH*/
+	}
+
+	case CPL_SGE_EGR_UPDATE: {
+		/*
+		 * We've received an Egress Queue Status Update message.  We
+		 * get these, if the SGE is configured to send these when the
+		 * firmware passes certain points in processing our TX
+		 * Ethernet Queue or if we make an explicit request for one.
+		 * We use these updates to determine when we may need to
+		 * restart a TX Ethernet Queue which was stopped for lack of
+		 * free TX Queue Descriptors ...
+		 */
+		const struct cpl_sge_egr_update *p = cpl;
+		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
+		struct sge *s = &adapter->sge;
+		struct sge_txq *tq;
+		struct sge_eth_txq *txq;
+		unsigned int eq_idx;
+
+		/*
+		 * Perform sanity checking on the Queue ID to make sure it
+		 * really refers to one of our TX Ethernet Egress Queues which
+		 * is active and matches the queue's ID.  None of these error
+		 * conditions should ever happen so we may want to either make
+		 * them fatal and/or conditionalized under DEBUG.
+		 */
+		eq_idx = EQ_IDX(s, qid);
+		if (unlikely(eq_idx >= MAX_EGRQ)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d out of range\n", qid);
+			break;
+		}
+		tq = s->egr_map[eq_idx];
+		if (unlikely(tq == NULL)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d TXQ=NULL\n", qid);
+			break;
+		}
+		txq = container_of(tq, struct sge_eth_txq, q);
+		if (unlikely(tq->abs_id != qid)) {
+			dev_err(adapter->pdev_dev,
+				"Egress Update QID %d refers to TXQ %d\n",
+				qid, tq->abs_id);
+			break;
+		}
+
+		/*
+		 * Restart a stopped TX Queue which has less than half of its
+		 * TX ring in use ...
+		 */
+		txq->q.restarts++;
+		netif_tx_wake_queue(txq->txq);
+		break;
+	}
+
+	default:
+		dev_err(adapter->pdev_dev,
+			"unexpected CPL %#x on FW event queue\n", opcode);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
+ * to use and initialize them.  We support multiple "Queue Sets" per port if
+ * we have MSI-X, otherwise just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int err, pidx, msix;
+
+	/*
+	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
+	 * state.
+	 */
+	bitmap_zero(s->starving_fl, MAX_EGRQ);
+
+	/*
+	 * If we're using MSI interrupt mode we need to set up a "forwarded
+	 * interrupt" queue which we'll set up with our MSI vector.  The rest
+	 * of the ingress queues will be set up to forward their interrupts to
+	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
+	 * the intrq's queue ID as the interrupt forwarding queue for the
+	 * subsequent calls ...
+	 */
+	if (adapter->flags & USING_MSI) {
+		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
+					 adapter->port[0], 0, NULL, NULL);
+		if (err)
+			goto err_free_queues;
+	}
+
+	/*
+	 * Allocate our ingress queue for asynchronous firmware messages.
+	 */
+	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
+				 MSIX_FW, NULL, fwevtq_handler);
+	if (err)
+		goto err_free_queues;
+
+	/*
+	 * Allocate each "port"'s initial Queue Sets.  These can be changed
+	 * later on ... up to the point where any interface on the adapter is
+	 * brought up at which point lots of things get nailed down
+	 * permanently ...
+	 */
+	msix = MSIX_IQFLINT;
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+		int qs;
+
+		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
+						 dev, msix++,
+						 &rxq->fl, t4vf_ethrx_handler);
+			if (err)
+				goto err_free_queues;
+
+			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
+					     netdev_get_tx_queue(dev, qs),
+					     s->fw_evtq.cntxt_id);
+			if (err)
+				goto err_free_queues;
+
+			rxq->rspq.idx = qs;
+			memset(&rxq->stats, 0, sizeof(rxq->stats));
+		}
+	}
+
+	/*
+	 * Create the reverse mappings for the queues.
+	 */
+	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
+	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
+	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
+	for_each_port(adapter, pidx) {
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+		int qs;
+
+		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
+			EQ_MAP(s, txq->q.abs_id) = &txq->q;
+
+			/*
+			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
+			 * for Free Lists but since all of the Egress Queues
+			 * (including Free Lists) have Relative Queue IDs
+			 * which are computed as Absolute - Base Queue ID, we
+			 * can synthesize the Absolute Queue IDs for the Free
+			 * Lists.  This is useful for debugging purposes when
+			 * we want to dump Queue Contexts via the PF Driver.
+			 */
+			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
+			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
+		}
+	}
+	return 0;
+
+err_free_queues:
+	t4vf_free_sge_resources(adapter);
+	return err;
+}
+
+/*
+ * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
+ * queues.  We configure the RSS CPU lookup table to distribute to the number
+ * of HW receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each "port" (Virtual
+ * Interface).  We always configure the RSS mapping for all ports since the
+ * mapping table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adapter)
+{
+	int pidx;
+
+	for_each_port(adapter, pidx) {
+		struct port_info *pi = adap2pinfo(adapter, pidx);
+		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+		u16 rss[MAX_PORT_QSETS];
+		int qs, err;
+
+		for (qs = 0; qs < pi->nqsets; qs++)
+			rss[qs] = rxq[qs].rspq.abs_id;
+
+		err = t4vf_config_rss_range(adapter, pi->viid,
+					    0, pi->rss_size, rss, pi->nqsets);
+		if (err)
+			return err;
+
+		/*
+		 * Perform Global RSS Mode-specific initialization.
+		 */
+		switch (adapter->params.rss.mode) {
+		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
+			/*
+			 * If Tunnel All Lookup isn't specified in the global
+			 * RSS Configuration, then we need to specify a
+			 * default Ingress Queue for any ingress packets which
+			 * aren't hashed.  We'll use our first ingress queue
+			 * ...
+			 */
+			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
+				union rss_vi_config config;
+				err = t4vf_read_rss_vi_config(adapter,
+							      pi->viid,
+							      &config);
+				if (err)
+					return err;
+				config.basicvirtual.defaultq =
+					rxq[0].rspq.abs_id;
+				err = t4vf_write_rss_vi_config(adapter,
+							       pi->viid,
+							       &config);
+				if (err)
+					return err;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bring the adapter up.  Called whenever we go from no "ports" open to having
+ * one open.  This function performs the actions necessary to make an adapter
+ * operational, such as completing the initialization of HW modules, and
+ * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
+ * this is called "cxgb_up" in the PF Driver.)
+ */
+static int adapter_up(struct adapter *adapter)
+{
+	int err;
+
+	/*
+	 * If this is the first time we've been called, perform basic
+	 * adapter setup.  Once we've done this, many of our adapter
+	 * parameters can no longer be changed ...
+	 */
+	if ((adapter->flags & FULL_INIT_DONE) == 0) {
+		err = setup_sge_queues(adapter);
+		if (err)
+			return err;
+		err = setup_rss(adapter);
+		if (err) {
+			t4vf_free_sge_resources(adapter);
+			return err;
+		}
+
+		if (adapter->flags & USING_MSIX)
+			name_msix_vecs(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	/*
+	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
+	 */
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+	if (adapter->flags & USING_MSIX)
+		err = request_msix_queue_irqs(adapter);
+	else
+		err = request_irq(adapter->pdev->irq,
+				  t4vf_intr_handler(adapter), 0,
+				  adapter->name, adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
+			err);
+		return err;
+	}
+
+	/*
+	 * Enable NAPI ingress processing and return success.
+	 */
+	enable_rx(adapter);
+	t4vf_sge_start(adapter);
+	return 0;
+}
+
+/*
+ * Bring the adapter down.  Called whenever the last "port" (Virtual
+ * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
+ * Driver.)
+ */
+static void adapter_down(struct adapter *adapter)
+{
+	/*
+	 * Free interrupt resources.
+	 */
+	if (adapter->flags & USING_MSIX)
+		free_msix_queue_irqs(adapter);
+	else
+		free_irq(adapter->pdev->irq, adapter);
+
+	/*
+	 * Wait for NAPI handlers to finish.
+	 */
+	quiesce_rx(adapter);
+}
+
+/*
+ * Start up a net device.
+ */
+static int cxgb4vf_open(struct net_device *dev)
+{
+	int err;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	/*
+	 * If this is the first interface that we're opening on the "adapter",
+	 * bring the "adapter" up now.
+	 */
+	if (adapter->open_device_map == 0) {
+		err = adapter_up(adapter);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Note that this interface is up and start everything up ...
+	 */
+	netif_set_real_num_tx_queues(dev, pi->nqsets);
+	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+	if (err)
+		goto err_unwind;
+	err = link_start(dev);
+	if (err)
+		goto err_unwind;
+
+	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
+	return 0;
+
+err_unwind:
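+	/*
+	 * Only tear the adapter back down if no other port has it open,
+	 * i.e. we were the ones who just brought it up above.
+	 */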
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
+}
+
+/*
+ * Shut down a net device.  This routine is called "cxgb_close" in the PF
+ * Driver ...
+ */
+static int cxgb4vf_stop(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	netif_tx_stop_all_queues(dev);
+	netif_carrier_off(dev);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
+	pi->link_cfg.link_ok = 0;
+
+	clear_bit(pi->port_id, &adapter->open_device_map);
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return 0;
+}
+
+/*
+ * Translate our basic statistics into the standard "ifconfig" statistics.
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+	struct t4vf_port_stats stats;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	struct net_device_stats *ns = &dev->stats;
+	int err;
+
+	spin_lock(&adapter->stats_lock);
+	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+	spin_unlock(&adapter->stats_lock);
+
+	memset(ns, 0, sizeof(*ns));
+	if (err)
+		return ns;
+
+	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+			stats.tx_ucast_bytes + stats.tx_offload_bytes);
+	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+			  stats.tx_ucast_frames + stats.tx_offload_frames);
+	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+			stats.rx_ucast_bytes);
+	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+			  stats.rx_ucast_frames);
+	ns->multicast = stats.rx_mcast_frames;
+	ns->tx_errors = stats.tx_drop_frames;
+	ns->rx_errors = stats.rx_err_frames;
+
+	return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
+{
+	unsigned int index = 0;
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
+	return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
+{
+	unsigned int index = 0;
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
+	return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	unsigned int offset, naddr;
+	const u8 *addr[7];
+	int ret;
+	const struct port_info *pi = netdev_priv(dev);
+
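+	/*
+	 * The loops below walk the unicast and multicast lists in chunks of
+	 * ARRAY_SIZE(addr), handing t4vf_alloc_mac_filt() at most that many
+	 * addresses per call.
+	 */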
+	/* first do the secondary unicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &uhash, sleep);
+		if (ret < 0)
+			return ret;
+
+		free = false;
+	}
+
+	/* next set up the multicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &mhash, sleep);
+		if (ret < 0)
+			return ret;
+		free = false;
+	}
+
+	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+				  uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	ret = set_addr_filters(dev, sleep_ok);
+	if (ret == 0)
+		ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+				      (dev->flags & IFF_PROMISC) != 0,
+				      (dev->flags & IFF_ALLMULTI) != 0,
+				      1, -1, sleep_ok);
+	return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */
+static void cxgb4vf_set_rxmode(struct net_device *dev)
+{
+	/* unfortunately we can't return errors to the stack */
+	set_rxmode(dev, -1, false);
+}
+
+/*
+ * Find the entry in the interrupt holdoff timer value array which comes
+ * closest to the specified interrupt holdoff value.
+ */
+static int closest_timer(const struct sge *s, int us)
+{
+	int i, timer_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		int delta = us - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			timer_idx = i;
+		}
+	}
+	return timer_idx;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = thres - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			pktcnt_idx = i;
+		}
+	}
+	return pktcnt_idx;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us.  0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adapter,
+			       const struct sge_rspq *rspq)
+{
+	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
+
+	return timer_idx < SGE_NTIMERS
+		? adapter->sge.timer_val[timer_idx]
+		: 0;
+}
+
+/**
+ *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ *	@adapter: the adapter
+ *	@rspq: the RX response queue
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Sets an RX response queue's interrupt hold-off time and packet count.
+ *	At least one of the two needs to be enabled for the queue to generate
+ *	interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
+			       unsigned int us, unsigned int cnt)
+{
+	unsigned int timer_idx;
+
+	/*
+	 * If both the interrupt holdoff timer and count are specified as
+	 * zero, default to a holdoff count of 1 ...
+	 */
+	if ((us | cnt) == 0)
+		cnt = 1;
+
+	/*
+	 * If an interrupt holdoff count has been specified, then find the
+	 * closest configured holdoff count and use that.  If the response
+	 * queue has already been created, then update its queue context
+	 * parameters ...
+	 */
+	if (cnt) {
+		int err;
+		u32 v, pktcnt_idx;
+
+		pktcnt_idx = closest_thres(&adapter->sge, cnt);
+		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
+			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+			    FW_PARAMS_PARAM_X_V(
+					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
+			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
+			if (err)
+				return err;
+		}
+		rspq->pktcnt_idx = pktcnt_idx;
+	}
+
+	/*
+	 * Compute the closest holdoff timer index from the supplied holdoff
+	 * timer value.
+	 */
+	timer_idx = (us == 0
+		     ? SGE_TIMER_RSTRT_CNTR
+		     : closest_timer(&adapter->sge, us));
+
+	/*
+	 * Update the response queue's interrupt coalescing parameters and
+	 * return success.
+	 */
+	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+			     QINTR_CNT_EN_V(cnt > 0));
+	return 0;
+}
+
+/*
+ * Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *adapter)
+{
+	/*
+	 * Chip version 4, revision 0x3f (cxgb4vf).
+	 */
+	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
+}
+
+/*
+ * Execute the specified ioctl command.
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	    /*
+	     * The VF Driver doesn't have access to any of the other
+	     * common Ethernet device ioctl()'s (like reading/writing
+	     * PHY registers, etc.).
+	     */
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* accommodate SACK */
+	if (new_mtu < 81)
+		return -EINVAL;
+
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+			      -1, -1, -1, -1, true);
+	if (!ret)
+		dev->mtu = new_mtu;
+	return ret;
+}
+
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable make sure tx flag is always in same state as rx.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+	return features;
+}
+
+static int cxgb4vf_set_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	netdev_features_t changed = dev->features ^ features;
+
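+	/*
+	 * cxgb4vf_fix_features() keeps NETIF_F_HW_VLAN_CTAG_TX and
+	 * NETIF_F_HW_VLAN_CTAG_RX in sync, so testing the TX flag below is
+	 * equivalent to testing the requested RX VLAN extraction setting.
+	 */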
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
+				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
+
+	return 0;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+	int ret;
+	struct sockaddr *addr = _addr;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+			      addr->sa_data, true);
+	if (ret < 0)
+		return ret;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	pi->xact_addr_filt = ret;
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues.  This is called outside of normal interrupt
+ * context.
+ */
+static void cxgb4vf_poll_controller(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (adapter->flags & USING_MSIX) {
+		struct sge_eth_rxq *rxq;
+		int nqsets;
+
+		rxq = &adapter->sge.ethrxq[pi->first_qset];
+		for (nqsets = pi->nqsets; nqsets; nqsets--) {
+			t4vf_sge_intr_msix(0, &rxq->rspq);
+			rxq++;
+		}
+	} else
+		t4vf_intr_handler(adapter)(0, adapter);
+}
+#endif
+
+/*
+ * Ethtool operations.
+ * ===================
+ *
+ * Note that we don't support any ethtool operations which change the physical
+ * state of the port to which we're linked.
+ */
+
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+					  unsigned int caps)
+{
+	unsigned int v = 0;
+
+	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+	    type == FW_PORT_TYPE_BT_XAUI) {
+		v |= SUPPORTED_TP;
+		if (caps & FW_PORT_CAP_SPEED_100M)
+			v |= SUPPORTED_100baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+		v |= SUPPORTED_Backplane;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseKX_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseKX4_Full;
+	} else if (type == FW_PORT_TYPE_KR)
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+	else if (type == FW_PORT_TYPE_BP_AP)
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+	else if (type == FW_PORT_TYPE_BP4_AP)
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+		     SUPPORTED_10000baseKX4_Full;
+	else if (type == FW_PORT_TYPE_FIBER_XFI ||
+		 type == FW_PORT_TYPE_FIBER_XAUI ||
+		 type == FW_PORT_TYPE_SFP ||
+		 type == FW_PORT_TYPE_QSFP_10G ||
+		 type == FW_PORT_TYPE_QSA) {
+		v |= SUPPORTED_FIBRE;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_BP40_BA ||
+		   type == FW_PORT_TYPE_QSFP) {
+		v |= SUPPORTED_40000baseSR4_Full;
+		v |= SUPPORTED_FIBRE;
+	}
+
+	if (caps & FW_PORT_CAP_ANEG)
+		v |= SUPPORTED_Autoneg;
+	return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	const struct port_info *p = netdev_priv(dev);
+
+	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+	    p->port_type == FW_PORT_TYPE_BT_XFI ||
+	    p->port_type == FW_PORT_TYPE_BT_XAUI)
+		cmd->port = PORT_TP;
+	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+		cmd->port = PORT_FIBRE;
+	else if (p->port_type == FW_PORT_TYPE_SFP ||
+		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
+		 p->port_type == FW_PORT_TYPE_QSA ||
+		 p->port_type == FW_PORT_TYPE_QSFP) {
+		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
+		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
+		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
+			cmd->port = PORT_FIBRE;
+		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+			cmd->port = PORT_DA;
+		else
+			cmd->port = PORT_OTHER;
+	} else
+		cmd->port = PORT_OTHER;
+
+	if (p->mdio_addr >= 0) {
+		cmd->phy_address = p->mdio_addr;
+		cmd->transceiver = XCVR_EXTERNAL;
+		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+	} else {
+		cmd->phy_address = 0;  /* not really, but no better option */
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->mdio_support = 0;
+	}
+
+	cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+					       p->link_cfg.supported);
+	cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+					    p->link_cfg.advertising);
+	ethtool_cmd_speed_set(cmd,
+			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+	cmd->duplex = DUPLEX_FULL;
+	cmd->autoneg = p->link_cfg.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+/*
+ * Return our driver information.
+ */
+static void cxgb4vf_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct adapter *adapter = netdev2adap(dev);
+
+	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+		sizeof(drvinfo->bus_info));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
+		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
+		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
+		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
+}
+
+/*
+ * Return current adapter message level.
+ */
+static u32 cxgb4vf_get_msglevel(struct net_device *dev)
+{
+	return netdev2adap(dev)->msg_enable;
+}
+
+/*
+ * Set current adapter message level.
+ */
+static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
+{
+	netdev2adap(dev)->msg_enable = msglevel;
+}
+
+/*
+ * Return the device's current Queue Set ring size parameters along with the
+ * allowed maximum values.  Since ethtool doesn't understand the concept of
+ * multi-queue devices, we just return the current values associated with the
+ * first Queue Set.
+ */
+static void cxgb4vf_get_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *rp)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct sge *s = &pi->adapter->sge;
+
+	rp->rx_max_pending = MAX_RX_BUFFERS;
+	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+	rp->rx_jumbo_max_pending = 0;
+	rp->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
+	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+	rp->rx_jumbo_pending = 0;
+	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+/*
+ * Set the Queue Set ring size parameters for the device.  Again, since
+ * ethtool doesn't allow for the concept of multiple queues per device, we'll
+ * apply these new values across all of the Queue Sets associated with the
+ * device -- after vetting them of course!
+ */
+static int cxgb4vf_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *rp)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct sge *s = &adapter->sge;
+	int qs;
+
+	if (rp->rx_pending > MAX_RX_BUFFERS ||
+	    rp->rx_jumbo_pending ||
+	    rp->tx_pending > MAX_TXQ_ENTRIES ||
+	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    rp->rx_pending < MIN_FL_ENTRIES ||
+	    rp->tx_pending < MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
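+		/*
+		 * Add back the reserved Free List entries which
+		 * cxgb4vf_get_ringparam() subtracts when reporting the size.
+		 */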
+		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
+		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
+		s->ethtxq[qs].q.size = rp->tx_pending;
+	}
+	return 0;
+}
+
+/*
+ * Return the interrupt holdoff timer and count for the first Queue Set on the
+ * device.  Our extension ioctl() (the cxgbtool interface) allows the
+ * interrupt holdoff timer to be read on all of the device's Queue Sets.
+ */
+static int cxgb4vf_get_coalesce(struct net_device *dev,
+				struct ethtool_coalesce *coalesce)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct adapter *adapter = pi->adapter;
+	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
+
+	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
+	coalesce->rx_max_coalesced_frames =
+		((rspq->intr_params & QINTR_CNT_EN_F)
+		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
+		 : 0);
+	return 0;
+}
+
+/*
+ * Set the RX interrupt holdoff timer and count for the first Queue Set on the
+ * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
+ * the interrupt holdoff timer on any of the device's Queue Sets.
+ */
+static int cxgb4vf_set_coalesce(struct net_device *dev,
+				struct ethtool_coalesce *coalesce)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	return set_rxq_intr_params(adapter,
+				   &adapter->sge.ethrxq[pi->first_qset].rspq,
+				   coalesce->rx_coalesce_usecs,
+				   coalesce->rx_max_coalesced_frames);
+}
+
+/*
+ * Report current port link pause parameter settings.
+ */
+static void cxgb4vf_get_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *pauseparam)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
+	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+/*
+ * Identify the port by blinking the port's LED.
+ */
+static int cxgb4vf_phys_id(struct net_device *dev,
+			   enum ethtool_phys_id_state state)
+{
+	unsigned int val;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (state == ETHTOOL_ID_ACTIVE)
+		val = 0xffff;
+	else if (state == ETHTOOL_ID_INACTIVE)
+		val = 0;
+	else
+		return -EINVAL;
+
+	return t4vf_identify_port(pi->adapter, pi->viid, val);
+}
+
+/*
+ * Port stats maintained per queue of the port.
+ */
+struct queue_port_stats {
+	u64 tso;
+	u64 tx_csum;
+	u64 rx_csum;
+	u64 vlan_ex;
+	u64 vlan_ins;
+	u64 lro_pkts;
+	u64 lro_merged;
+};
+
+/*
+ * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
+ * these need to match the order of statistics returned by
+ * t4vf_get_port_stats().
+ */
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	/*
+	 * These must match the layout of the t4vf_port_stats structure.
+	 */
+	"TxBroadcastBytes  ",
+	"TxBroadcastFrames ",
+	"TxMulticastBytes  ",
+	"TxMulticastFrames ",
+	"TxUnicastBytes    ",
+	"TxUnicastFrames   ",
+	"TxDroppedFrames   ",
+	"TxOffloadBytes    ",
+	"TxOffloadFrames   ",
+	"RxBroadcastBytes  ",
+	"RxBroadcastFrames ",
+	"RxMulticastBytes  ",
+	"RxMulticastFrames ",
+	"RxUnicastBytes    ",
+	"RxUnicastFrames   ",
+	"RxErrorFrames     ",
+
+	/*
+	 * These are accumulated per-queue statistics and must match the
+	 * order of the fields in the queue_port_stats structure.
+	 */
+	"TSO               ",
+	"TxCsumOffload     ",
+	"RxCsumGood        ",
+	"VLANextractions   ",
+	"VLANinsertions    ",
+	"GROPackets        ",
+	"GROMerged         ",
+};
+
+/*
+ * Return the number of statistics in the specified statistics set.
+ */
+static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+	/*NOTREACHED*/
+}
+
+/*
+ * Return the strings for the specified statistics set.
+ */
+static void cxgb4vf_get_strings(struct net_device *dev,
+				u32 sset,
+				u8 *data)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		memcpy(data, stats_strings, sizeof(stats_strings));
+		break;
+	}
+}
+
+/*
+ * Small utility routine to accumulate queue statistics across the queues of
+ * a "port".
+ */
+static void collect_sge_port_stats(const struct adapter *adapter,
+				   const struct port_info *pi,
+				   struct queue_port_stats *stats)
+{
+	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
+	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+	int qs;
+
+	memset(stats, 0, sizeof(*stats));
+	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+		stats->tso += txq->tso;
+		stats->tx_csum += txq->tx_cso;
+		stats->rx_csum += rxq->stats.rx_cso;
+		stats->vlan_ex += rxq->stats.vlan_ex;
+		stats->vlan_ins += txq->vlan_ins;
+		stats->lro_pkts += rxq->stats.lro_pkts;
+		stats->lro_merged += rxq->stats.lro_merged;
+	}
+}
+
+/*
+ * Return the ETH_SS_STATS statistics set.
+ */
+static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
+				      struct ethtool_stats *stats,
+				      u64 *data)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	int err = t4vf_get_port_stats(adapter, pi->pidx,
+				      (struct t4vf_port_stats *)data);
+	if (err)
+		memset(data, 0, sizeof(struct t4vf_port_stats));
+
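+	/*
+	 * The per-queue software statistics follow the firmware port
+	 * statistics in the ethtool data array, so step past the latter.
+	 */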
+	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
+	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return the size of our register map.
+ */
+static int cxgb4vf_get_regs_len(struct net_device *dev)
+{
+	return T4VF_REGMAP_SIZE;
+}
+
+/*
+ * Dump a block of registers, start to end inclusive, into a buffer.
+ */
+static void reg_block_dump(struct adapter *adapter, void *regbuf,
+			   unsigned int start, unsigned int end)
+{
+	u32 *bp = regbuf + start - T4VF_REGMAP_START;
+
+	for ( ; start <= end; start += sizeof(u32)) {
+		/*
+		 * Avoid reading the Mailbox Control register since that
+		 * can trigger a Mailbox Ownership Arbitration cycle and
+		 * interfere with communication with the firmware.
+		 */
+		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
+			*bp++ = 0xffff;
+		else
+			*bp++ = t4_read_reg(adapter, start);
+	}
+}
+
+/*
+ * Copy our entire register map into the provided buffer.
+ */
+static void cxgb4vf_get_regs(struct net_device *dev,
+			     struct ethtool_regs *regs,
+			     void *regbuf)
+{
+	struct adapter *adapter = netdev2adap(dev);
+
+	regs->version = mk_adap_vers(adapter);
+
+	/*
+	 * Fill in register buffer with our register map.
+	 */
+	memset(regbuf, 0, T4VF_REGMAP_SIZE);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+
+	/* T5 adds new registers in the PL Register map.
+	 */
+	reg_block_dump(adapter, regbuf,
+		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
+		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
+		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
+	reg_block_dump(adapter, regbuf,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
+}
+
+/*
+ * Report current Wake On LAN settings.
+ */
+static void cxgb4vf_get_wol(struct net_device *dev,
+			    struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+	.get_settings		= cxgb4vf_get_settings,
+	.get_drvinfo		= cxgb4vf_get_drvinfo,
+	.get_msglevel		= cxgb4vf_get_msglevel,
+	.set_msglevel		= cxgb4vf_set_msglevel,
+	.get_ringparam		= cxgb4vf_get_ringparam,
+	.set_ringparam		= cxgb4vf_set_ringparam,
+	.get_coalesce		= cxgb4vf_get_coalesce,
+	.set_coalesce		= cxgb4vf_set_coalesce,
+	.get_pauseparam		= cxgb4vf_get_pauseparam,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= cxgb4vf_get_strings,
+	.set_phys_id		= cxgb4vf_phys_id,
+	.get_sset_count		= cxgb4vf_get_sset_count,
+	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
+	.get_regs_len		= cxgb4vf_get_regs_len,
+	.get_regs		= cxgb4vf_get_regs,
+	.get_wol		= cxgb4vf_get_wol,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf support code and data.
+ * ================================================
+ */
+
+/*
+ * Show SGE Queue Set information.  We display QPL Queue Sets per line.
+ */
+#define QPL	4
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
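+	/* The seq_file iterator encodes (row index + 1) in v; recover it. */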
+	int qs, r = (uintptr_t)v - 1;
+
+	if (r)
+		seq_putc(seq, '\n');
+
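+	/*
+	 * Helper macros: each emits one labelled row with one column per
+	 * Queue Set in the current group (n is computed below).
+	 */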
+	#define S3(fmt_spec, s, v) \
+		do {\
+			seq_printf(seq, "%-12s", s); \
+			for (qs = 0; qs < n; ++qs) \
+				seq_printf(seq, " %16" fmt_spec, v); \
+			seq_putc(seq, '\n'); \
+		} while (0)
+	#define S(s, v)		S3("s", s, v)
+	#define T(s, v)		S3("u", s, txq[qs].v)
+	#define R(s, v)		S3("u", s, rxq[qs].v)
+
+	if (r < eth_entries) {
+		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  (rxq[qs].rspq.netdev
+		   ? rxq[qs].rspq.netdev->name
+		   : "N/A"));
+		S3("d", "Port:",
+		   (rxq[qs].rspq.netdev
+		    ? ((struct port_info *)
+		       netdev_priv(rxq[qs].rspq.netdev))->port_id
+		    : -1));
+		T("TxQ ID:", q.abs_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ PIdx:", q.pidx);
+		T("TxQ CIdx:", q.cidx);
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
+		S3("u", "Intr pktcnt:",
+		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
+		R("RspQ CIdx:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		R("FL ID:", fl.abs_id);
+		R("FL size:", fl.size - MIN_FL_RESID);
+		R("FL avail:", fl.avail);
+		R("FL PIdx:", fl.pidx);
+		R("FL CIdx:", fl.cidx);
+		return 0;
+	}
+
+	r -= eth_entries;
+	if (r == 0) {
+		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, evtq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[evtq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+	} else if (r == 1) {
+		const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, intrq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[intrq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
+	}
+
+	#undef R
+	#undef T
+	#undef S
+	#undef S3
+
+	return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file".  We group the multi-Queue
+ * sections with QPL Queue Sets per "entry".  The sections of the output are:
+ *
+ *     Ethernet RX/TX Queue Sets
+ *     Firmware Event Queue
+ *     Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_queue_entries(const struct adapter *adapter)
+{
+	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+		((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
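+	/* Encode (position + 1) so that row 0 isn't returned as NULL. */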
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	++*pos;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+	.start = sge_queue_start,
+	.next  = sge_queue_next,
+	.stop  = sge_queue_stop,
+	.show  = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qinfo_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qinfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
+ */
+#define QPL	4
+
+static int sge_qstats_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+	int qs, r = (uintptr_t)v - 1;
+
+	if (r)
+		seq_putc(seq, '\n');
+
+	#define S3(fmt, s, v) \
+		do { \
+			seq_printf(seq, "%-16s", s); \
+			for (qs = 0; qs < n; ++qs) \
+				seq_printf(seq, " %8" fmt, v); \
+			seq_putc(seq, '\n'); \
+		} while (0)
+	#define S(s, v)		S3("s", s, v)
+
+	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
+	#define T(s, v)		T3("lu", s, v)
+
+	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
+	#define R(s, v)		R3("lu", s, v)
+
+	if (r < eth_entries) {
+		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  (rxq[qs].rspq.netdev
+		   ? rxq[qs].rspq.netdev->name
+		   : "N/A"));
+		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
+		R("RxPackets:", stats.pkts);
+		R("RxCSO:", stats.rx_cso);
+		R("VLANxtract:", stats.vlan_ex);
+		R("LROmerged:", stats.lro_merged);
+		R("LROpackets:", stats.lro_pkts);
+		R("RxDrops:", stats.rx_drops);
+		T("TSO:", tso);
+		T("TxCSO:", tx_cso);
+		T("VLANins:", vlan_ins);
+		T("TxQFull:", q.stops);
+		T("TxQRestarts:", q.restarts);
+		T("TxMapErr:", mapping_err);
+		R("FLAllocErr:", fl.alloc_failed);
+		R("FLLrgAlcErr:", fl.large_alloc_failed);
+		R("FLStarving:", fl.starving);
+		return 0;
+	}
+
+	r -= eth_entries;
+	if (r == 0) {
+		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
+		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
+			   evtq->unhandled_irqs);
+		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
+		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
+	} else if (r == 1) {
+		const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
+		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
+			   intrq->unhandled_irqs);
+		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
+		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
+	}
+
+	#undef R
+	#undef T
+	#undef S
+	#undef R3
+	#undef T3
+	#undef S3
+
+	return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file".  We group the multi-Queue
+ * sections with QPL Queue Sets per "entry".  The sections of the output are:
+ *
+ *     Ethernet RX/TX Queue Sets
+ *     Firmware Event Queue
+ *     Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_qstats_entries(const struct adapter *adapter)
+{
+	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+		((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_qstats_entries(seq->private);
+
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_qstats_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_qstats_entries(seq->private);
+
+	(*pos)++;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qstats_seq_ops = {
+	.start = sge_qstats_start,
+	.next  = sge_qstats_next,
+	.stop  = sge_qstats_stop,
+	.show  = sge_qstats_show
+};
+
+static int sge_qstats_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qstats_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qstats_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qstats_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show PCI-E SR-IOV Virtual Function Resource Limits.
+ */
+static int resources_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct vf_resources *vfres = &adapter->params.vfres;
+
+	#define S(desc, fmt, var) \
+		seq_printf(seq, "%-60s " fmt "\n", \
+			   desc " (" #var "):", vfres->var)
+
+	S("Virtual Interfaces", "%d", nvi);
+	S("Egress Queues", "%d", neq);
+	S("Ethernet Control", "%d", nethctrl);
+	S("Ingress Queues w/ Free Lists/Interrupts", "%d", niqflint);
+	S("Ingress Queues", "%d", niq);
+	S("Traffic Class", "%d", tc);
+	S("Port Access Rights Mask", "%#x", pmask);
+	S("MAC Address Filters", "%d", nexactf);
+	S("Firmware Command Read Capabilities", "%#x", r_caps);
+	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+	#undef S
+
+	return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = resources_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Show Virtual Interfaces.
+ */
+static int interfaces_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Interface  Port   VIID\n");
+	} else {
+		struct adapter *adapter = seq->private;
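+		/*
+		 * interfaces_get_idx() encodes (pos + 1) and position 0 is
+		 * the SEQ_START_TOKEN header, so the first port shows up
+		 * here as v == 2.
+		 */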
+		int pidx = (uintptr_t)v - 2;
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+
+		seq_printf(seq, "%9s  %4d  %#5x\n",
+			   dev->name, pi->port_id, pi->viid);
+	}
+	return 0;
+}
+
+static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
+{
+	return pos <= adapter->params.nports
+		? (void *)(uintptr_t)(pos + 1)
+		: NULL;
+}
+
+static void *interfaces_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos
+		? interfaces_get_idx(seq->private, *pos)
+		: SEQ_START_TOKEN;
+}
+
+static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return interfaces_get_idx(seq->private, *pos);
+}
+
+static void interfaces_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations interfaces_seq_ops = {
+	.start = interfaces_start,
+	.next  = interfaces_next,
+	.stop  = interfaces_stop,
+	.show  = interfaces_show
+};
+
+static int interfaces_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &interfaces_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations interfaces_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = interfaces_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf/ files list.
+ */
+struct cxgb4vf_debugfs_entry {
+	const char *name;		/* name of debugfs node */
+	umode_t mode;			/* file system mode */
+	const struct file_operations *fops;
+};
+
+static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
+	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
+	{ "resources",  S_IRUGO, &resources_proc_fops },
+	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
+};
+
+/*
+ * Module and device initialization and cleanup code.
+ * ==================================================
+ */
+
+/*
+ * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
+ * directory (debugfs_root) has already been set up.
+ */
+static int setup_debugfs(struct adapter *adapter)
+{
+	int i;
+
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+	/*
+	 * Debugfs support is best effort.
+	 */
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		(void)debugfs_create_file(debugfs_files[i].name,
+				  debugfs_files[i].mode,
+				  adapter->debugfs_root,
+				  (void *)adapter,
+				  debugfs_files[i].fops);
+
+	return 0;
+}
+
+/*
+ * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
+ * it to our caller to tear down the directory (debugfs_root).
+ */
+static void cleanup_debugfs(struct adapter *adapter)
+{
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+	/*
+	 * Unlike our sister routine cleanup_proc(), we don't need to remove
+	 * individual entries because a call will be made to
+	 * debugfs_remove_recursive().  We just need to clean up any ancillary
+	 * persistent state.
+	 */
+	/* nothing to do */
+}
+
+/*
+ * Perform early "adapter" initialization.  This is where we discover what
+ * adapter parameters we're going to be using and initialize basic adapter
+ * hardware support.
+ */
+static int adap_init0(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct sge_params *sge_params = &adapter->params.sge;
+	struct sge *s = &adapter->sge;
+	unsigned int ethqsets;
+	int err;
+	u32 param, val = 0;
+
+	/*
+	 * Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_wait_dev_ready(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "device didn't become ready:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Grab basic operational parameters.  These will predominantly have
+	 * been set up by the Physical Function Driver or will be hard coded
+	 * into the adapter.  We just have to live with them ...  Note that
+	 * we _must_ get our VPD parameters before our SGE parameters because
+	 * we need to know the adapter's core clock from the VPD in order to
+	 * properly decode the SGE Timer Values.
+	 */
+	err = t4vf_get_dev_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" device parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_vpd_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" VPD parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_sge_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" SGE parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_rss_glb_config(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" RSS parameters: err=%d\n", err);
+		return err;
+	}
+	if (adapter->params.rss.mode !=
+	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+			" mode %d\n", adapter->params.rss.mode);
+		return -EINVAL;
+	}
+	err = t4vf_sge_init(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/* If we're running on newer firmware, let it know that we're
+	 * prepared to deal with encapsulated CPL messages.  Older
+	 * firmware won't understand this and we'll just get
+	 * unencapsulated messages ...
+	 */
+	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+	val = 1;
+	(void) t4vf_set_params(adapter, 1, &param, &val);
+
+	/*
+	 * Retrieve our RX interrupt holdoff timer values and counter
+	 * threshold values from the SGE parameters.
+	 */
+	s->timer_val[0] = core_ticks_to_us(adapter,
+		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[1] = core_ticks_to_us(adapter,
+		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[2] = core_ticks_to_us(adapter,
+		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[3] = core_ticks_to_us(adapter,
+		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[4] = core_ticks_to_us(adapter,
+		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
+	s->timer_val[5] = core_ticks_to_us(adapter,
+		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
+
+	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
+
+	/*
+	 * Grab our Virtual Interface resource allocation, extract the
+	 * features that we're interested in and do a bit of sanity testing on
+	 * what we discover.
+	 */
+	err = t4vf_get_vfres(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to get virtual interface"
+			" resources: err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * The number of "ports" which we support is equal to the number of
+	 * Virtual Interfaces with which we've been provisioned.
+	 */
+	adapter->params.nports = vfres->nvi;
+	if (adapter->params.nports > MAX_NPORTS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
+			 " virtual interfaces\n", MAX_NPORTS,
+			 adapter->params.nports);
+		adapter->params.nports = MAX_NPORTS;
+	}
+
+	/*
+	 * We need to reserve a number of the ingress queues with Free List
+	 * and Interrupt capabilities for special interrupt purposes (like
+	 * asynchronous firmware messages, or forwarded interrupts if we're
+	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
+	 * matched up one-for-one with Ethernet/Control egress queues in order
+	 * to form "Queue Sets" which will be apportioned between the "ports".
+	 * For each Queue Set, we'll need the ability to allocate two Egress
+	 * Contexts -- one for the Ingress Queue Free List and one for the TX
+	 * Ethernet Queue.
+	 */
+	ethqsets = vfres->niqflint - INGQ_EXTRAS;
+	if (vfres->nethctrl != ethqsets) {
+		dev_warn(adapter->pdev_dev, "unequal number of [available]"
+			 " ingress/egress queues (%d/%d); using minimum for"
+			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
+		ethqsets = min(vfres->nethctrl, ethqsets);
+	}
+	if (vfres->neq < ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
+			 " to support Queue Sets (%d); reducing allowed Queue"
+			 " Sets\n", vfres->neq, ethqsets);
+		ethqsets = vfres->neq/2;
+	}
+	if (ethqsets > MAX_ETH_QSETS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
+			 " Sets\n", MAX_ETH_QSETS, ethqsets);
+		ethqsets = MAX_ETH_QSETS;
+	}
+	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
+			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
+	}
+	adapter->sge.max_ethqsets = ethqsets;
+
+	/*
+	 * Check for various parameter sanity issues.  Most checks simply
+	 * result in us using fewer resources than our provisioning but we
+	 * do need at least one "port" with which to work ...
+	 */
+	if (adapter->sge.max_ethqsets < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d available"
+			 " virtual interfaces (too few Queue Sets)\n",
+			 adapter->sge.max_ethqsets, adapter->params.nports);
+		adapter->params.nports = adapter->sge.max_ethqsets;
+	}
+	if (adapter->params.nports == 0) {
+		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+			"usable!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
+			     u8 pkt_cnt_idx, unsigned int size,
+			     unsigned int iqe_size)
+{
+	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+			     (pkt_cnt_idx < SGE_NCOUNTERS ?
+			      QINTR_CNT_EN_F : 0));
+	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
+			    ? pkt_cnt_idx
+			    : 0);
+	rspq->iqe_len = iqe_size;
+	rspq->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and
+ * type of ports we found and the number of available CPUs.  Most settings can
+ * be modified by the admin via ethtool and cxgbtool prior to the adapter
+ * being brought up for the first time.
+ */
+static void cfg_queues(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int q10g, n10g, qidx, pidx, qs;
+	size_t iqe_size;
+
+	/*
+	 * We should not be called till we know how many Queue Sets we can
+	 * support.  In particular, this means that we need to know what kind
+	 * of interrupts we'll be using ...
+	 */
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+
+	/*
+	 * Count the number of 10GbE Virtual Interfaces that we have.
+	 */
+	n10g = 0;
+	for_each_port(adapter, pidx)
+		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+
+	/*
+	 * We default to 1 queue per non-10G port and up to # of cores queues
+	 * per 10G port.
+	 */
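+	/*
+	 * For example, with 8 available Queue Sets, one 1G port and one 10G
+	 * port: q10g = (8 - 1) / 1 = 7, clamped to num_online_cpus(), so the
+	 * 10G port gets up to 7 Queue Sets and the 1G port gets 1.
+	 */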
+	if (n10g == 0)
+		q10g = 0;
+	else {
+		int n1g = (adapter->params.nports - n10g);
+		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
+		if (q10g > num_online_cpus())
+			q10g = num_online_cpus();
+	}
+
+	/*
+	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
+	 * The layout will be established in setup_sge_queues() when the
+	 * adapter is brought up for the first time.
+	 */
+	qidx = 0;
+	for_each_port(adapter, pidx) {
+		struct port_info *pi = adap2pinfo(adapter, pidx);
+
+		pi->first_qset = qidx;
+		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+		qidx += pi->nqsets;
+	}
+	s->ethqsets = qidx;
+
+	/*
+	 * The Ingress Queue Entry Size for our various Response Queues needs
+	 * to be big enough to accommodate the largest message we can receive
+	 * from the chip/firmware; which is 64 bytes ...
+	 */
+	iqe_size = 64;
+
+	/*
+	 * Set up default Queue Set parameters ...  Start off with the
+	 * shortest interrupt holdoff timer.
+	 */
+	for (qs = 0; qs < s->max_ethqsets; qs++) {
+		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
+		struct sge_eth_txq *txq = &s->ethtxq[qs];
+
+		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
+		rxq->fl.size = 72;
+		txq->q.size = 1024;
+	}
+
+	/*
+	 * The firmware event queue is used for link state changes and
+	 * notifications of TX DMA completions.
+	 */
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
+
+	/*
+	 * The forwarded interrupt queue is used when we're in MSI interrupt
+	 * mode.  In this mode all interrupts associated with RX queues will
+	 * be forwarded to a single queue which we'll associate with our MSI
+	 * interrupt vector.  The messages dropped in the forwarded interrupt
+	 * queue will indicate which ingress queue needs servicing ...  This
+	 * queue needs to be large enough to accommodate all of the ingress
+	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
+	 * from equalling the CIDX if every ingress queue has an outstanding
+	 * interrupt).  The queue doesn't need to be any larger because no
+	 * ingress queue will ever have more than one outstanding interrupt at
+	 * any time ...
+	 */
+	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
+		  iqe_size);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * n provides at least one queue per port.
+ */
+static void reduce_ethqs(struct adapter *adapter, int n)
+{
+	int i;
+	struct port_info *pi;
+
+	/*
+	 * While we have too many active Ethernet Queue Sets, iterate across the
+	 * "ports" and reduce their individual Queue Set allocations.
+	 */
+	BUG_ON(n < adapter->params.nports);
+	while (n < adapter->sge.ethqsets)
+		for_each_port(adapter, i) {
+			pi = adap2pinfo(adapter, i);
+			if (pi->nqsets > 1) {
+				pi->nqsets--;
+				adapter->sge.ethqsets--;
+				if (adapter->sge.ethqsets <= n)
+					break;
+			}
+		}
+
+	/*
+	 * Reassign the starting Queue Sets for each of the "ports" ...
+	 */
+	n = 0;
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		pi->first_qset = n;
+		n += pi->nqsets;
+	}
+}
+
+/*
+ * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
+ * we get a separate MSI-X vector for every "Queue Set" plus any extras we
+ * need.  Minimally we need one for every Virtual Interface plus those needed
+ * for our "extras".  Note that this process may lower the maximum number of
+ * allowed Queue Sets ...
+ */
+static int enable_msix(struct adapter *adapter)
+{
+	int i, want, need, nqsets;
+	struct msix_entry entries[MSIX_ENTRIES];
+	struct sge *s = &adapter->sge;
+
+	for (i = 0; i < MSIX_ENTRIES; ++i)
+		entries[i].entry = i;
+
+	/*
+	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
+	 * plus those needed for our "extras" (for example, the firmware
+	 * message queue).  We _need_ at least one "Queue Set" per Virtual
+	 * Interface plus those needed for our "extras".  So now we get to see
+	 * if the song is right ...
+	 */
+	want = s->max_ethqsets + MSIX_EXTRAS;
+	need = adapter->params.nports + MSIX_EXTRAS;
+
+	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+	if (want < 0)
+		return want;
+
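+	/*
+	 * On success, pci_enable_msix_range() returns the number of vectors
+	 * actually granted (between need and want); whatever is left after
+	 * our "extras" can back Ethernet Queue Sets.
+	 */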
+	nqsets = want - MSIX_EXTRAS;
+	if (nqsets < s->max_ethqsets) {
+		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+			 " for %d Queue Sets\n", nqsets);
+		s->max_ethqsets = nqsets;
+		if (nqsets < s->ethqsets)
+			reduce_ethqs(adapter, nqsets);
+	}
+	for (i = 0; i < want; ++i)
+		adapter->msix_info[i].vec = entries[i].vector;
+
+	return 0;
+}
+
+static const struct net_device_ops cxgb4vf_netdev_ops	= {
+	.ndo_open		= cxgb4vf_open,
+	.ndo_stop		= cxgb4vf_stop,
+	.ndo_start_xmit		= t4vf_eth_xmit,
+	.ndo_get_stats		= cxgb4vf_get_stats,
+	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
+	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
+	.ndo_change_mtu		= cxgb4vf_change_mtu,
+	.ndo_fix_features	= cxgb4vf_fix_features,
+	.ndo_set_features	= cxgb4vf_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cxgb4vf_poll_controller,
+#endif
+};
+
+/*
+ * "Probe" a device: initialize a device and construct all kernel and driver
+ * state needed to manage the device.  This routine is called "init_one" in
+ * the PF Driver ...
+ */
+static int cxgb4vf_pci_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	int pci_using_dac;
+	int err, pidx;
+	unsigned int pmask;
+	struct adapter *adapter;
+	struct port_info *pi;
+	struct net_device *netdev;
+
+	/*
+	 * Print our driver banner the first time we're called to initialize a
+	 * device.
+	 */
+	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+	/*
+	 * Initialize generic PCI device state.
+	 */
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		return err;
+	}
+
+	/*
+	 * Reserve PCI resources for the device.  If we can't get them some
+	 * other driver may have already claimed the device ...
+	 */
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+		goto err_disable_device;
+	}
+
+	/*
+	 * Set up our DMA mask: try for 64-bit address masking first and
+	 * fall back to 32-bit if we can't get 64 bits ...
+	 */
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err == 0) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
+				" coherent allocations\n");
+			goto err_release_regions;
+		}
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err != 0) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto err_release_regions;
+		}
+		pci_using_dac = 0;
+	}
+
+	/*
+	 * Enable bus mastering for the device ...
+	 */
+	pci_set_master(pdev);
+
+	/*
+	 * Allocate our adapter data structure and attach it to the device.
+	 */
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+	pci_set_drvdata(pdev, adapter);
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+
+	/*
+	 * Initialize SMP data synchronization resources.
+	 */
+	spin_lock_init(&adapter->stats_lock);
+
+	/*
+	 * Map our I/O registers in BAR0.
+	 */
+	adapter->regs = pci_ioremap_bar(pdev, 0);
+	if (!adapter->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto err_free_adapter;
+	}
+
+	/* Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_prep_adapter(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "device didn't become ready:"
+			" err=%d\n", err);
+		goto err_unmap_bar0;
+	}
+
+	/* For T5 and later we want to use the new BAR-based User Doorbells,
+	 * so we need to map BAR2 here ...
+	 */
+	if (!is_t4(adapter->params.chip)) {
+		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+					   pci_resource_len(pdev, 2));
+		if (!adapter->bar2) {
+			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
+			err = -ENOMEM;
+			goto err_unmap_bar0;
+		}
+	}
+	/*
+	 * Initialize adapter level features.
+	 */
+	adapter->name = pci_name(pdev);
+	adapter->msg_enable = dflt_msg_enable;
+	err = adap_init0(adapter);
+	if (err)
+		goto err_unmap_bar;
+
+	/*
+	 * Allocate our "adapter ports" and stitch everything together.
+	 */
+	pmask = adapter->params.vfres.pmask;
+	for_each_port(adapter, pidx) {
+		int port_id, viid;
+
+		/*
+		 * We simplistically allocate our virtual interfaces
+		 * sequentially across the port numbers to which we have
+		 * access rights.  This should be configurable in some manner
+		 * ...
+		 */
+		if (pmask == 0)
+			break;
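+		/* Claim the lowest-numbered port we still have rights to. */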
+		port_id = ffs(pmask) - 1;
+		pmask &= ~(1 << port_id);
+		viid = t4vf_alloc_vi(adapter, port_id);
+		if (viid < 0) {
+			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+				" err=%d\n", port_id, viid);
+			err = viid;
+			goto err_free_dev;
+		}
+
+		/*
+		 * Allocate our network device and stitch things together.
+		 */
+		netdev = alloc_etherdev_mq(sizeof(struct port_info),
+					   MAX_PORT_QSETS);
+		if (netdev == NULL) {
+			t4vf_free_vi(adapter, viid);
+			err = -ENOMEM;
+			goto err_free_dev;
+		}
+		adapter->port[pidx] = netdev;
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+		pi = netdev_priv(netdev);
+		pi->adapter = adapter;
+		pi->pidx = pidx;
+		pi->port_id = port_id;
+		pi->viid = viid;
+
+		/*
+		 * Initialize the starting state of our "port" and register
+		 * it.
+		 */
+		pi->xact_addr_filt = -1;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+
+		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
+		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_HIGHDMA;
+		netdev->features = netdev->hw_features |
+				   NETIF_F_HW_VLAN_CTAG_TX;
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+
+		netdev->priv_flags |= IFF_UNICAST_FLT;
+
+		netdev->netdev_ops = &cxgb4vf_netdev_ops;
+		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
+
+		/*
+		 * Initialize the hardware/software state for the port.
+		 */
+		err = t4vf_port_init(adapter, pidx);
+		if (err) {
+			dev_err(&pdev->dev, "cannot initialize port %d\n",
+				pidx);
+			goto err_free_dev;
+		}
+	}
+
+	/*
+	 * The "card" is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole "card" but rather proceed
+	 * only with the ports we manage to register successfully.  However we
+	 * must register at least one net device.
+	 */
+	for_each_port(adapter, pidx) {
+		netdev = adapter->port[pidx];
+		if (netdev == NULL)
+			continue;
+
+		err = register_netdev(netdev);
+		if (err) {
+			dev_warn(&pdev->dev, "cannot register net device %s,"
+				 " skipping\n", netdev->name);
+			continue;
+		}
+
+		set_bit(pidx, &adapter->registered_device_map);
+	}
+	if (adapter->registered_device_map == 0) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto err_free_dev;
+	}
+
+	/*
+	 * Set up our debugfs entries.
+	 */
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
+		adapter->debugfs_root =
+			debugfs_create_dir(pci_name(pdev),
+					   cxgb4vf_debugfs_root);
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
+			dev_warn(&pdev->dev, "could not create debugfs"
+				 " directory");
+		else
+			setup_debugfs(adapter);
+	}
+
+	/*
+	 * See what interrupts we'll be using.  If we've been configured to
+	 * use MSI-X interrupts, try to enable them but fall back to using
+	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
+	 * get MSI interrupts we bail with the error.
+	 */
+	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else {
+		err = pci_enable_msi(pdev);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
+				" err=%d\n",
+				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
+			goto err_free_debugfs;
+		}
+		adapter->flags |= USING_MSI;
+	}
+
+	/*
+	 * Now that we know how many "ports" we have and what their types are,
+	 * and how many Queue Sets we can support, we can configure our queue
+	 * resources.
+	 */
+	cfg_queues(adapter);
+
+	/*
+	 * Print a short notice on the existence and configuration of the new
+	 * VF network device ...
+	 */
+	for_each_port(adapter, pidx) {
+		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
+			 adapter->port[pidx]->name,
+			 (adapter->flags & USING_MSIX) ? "MSI-X" :
+			 (adapter->flags & USING_MSI)  ? "MSI" : "");
+	}
+
+	/*
+	 * Return success!
+	 */
+	return 0;
+
+	/*
+	 * Error recovery and exit code.  Unwind state that's been created
+	 * so far and return the error.
+	 */
+
+err_free_debugfs:
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
+		cleanup_debugfs(adapter);
+		debugfs_remove_recursive(adapter->debugfs_root);
+	}
+
+err_free_dev:
+	for_each_port(adapter, pidx) {
+		netdev = adapter->port[pidx];
+		if (netdev == NULL)
+			continue;
+		pi = netdev_priv(netdev);
+		t4vf_free_vi(adapter, pi->viid);
+		if (test_bit(pidx, &adapter->registered_device_map))
+			unregister_netdev(netdev);
+		free_netdev(netdev);
+	}
+
+err_unmap_bar:
+	if (!is_t4(adapter->params.chip))
+		iounmap(adapter->bar2);
+
+err_unmap_bar0:
+	iounmap(adapter->regs);
+
+err_free_adapter:
+	kfree(adapter);
+
+err_release_regions:
+	pci_release_regions(pdev);
+	pci_clear_master(pdev);
+
+err_disable_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+/*
+ * "Remove" a device: tear down all kernel and driver state created in the
+ * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
+ * that this is called "remove_one" in the PF Driver.)
+ */
+static void cxgb4vf_pci_remove(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	/*
+	 * Tear down driver state associated with device.
+	 */
+	if (adapter) {
+		int pidx;
+
+		/*
+		 * Stop all of our activity.  Unregister network port,
+		 * disable interrupts, etc.
+		 */
+		for_each_port(adapter, pidx)
+			if (test_bit(pidx, &adapter->registered_device_map))
+				unregister_netdev(adapter->port[pidx]);
+		t4vf_sge_stop(adapter);
+		if (adapter->flags & USING_MSIX) {
+			pci_disable_msix(adapter->pdev);
+			adapter->flags &= ~USING_MSIX;
+		} else if (adapter->flags & USING_MSI) {
+			pci_disable_msi(adapter->pdev);
+			adapter->flags &= ~USING_MSI;
+		}
+
+		/*
+		 * Tear down our debugfs entries.
+		 */
+		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
+			cleanup_debugfs(adapter);
+			debugfs_remove_recursive(adapter->debugfs_root);
+		}
+
+		/*
+		 * Free all of the various resources which we've acquired ...
+		 */
+		t4vf_free_sge_resources(adapter);
+		for_each_port(adapter, pidx) {
+			struct net_device *netdev = adapter->port[pidx];
+			struct port_info *pi;
+
+			if (netdev == NULL)
+				continue;
+
+			pi = netdev_priv(netdev);
+			t4vf_free_vi(adapter, pi->viid);
+			free_netdev(netdev);
+		}
+		iounmap(adapter->regs);
+		if (!is_t4(adapter->params.chip))
+			iounmap(adapter->bar2);
+		kfree(adapter);
+	}
+
+	/*
+	 * Disable the device and release its PCI resources.
+	 */
+	pci_disable_device(pdev);
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+}
+
+/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/* Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx)
+		if (test_bit(pidx, &adapter->registered_device_map))
+			unregister_netdev(adapter->port[pidx]);
+
+	/* Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_sge_stop(adapter);
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+
+	/*
+	 * Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION	0x8
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+
+#include "../cxgb4/t4_pci_id_tbl.h"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
+
+static struct pci_driver cxgb4vf_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= cxgb4vf_pci_tbl,
+	.probe		= cxgb4vf_pci_probe,
+	.remove		= cxgb4vf_pci_remove,
+	.shutdown	= cxgb4vf_pci_shutdown,
+};
+
+/*
+ * Initialize global driver state.
+ */
+static int __init cxgb4vf_module_init(void)
+{
+	int ret;
+
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
+			msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
+	/* Debugfs support is optional, just warn if this fails */
+	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+		pr_warn("could not create debugfs entry, continuing\n");
+
+	ret = pci_register_driver(&cxgb4vf_driver);
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+		debugfs_remove(cxgb4vf_debugfs_root);
+	return ret;
+}
+
+/*
+ * Tear down global driver state.
+ */
+static void __exit cxgb4vf_module_exit(void)
+{
+	pci_unregister_driver(&cxgb4vf_driver);
+	debugfs_remove(cxgb4vf_debugfs_root);
+}
+
+module_init(cxgb4vf_module_init);
+module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
new file mode 100644
index 0000000..fa3786a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -0,0 +1,2700 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
+#include "../cxgb4/t4fw_api.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Constants ...
+ */
+enum {
+	/*
+	 * Egress Queue sizes, producer and consumer indices are all in units
+	 * of Egress Context Units bytes.  Note that as far as the hardware is
+	 * concerned, the free list is an Egress Queue (the host produces free
+	 * buffers which the hardware consumes) and free list entries are
+	 * 64-bit PCI DMA addresses.
+	 */
+	EQ_UNIT = SGE_EQ_IDXSIZE,
+	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+	/*
+	 * Max number of TX descriptors we clean up at a time.  Should be
+	 * modest as freeing skbs isn't cheap and it happens while holding
+	 * locks.  We just need to free packets faster than they arrive; we
+	 * eventually catch up and keep the amortized cost reasonable.
+	 */
+	MAX_TX_RECLAIM = 16,
+
+	/*
+	 * Max number of Rx buffers we replenish at a time.  Again keep this
+	 * modest, allocating buffers isn't cheap either.
+	 */
+	MAX_RX_REFILL = 16,
+
+	/*
+	 * Period of the Rx queue check timer.  This timer is infrequent as it
+	 * has something to do only when the system experiences severe memory
+	 * shortage.
+	 */
+	RX_QCHECK_PERIOD = (HZ / 2),
+
+	/*
+	 * Period of the TX queue check timer and the maximum number of TX
+	 * descriptors to be reclaimed by the TX timer.
+	 */
+	TX_QCHECK_PERIOD = (HZ / 2),
+	MAX_TIMER_TX_RECLAIM = 100,
+
+	/*
+	 * Suspend an Ethernet TX queue with fewer available descriptors than
+	 * this.  We always want to have room for a maximum sized packet:
+	 * inline immediate data + MAX_SKB_FRAGS. This is the same as
+	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
+	 * (see that function and its helpers for a description of the
+	 * calculation).
+	 */
+	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
+	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
+				   ((ETHTXQ_MAX_FRAGS-1) & 1) +
+				   2),
+	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_lso_core) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
+
+	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
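+
+	/*
+	 * A quick worked example (assuming the common MAX_SKB_FRAGS of 17):
+	 * ETHTXQ_MAX_FRAGS is 18, so ETHTXQ_MAX_SGL_LEN comes to
+	 * (3*17)/2 + (17&1) + 2 = 28 flits.  ETHTXQ_STOP_THRES then reserves
+	 * exactly the TX descriptors needed for one maximum-sized Work
+	 * Request (header plus SGL) plus one spare descriptor.
+	 */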
+
+	/*
+	 * Max TX descriptor space we allow for an Ethernet packet to be
+	 * inlined into a WR.  This is limited by the maximum value which
+	 * we can specify for immediate data in the firmware Ethernet TX
+	 * Work Request.
+	 */
+	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
+
+	/*
+	 * Max size of a WR sent through a control TX queue.
+	 */
+	MAX_CTRL_WR_LEN = 256,
+
+	/*
+	 * Maximum amount of data which we'll ever need to inline into a
+	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
+	 */
+	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
+			  ? MAX_IMM_TX_PKT_LEN
+			  : MAX_CTRL_WR_LEN),
+
+	/*
+	 * For incoming packets less than RX_COPY_THRES, we copy the data into
+	 * an skb rather than referencing the data.  We allocate enough
+	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
+	 * of the data (header).
+	 */
+	RX_COPY_THRES = 256,
+	RX_PULL_LEN = 128,
+
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};
+
+/*
+ * Software state per TX descriptor.
+ */
+struct tx_sw_desc {
+	struct sk_buff *skb;		/* socket buffer of TX data source */
+	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
+};
+
+/*
+ * Software state per RX Free List descriptor.  We keep track of the allocated
+ * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
+ * page size and its PCI DMA mapped state are stored in the low bits of the
+ * PCI DMA address as per below.
+ */
+struct rx_sw_desc {
+	struct page *page;		/* Free List page buffer */
+	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
+					/*   and flags (see below) */
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
+ * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
+ * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
+ * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
+ * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
+ * maintained in an inverse sense so the hardware never sees that bit high.
+ */
+enum {
+	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
+	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
+};
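+
+/*
+ * So, for example, a DMA-mapped buffer drawn from the larger Free List
+ * buffer size is stored as (dma_addr | RX_LARGE_BUF) with RX_UNMAPPED_BUF
+ * clear; get_buf_addr() below strips both flag bits off again before the
+ * address is handed back to the DMA API.
+ */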
+
+/**
+ *	get_buf_addr - return DMA buffer address of software descriptor
+ *	@sdesc: pointer to the software buffer descriptor
+ *
+ *	Return the DMA buffer address of a software descriptor (stripping out
+ *	our low-order flag bits).
+ */
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
+{
+	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+/**
+ *	is_buf_mapped - is buffer mapped for DMA?
+ *	@sdesc: pointer to the software buffer descriptor
+ *
+ *	Determine whether the buffer associated with a software descriptor is
+ *	mapped for DMA or not.
+ */
+static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
+{
+	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ *	need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ *	Returns true if the platform needs sk_buff unmapping.  The compiler
+ *	optimizes away the unnecessary unmapping code when this returns false.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+/**
+ *	txq_avail - return the number of available slots in a TX queue
+ *	@tq: the TX queue
+ *
+ *	Returns the number of available descriptors in a TX queue.
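+ *
+ *	One descriptor is deliberately held back so that the Producer Index
+ *	can never advance onto the Consumer Index, which would make a
+ *	completely full ring indistinguishable from an empty one (the same
+ *	reasoning as for fl_cap() below).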
+ */
+static inline unsigned int txq_avail(const struct sge_txq *tq)
+{
+	return tq->size - 1 - tq->in_use;
+}
+
+/**
+ *	fl_cap - return the capacity of a Free List
+ *	@fl: the Free List
+ *
+ *	Returns the capacity of a Free List.  The capacity is less than the
+ *	size because an Egress Queue Index Unit worth of descriptors needs to
+ *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
+ *	and CIDX will match and the hardware will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+	return fl->size - FL_PER_EQ_UNIT;
+}
+
+/**
+ *	fl_starving - return whether a Free List is starving.
+ *	@adapter: pointer to the adapter
+ *	@fl: the Free List
+ *
+ *	Tests specified Free List to see whether the number of buffers
+ *	available to the hardware has fallen below our "starvation"
+ *	threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
+{
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
+}
+
+/**
+ *	map_skb -  map an skb for DMA to the device
+ *	@dev: the egress net device
+ *	@skb: the packet to map
+ *	@addr: a pointer to the base of the DMA mapping array
+ *
+ *	Map an skb for DMA to the device and return an array of DMA addresses.
+ */
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+		   dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, *addr))
+		goto out_err;
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+	for (fp = si->frags; fp < end; fp++) {
+		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, *addr))
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	while (fp-- > si->frags)
+		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+	return -ENOMEM;
+}
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
+{
+	const struct ulptx_sge_pair *p;
+	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+	if (likely(skb_headlen(skb)))
+		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
+				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+	else {
+		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
+			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+		nfrags--;
+	}
+
+	/*
+	 * the complexity below is because of the possibility of a wrap-around
+	 * in the middle of an SGL
+	 */
+	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
+unmap:
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p++;
+		} else if ((u8 *)p == (u8 *)tq->stat) {
+			p = (const struct ulptx_sge_pair *)tq->desc;
+			goto unmap;
+		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
+			const __be64 *addr = (const __be64 *)tq->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[1]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[2];
+		} else {
+			const __be64 *addr = (const __be64 *)tq->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[1];
+		}
+	}
+	if (nfrags) {
+		__be64 addr;
+
+		if ((u8 *)p == (u8 *)tq->stat)
+			p = (const struct ulptx_sge_pair *)tq->desc;
+		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
+			? p->addr[0]
+			: *(const __be64 *)tq->desc);
+		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
+			       DMA_TO_DEVICE);
+	}
+}
+
+/**
+ *	free_tx_desc - reclaims TX descriptors and their buffers
+ *	@adapter: the adapter
+ *	@tq: the TX queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims TX descriptors from an SGE TX queue and frees the associated
+ *	TX buffers.  Called with the TX queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
+			 unsigned int n, bool unmap)
+{
+	struct tx_sw_desc *sdesc;
+	unsigned int cidx = tq->cidx;
+	struct device *dev = adapter->pdev_dev;
+
+	const int need_unmap = need_skb_unmap() && unmap;
+
+	sdesc = &tq->sdesc[cidx];
+	while (n--) {
+		/*
+		 * If we kept a reference to the original TX skb, we need to
+		 * unmap it from PCI DMA space (if required) and free it.
+		 */
+		if (sdesc->skb) {
+			if (need_unmap)
+				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
+			dev_consume_skb_any(sdesc->skb);
+			sdesc->skb = NULL;
+		}
+
+		sdesc++;
+		if (++cidx == tq->size) {
+			cidx = 0;
+			sdesc = tq->sdesc;
+		}
+	}
+	tq->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a TX queue.
+ */
+static inline int reclaimable(const struct sge_txq *tq)
+{
+	int hw_cidx = be16_to_cpu(tq->stat->cidx);
+	int reclaimable = hw_cidx - tq->cidx;
+
+	if (reclaimable < 0)
+		reclaimable += tq->size;
+	return reclaimable;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed TX descriptors
+ *	@adapter: the adapter
+ *	@tq: the TX queue to reclaim completed descriptors from
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims TX descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the TX
+ *	queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+					struct sge_txq *tq,
+					bool unmap)
+{
+	int avail = reclaimable(tq);
+
+	if (avail) {
+		/*
+		 * Limit the amount of clean up work we do at a time to keep
+		 * the TX lock hold time O(1).
+		 */
+		if (avail > MAX_TX_RECLAIM)
+			avail = MAX_TX_RECLAIM;
+
+		free_tx_desc(adapter, tq, avail, unmap);
+		tq->in_use -= avail;
+	}
+}
+
+/**
+ *	get_buf_size - return the size of an RX Free List buffer.
+ *	@adapter: pointer to the associated adapter
+ *	@sdesc: pointer to the software buffer descriptor
+ */
+static inline int get_buf_size(const struct adapter *adapter,
+			       const struct rx_sw_desc *sdesc)
+{
+	const struct sge *s = &adapter->sge;
+
+	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
+}
+
+/**
+ *	free_rx_bufs - free RX buffers on an SGE Free List
+ *	@adapter: the adapter
+ *	@fl: the SGE Free List to free buffers from
+ *	@n: how many buffers to free
+ *
+ *	Release the next @n buffers on an SGE Free List RX queue.   The
+ *	buffers must be made inaccessible to hardware before calling this
+ *	function.
+ */
+static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
+{
+	while (n--) {
+		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+		if (is_buf_mapped(sdesc))
+			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+				       get_buf_size(adapter, sdesc),
+				       PCI_DMA_FROMDEVICE);
+		put_page(sdesc->page);
+		sdesc->page = NULL;
+		if (++fl->cidx == fl->size)
+			fl->cidx = 0;
+		fl->avail--;
+	}
+}
+
+/**
+ *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
+ *	@adapter: the adapter
+ *	@fl: the SGE Free List
+ *
+ *	Unmap the current buffer on an SGE Free List RX queue.   The
+ *	buffer must be made inaccessible to HW before calling this function.
+ *
+ *	This is similar to @free_rx_bufs above but does not free the buffer.
+ *	Do note that the FL still loses any further access to the buffer.
+ *	This is used predominantly to "transfer ownership" of an FL buffer
+ *	to another entity (typically an skb's fragment list).
+ */
+static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
+{
+	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+	if (is_buf_mapped(sdesc))
+		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+			       get_buf_size(adapter, sdesc),
+			       PCI_DMA_FROMDEVICE);
+	sdesc->page = NULL;
+	if (++fl->cidx == fl->size)
+		fl->cidx = 0;
+	fl->avail--;
+}
+
+/**
+ *	ring_fl_db - ring doorbell on free list
+ *	@adapter: the adapter
+ *	@fl: the Free List whose doorbell should be rung ...
+ *
+ *	Tell the Scatter Gather Engine that there are new free list entries
+ *	available.
+ */
+static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
+{
+	u32 val = adapter->params.arch.sge_fl_db;
+
+	/* The SGE keeps track of its Producer and Consumer Indices in terms
+	 * of Egress Queue Units so we can only tell it about integral numbers
+	 * of multiples of Free List Entries per Egress Queue Units ...
+	 */
+	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+		if (is_t4(adapter->params.chip))
+			val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+		else
+			val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
+
+		/* Make sure all memory writes to the Free List queue are
+		 * committed before we tell the hardware about them.
+		 */
+		wmb();
+
+		/* If we don't have access to the new User Doorbell (T5+), use
+		 * the old doorbell mechanism; otherwise use the new BAR2
+		 * mechanism.
+		 */
+		if (unlikely(fl->bar2_addr == NULL)) {
+			t4_write_reg(adapter,
+				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+				     QID_V(fl->cntxt_id) | val);
+		} else {
+			writel(val | QID_V(fl->bar2_qid),
+			       fl->bar2_addr + SGE_UDB_KDOORBELL);
+
+			/* This Write Memory Barrier will force the write to
+			 * the User Doorbell area to be flushed.
+			 */
+			wmb();
+		}
+		fl->pend_cred %= FL_PER_EQ_UNIT;
+	}
+}
+
+/**
+ *	set_rx_sw_desc - initialize software RX buffer descriptor
+ *	@sdesc: pointer to the software RX buffer descriptor
+ *	@page: pointer to the page data structure backing the RX buffer
+ *	@dma_addr: PCI DMA address (possibly with low-bit flags)
+ */
+static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
+				  dma_addr_t dma_addr)
+{
+	sdesc->page = page;
+	sdesc->dma_addr = dma_addr;
+}
+
+/*
+ * Support for poisoning RX buffers ...
+ */
+#define POISON_BUF_VAL -1
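+
+/*
+ * Change POISON_BUF_VAL to a non-negative byte value (e.g. 0xa5) to have
+ * every freshly allocated Free List page filled with that pattern for
+ * debugging; with the default of -1, poison_buf() compiles to nothing.
+ */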
+
+static inline void poison_buf(struct page *page, size_t sz)
+{
+#if POISON_BUF_VAL >= 0
+	memset(page_address(page), POISON_BUF_VAL, sz);
+#endif
+}
+
+/**
+ *	refill_fl - refill an SGE RX buffer ring
+ *	@adapter: the adapter
+ *	@fl: the Free List ring to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for the allocations
+ *
+ *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must assure that
+ *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
+ *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
+ *	of buffers allocated.  If afterwards the queue is found critically low,
+ *	mark it as starving in the bitmap of starving FLs.
+ */
+static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
+			      int n, gfp_t gfp)
+{
+	struct sge *s = &adapter->sge;
+	struct page *page;
+	dma_addr_t dma_addr;
+	unsigned int cred = fl->avail;
+	__be64 *d = &fl->desc[fl->pidx];
+	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
+
+	/*
+	 * Sanity: ensure that the result of adding n Free List buffers
+	 * won't result in wrapping the SGE's Producer Index around to
+	 * its Consumer Index thereby indicating an empty Free List ...
+	 */
+	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+
+	gfp |= __GFP_NOWARN;
+
+	/*
+	 * If we support large pages, prefer large buffers and fail over to
+	 * small pages if we can't allocate large pages to satisfy the refill.
+	 * If we don't support large pages, drop directly into the small page
+	 * allocation code.
+	 */
+	if (s->fl_pg_order == 0)
+		goto alloc_small_pages;
+
+	while (n) {
+		page = __dev_alloc_pages(gfp, s->fl_pg_order);
+		if (unlikely(!page)) {
+			/*
+			 * We've failed in our attempt to allocate a "large
+			 * page".  Fail over to the "small page" allocation
+			 * below.
+			 */
+			fl->large_alloc_failed++;
+			break;
+		}
+		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
+
+		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
+					PAGE_SIZE << s->fl_pg_order,
+					PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+			/*
+			 * We've run out of DMA mapping space.  Free up the
+			 * buffer and return with what we've managed to put
+			 * into the free list.  We don't want to fail over to
+			 * the small page allocation below in this case
+			 * because DMA mapping resources are typically
+			 * critical resources once they become scarce.
+			 */
+			__free_pages(page, s->fl_pg_order);
+			goto out;
+		}
+		dma_addr |= RX_LARGE_BUF;
+		*d++ = cpu_to_be64(dma_addr);
+
+		set_rx_sw_desc(sdesc, page, dma_addr);
+		sdesc++;
+
+		fl->avail++;
+		if (++fl->pidx == fl->size) {
+			fl->pidx = 0;
+			sdesc = fl->sdesc;
+			d = fl->desc;
+		}
+		n--;
+	}
+
+alloc_small_pages:
+	while (n--) {
+		page = __dev_alloc_page(gfp);
+		if (unlikely(!page)) {
+			fl->alloc_failed++;
+			break;
+		}
+		poison_buf(page, PAGE_SIZE);
+
+		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+			put_page(page);
+			break;
+		}
+		*d++ = cpu_to_be64(dma_addr);
+
+		set_rx_sw_desc(sdesc, page, dma_addr);
+		sdesc++;
+
+		fl->avail++;
+		if (++fl->pidx == fl->size) {
+			fl->pidx = 0;
+			sdesc = fl->sdesc;
+			d = fl->desc;
+		}
+	}
+
+out:
+	/*
+	 * Update our accounting state to incorporate the new Free List
+	 * buffers, tell the hardware about them and return the number of
+	 * buffers which we were able to allocate.
+	 */
+	cred = fl->avail - cred;
+	fl->pend_cred += cred;
+	ring_fl_db(adapter, fl);
+
+	if (unlikely(fl_starving(adapter, fl))) {
+		smp_wmb();
+		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
+	}
+
+	return cred;
+}
+
+/*
+ * Refill a Free List to its capacity or the Maximum Refill Increment,
+ * whichever is smaller ...
+ */
+static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
+{
+	refill_fl(adapter, fl,
+		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+		  GFP_ATOMIC);
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@dev: the PCI device's core device
+ *	@nelem: the number of descriptors
+ *	@hwsize: the size of each hardware descriptor
+ *	@swsize: the size of each software descriptor
+ *	@busaddrp: the physical PCI bus address of the allocated ring
+ *	@swringp: return address pointer for software ring
+ *	@stat_size: extra space in hardware ring for status information
+ *
+ *	Allocates resources for an SGE descriptor ring, such as TX queues,
+ *	free buffer lists, response queues, etc.  Each SGE ring requires
+ *	space for its hardware descriptors plus, optionally, space for software
+ *	state associated with each hardware entry (the metadata).  The function
+ *	returns three values: the virtual address for the hardware ring (the
+ *	return value of the function), the PCI bus address of the hardware
+ *	ring (in *busaddrp), and the address of the software ring (in swringp).
+ *	Both the hardware and software rings are returned zeroed out.
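+ *
+ *	For example, the Free List setup later in this file allocates its
+ *	ring roughly as:
+ *
+ *	    fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64),
+ *	                          sizeof(struct rx_sw_desc), &fl->addr,
+ *	                          &fl->sdesc, s->stat_len);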
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
+			size_t swsize, dma_addr_t *busaddrp, void *swringp,
+			size_t stat_size)
+{
+	/*
+	 * Allocate the hardware ring and PCI DMA bus address space for said ring.
+	 */
+	size_t hwlen = nelem * hwsize + stat_size;
+	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+
+	if (!hwring)
+		return NULL;
+
+	/*
+	 * If the caller wants a software ring, allocate it and return a
+	 * pointer to it in *swringp.
+	 */
+	BUG_ON((swsize != 0) != (swringp != NULL));
+	if (swsize) {
+		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
+
+		if (!swring) {
+			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
+			return NULL;
+		}
+		*(void **)swringp = swring;
+	}
+
+	/*
+	 * Zero out the hardware ring and return its address as our function
+	 * value.
+	 */
+	memset(hwring, 0, hwlen);
+	return hwring;
+}
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits (8-byte units) needed for a Direct
+ *	Scatter/Gather List that can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	/*
+	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
+	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+	 * repeated sequences of { Length[i], Length[i+1], Address[i],
+	 * Address[i+1] } (this ensures that all addresses are on 64-bit
+	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
+	 * Address[N+1] is omitted.
+	 *
+	 * The following calculation incorporates all of the above.  It's
+	 * somewhat hard to follow but, briefly: the "+2" accounts for the
+	 * first two flits which include the DSGL header, Length0 and
+	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
+	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
+	 * (n-1) is odd ...
+	 */
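+	/*
+	 * Worked example: n = 3 entries gives, after the decrement below,
+	 * (3*2)/2 + (2&1) + 2 = 5 flits: two flits for the header, Length0
+	 * and Address0, plus three flits for the remaining {Length1, Length2,
+	 * Address1, Address2} group.
+	 */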
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ *	flits_to_desc - returns the num of TX descriptors for the given flits
+ *	@flits: the number of flits
+ *
+ *	Returns the number of TX descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int flits)
+{
+	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
+	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
+}
+
+/**
+ *	is_eth_imm - can an Ethernet packet be sent as immediate data?
+ *	@skb: the packet
+ *
+ *	Returns whether an Ethernet packet is small enough to fit completely as
+ *	immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+	/*
+	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+	 * which does not accommodate immediate data.  We could dike out all
+	 * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware.  It would also
+	 * create more differences between the PF and VF Drivers.
+	 */
+	return false;
+}
+
+/**
+ *	calc_tx_flits - calculate the number of flits for a packet TX WR
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for a TX Work Request for the
+ *	given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+	unsigned int flits;
+
+	/*
+	 * If the skb is small enough, we can pump it out as a work request
+	 * with only immediate data.  In that case we just have to have the
+	 * TX Packet header plus the skb data in the Work Request.
+	 */
+	if (is_eth_imm(skb))
+		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+				    sizeof(__be64));
+
+	/*
+	 * Otherwise, we're going to have to construct a Scatter/Gather List
+	 * of the skb body and fragments.  We also include the flits necessary
+	 * for the TX Packet Work Request and CPL.  We always have a firmware
+	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+	 * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+	 */
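+	/*
+	 * For example, a non-TSO skb with data in its main body plus three
+	 * page fragments needs sgl_len(3 + 1) = 7 flits for the SGL, on top
+	 * of the Work Request and CPL header flits added below.
+	 */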
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+	if (skb_shinfo(skb)->gso_size)
+		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_lso_core) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	else
+		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	return flits;
+}
+
+/**
+ *	write_sgl - populate a Scatter/Gather List for a packet
+ *	@skb: the packet
+ *	@tq: the TX queue we are writing into
+ *	@sgl: starting location for writing the SGL
+ *	@end: points right after the end of the SGL
+ *	@start: start offset into skb main-body data to include in the SGL
+ *	@addr: the list of DMA bus addresses for the SGL elements
+ *
+ *	Generates a Scatter/Gather List for the buffers that make up a packet.
+ *	The caller must provide adequate space for the SGL that will be written.
+ *	The SGL includes all of the packet's page fragments and the data in its
+ *	main body except for the first @start bytes.  @sgl must be 16-byte
+ *	aligned and within a TX descriptor with available space.  @end points
+ *	right after the end of the SGL but does not account for any potential
+ *	wrap around, i.e., @end > @tq->stat.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
+		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+		      const dma_addr_t *addr)
+{
+	unsigned int i, len;
+	struct ulptx_sge_pair *to;
+	const struct skb_shared_info *si = skb_shinfo(skb);
+	unsigned int nfrags = si->nr_frags;
+	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+	len = skb_headlen(skb) - start;
+	if (likely(len)) {
+		sgl->len0 = htonl(len);
+		sgl->addr0 = cpu_to_be64(addr[0] + start);
+		nfrags++;
+	} else {
+		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
+		sgl->addr0 = cpu_to_be64(addr[1]);
+	}
+
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(nfrags));
+	if (likely(--nfrags == 0))
+		return;
+	/*
+	 * Most of the complexity below deals with the possibility we hit the
+	 * end of the queue in the middle of writing the SGL.  For this case
+	 * only we create the SGL in a temporary buffer and then copy it.
+	 */
+	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
+
+	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
+		to->addr[0] = cpu_to_be64(addr[i]);
+		to->addr[1] = cpu_to_be64(addr[++i]);
+	}
+	if (nfrags) {
+		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+		to->len[1] = cpu_to_be32(0);
+		to->addr[0] = cpu_to_be64(addr[i + 1]);
+	}
+	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
+		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
+
+		if (likely(part0))
+			memcpy(sgl->sge, buf, part0);
+		part1 = (u8 *)end - (u8 *)tq->stat;
+		memcpy(tq->desc, (u8 *)buf + part0, part1);
+		end = (void *)tq->desc + part1;
+	}
+	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
+		*end = 0;
+}
+
+/**
+ *	ring_tx_db - check and potentially ring a TX queue's doorbell
+ *	@adapter: the adapter
+ *	@tq: the TX queue
+ *	@n: number of new descriptors to give to HW
+ *
+ *	Ring the doorbell for a TX queue.
+ */
+static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
+			      int n)
+{
+	/* Make sure that all writes to the TX Descriptors are committed
+	 * before we tell the hardware about them.
+	 */
+	wmb();
+
+	/* If we don't have access to the new User Doorbell (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(tq->bar2_addr == NULL)) {
+		u32 val = PIDX_V(n);
+
+		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+			     QID_V(tq->cntxt_id) | val);
+	} else {
+		u32 val = PIDX_T5_V(n);
+
+		/* T4 and later chips share the same PIDX field offset within
+		 * the doorbell, but T5 and later shrank the field in order to
+		 * gain a bit for Doorbell Priority.  The field was absurdly
+		 * large in the first place (14 bits) so we just use the T5
+		 * and later limits and warn if a Queue ID is too large.
+		 */
+		WARN_ON(val & DBPRIO_F);
+
+		/* If we're only writing a single Egress Unit and the BAR2
+		 * Queue ID is 0, we can use the Write Combining Doorbell
+		 * Gather Buffer; otherwise we use the simple doorbell.
+		 */
+		if (n == 1 && tq->bar2_qid == 0) {
+			unsigned int index = (tq->pidx
+					      ? (tq->pidx - 1)
+					      : (tq->size - 1));
+			__be64 *src = (__be64 *)&tq->desc[index];
+			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
+							 SGE_UDB_WCDOORBELL);
+			unsigned int count = EQ_UNIT / sizeof(__be64);
+
+			/* Copy the TX Descriptor in a tight loop in order to
+			 * try to get it to the adapter in a single Write
+			 * Combined transfer on the PCI-E Bus.  If the Write
+			 * Combine fails (say because of an interrupt, etc.)
+			 * the hardware will simply take the last write as a
+			 * simple doorbell write with a PIDX Increment of 1
+			 * and will fetch the TX Descriptor from memory via
+			 * DMA.
+			 */
+			while (count) {
+				/* the (__force u64) is because the compiler
+				 * doesn't understand the endian swizzling
+				 * going on
+				 */
+				writeq((__force u64)*src, dst);
+				src++;
+				dst++;
+				count--;
+			}
+		} else
+			writel(val | QID_V(tq->bar2_qid),
+			       tq->bar2_addr + SGE_UDB_KDOORBELL);
+
+		/* This Write Memory Barrier will force the write to the User
+		 * Doorbell area to be flushed.  This is needed to prevent
+		 * writes on different CPUs for the same queue from hitting
+		 * the adapter out of order.  This is required when some Work
+		 * Requests take the Write Combine Gather Buffer path (user
+		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+		 * take the traditional path where we simply increment the
+		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+		 * hardware DMA read the actual Work Request.
+		 */
+		wmb();
+	}
+}
+
+/**
+ *	inline_tx_skb - inline a packet's data into TX descriptors
+ *	@skb: the packet
+ *	@tq: the TX queue where the packet will be inlined
+ *	@pos: starting position in the TX queue to inline the packet
+ *
+ *	Inline a packet's contents directly into TX descriptors, starting at
+ *	the given position within the TX DMA ring.
+ *	Most of the complexity of this operation is dealing with wrap arounds
+ *	in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
+			  void *pos)
+{
+	u64 *p;
+	int left = (void *)tq->stat - pos;
+
+	if (likely(skb->len <= left)) {
+		if (likely(!skb->data_len))
+			skb_copy_from_linear_data(skb, pos, skb->len);
+		else
+			skb_copy_bits(skb, 0, pos, skb->len);
+		pos += skb->len;
+	} else {
+		skb_copy_bits(skb, 0, pos, left);
+		skb_copy_bits(skb, left, tq->desc, skb->len - left);
+		pos = (void *)tq->desc + (skb->len - left);
+	}
+
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8)
+		*p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
+{
+	int csum_type;
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (iph->version == 4) {
+		if (iph->protocol == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP;
+		else if (iph->protocol == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP;
+		else {
+nocsum:
+			/*
+			 * unknown protocol, disable HW csum
+			 * and hope a bad packet is detected
+			 */
+			return TXPKT_L4CSUM_DIS_F;
+		}
+	} else {
+		/*
+		 * this doesn't work with extension headers
+		 */
+		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+		if (ip6h->nexthdr == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP6;
+		else if (ip6h->nexthdr == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP6;
+		else
+			goto nocsum;
+	}
+
+	if (likely(csum_type >= TX_CSUM_TCPIP)) {
+		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+		if (chip <= CHELSIO_T5)
+			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		else
+			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+	} else {
+		int start = skb_transport_offset(skb);
+
+		return TXPKT_CSUM_TYPE_V(csum_type) |
+			TXPKT_CSUM_START_V(start) |
+			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
+	}
+}
+
+/*
+ * Stop an Ethernet TX queue and record that state change.
+ */
+static void txq_stop(struct sge_eth_txq *txq)
+{
+	netif_tx_stop_queue(txq->txq);
+	txq->q.stops++;
+}
+
+/*
+ * Advance our software state for a TX queue by adding n in use descriptors.
+ */
+static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+{
+	tq->in_use += n;
+	tq->pidx += n;
+	if (tq->pidx >= tq->size)
+		tq->pidx -= tq->size;
+}
+
+/**
+ *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
+ */
+int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 wr_mid;
+	u64 cntrl, *end;
+	int qidx, credits, max_pkt_len;
+	unsigned int flits, ndesc;
+	struct adapter *adapter;
+	struct sge_eth_txq *txq;
+	const struct port_info *pi;
+	struct fw_eth_tx_pkt_vm_wr *wr;
+	struct cpl_tx_pkt_core *cpl;
+	const struct skb_shared_info *ssi;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+					sizeof(wr->ethmacsrc) +
+					sizeof(wr->ethtype) +
+					sizeof(wr->vlantci));
+
+	/*
+	 * The chip minimum packet length is 10 octets but the firmware
+	 * command that we are using requires that we copy the Ethernet header
+	 * (including the VLAN tag) into the header so we reject anything
+	 * smaller than that ...
+	 */
+	if (unlikely(skb->len < fw_hdr_copy_len))
+		goto out_free;
+
+	/* Discard the packet if the length is greater than mtu */
+	max_pkt_len = ETH_HLEN + dev->mtu;
+	if (skb_vlan_tag_present(skb))
+		max_pkt_len += VLAN_HLEN;
+	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+		goto out_free;
+
+	/*
+	 * Figure out which TX Queue we're going to use.
+	 */
+	pi = netdev_priv(dev);
+	adapter = pi->adapter;
+	qidx = skb_get_queue_mapping(skb);
+	BUG_ON(qidx >= pi->nqsets);
+	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+	/*
+	 * Take this opportunity to reclaim any TX Descriptors whose DMA
+	 * transfers have completed.
+	 */
+	reclaim_completed_tx(adapter, &txq->q, true);
+
+	/*
+	 * Calculate the number of flits and TX Descriptors we're going to
+	 * need along with how many TX Descriptors will be left over after
+	 * we inject our Work Request.
+	 */
+	flits = calc_tx_flits(skb);
+	ndesc = flits_to_desc(flits);
+	credits = txq_avail(&txq->q) - ndesc;
+
+	if (unlikely(credits < 0)) {
+		/*
+		 * Not enough room for this packet's Work Request.  Stop the
+		 * TX Queue and return a "busy" condition.  The queue will get
+		 * started later on when the firmware informs us that space
+		 * has opened up.
+		 */
+		txq_stop(txq);
+		dev_err(adapter->pdev_dev,
+			"%s: TX ring %u full while queue awake!\n",
+			dev->name, qidx);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (!is_eth_imm(skb) &&
+	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+		/*
+		 * We need to map the skb into PCI DMA space (because it can't
+		 * be in-lined directly into the Work Request) and the mapping
+		 * operation failed.  Record the error and drop the packet.
+		 */
+		txq->mapping_err++;
+		goto out_free;
+	}
+
+	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		/*
+		 * After we're done injecting the Work Request for this
+		 * packet, we'll be below our "stop threshold" so stop the TX
+		 * Queue now and schedule a request for an SGE Egress Queue
+		 * Update message.  The queue will get started later on when
+		 * the firmware processes this Work Request and sends us an
+		 * Egress Queue Status Update message indicating that space
+		 * has opened up.
+		 */
+		txq_stop(txq);
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+
+	/*
+	 * Start filling in our Work Request.  Note that we do _not_ handle
+	 * the WR Header wrapping around the TX Descriptor Ring.  If our
+	 * maximum header size ever exceeds one TX Descriptor, we'll need to
+	 * do something else here.
+	 */
+	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+	wr = (void *)&txq->q.desc[txq->q.pidx];
+	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+	wr->r3[0] = cpu_to_be32(0);
+	wr->r3[1] = cpu_to_be32(0);
+	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+	end = (u64 *)wr + flits;
+
+	/*
+	 * If this is a Large Send Offload packet we'll put in an LSO CPL
+	 * message with an encapsulated TX Packet CPL message.  Otherwise we
+	 * just use a TX Packet CPL message.
+	 */
+	ssi = skb_shinfo(skb);
+	if (ssi->gso_size) {
+		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+		int l3hdr_len = skb_network_header_len(skb);
+		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+		wr->op_immdlen =
+			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+				    FW_WR_IMMDLEN_V(sizeof(*lso) +
+						    sizeof(*cpl)));
+		/*
+		 * Fill in the LSO CPL message.
+		 */
+		lso->lso_ctrl =
+			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+				    LSO_FIRST_SLICE_F |
+				    LSO_LAST_SLICE_F |
+				    LSO_IPV6_V(v6) |
+				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+		lso->ipid_ofst = cpu_to_be16(0);
+		lso->mss = cpu_to_be16(ssi->gso_size);
+		lso->seqno_offset = cpu_to_be32(0);
+		if (is_t4(adapter->params.chip))
+			lso->len = cpu_to_be32(skb->len);
+		else
+			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+		/*
+		 * Set up TX Packet CPL pointer, control word and perform
+		 * accounting.
+		 */
+		cpl = (void *)(lso + 1);
+
+		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+		else
+			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			 TXPKT_IPHDR_LEN_V(l3hdr_len);
+		txq->tso++;
+		txq->tx_cso += ssi->gso_segs;
+	} else {
+		int len;
+
+		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+		wr->op_immdlen =
+			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+				    FW_WR_IMMDLEN_V(len));
+
+		/*
+		 * Set up TX Packet CPL pointer, control word and perform
+		 * accounting.
+		 */
+		cpl = (void *)(wr + 1);
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			cntrl = hwcsum(adapter->params.chip, skb) |
+				TXPKT_IPCSUM_DIS_F;
+			txq->tx_cso++;
+		} else
+			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+	}
+
+	/*
+	 * If there's a VLAN tag present, add that to the list of things to
+	 * do in this Work Request.
+	 */
+	if (skb_vlan_tag_present(skb)) {
+		txq->vlan_ins++;
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+	}
+
+	/*
+	 * Fill in the TX Packet CPL message header.
+	 */
+	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+				 TXPKT_INTF_V(pi->port_id) |
+				 TXPKT_PF_V(0));
+	cpl->pack = cpu_to_be16(0);
+	cpl->len = cpu_to_be16(skb->len);
+	cpl->ctrl1 = cpu_to_be64(cntrl);
+
+#ifdef T4_TRACE
+	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
+		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
+		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
+#endif
+
+	/*
+	 * Fill in the body of the TX Packet CPL message with either in-lined
+	 * data or a Scatter/Gather List.
+	 */
+	if (is_eth_imm(skb)) {
+		/*
+		 * In-line the packet's data and free the skb since we don't
+		 * need it any longer.
+		 */
+		inline_tx_skb(skb, &txq->q, cpl + 1);
+		dev_consume_skb_any(skb);
+	} else {
+		/*
+		 * Write the skb's Scatter/Gather list into the TX Packet CPL
+		 * message and retain a pointer to the skb so we can free it
+		 * later when its DMA completes.  (We store the skb pointer
+		 * in the Software Descriptor corresponding to the last TX
+		 * Descriptor used by the Work Request.)
+		 *
+		 * The retained skb will be freed when the corresponding TX
+		 * Descriptors are reclaimed after their DMAs complete.
+		 * However, this could take quite a while since, in general,
+		 * the hardware is set up to be lazy about sending DMA
+		 * completion notifications to us and we mostly perform TX
+		 * reclaims in the transmit routine.
+		 *
+		 * This is good for performance but means that we rely on new
+		 * TX packets arriving to run the destructors of completed
+		 * packets, which open up space in their sockets' send queues.
+		 * Sometimes we do not get such new packets causing TX to
+		 * stall.  A single UDP transmitter is a good example of this
+		 * situation.  We have a clean up timer that periodically
+		 * reclaims completed packets but it doesn't run often enough
+		 * (nor do we want it to) to prevent lengthy stalls.  A
+		 * solution to this problem is to run the destructor early,
+		 * after the packet is queued but before it's DMAd.  A con is
+		 * that we lie to socket memory accounting, but the amount of
+		 * extra memory is reasonable (limited by the number of TX
+		 * descriptors), the packets do actually get freed quickly by
+		 * new packets almost always, and for protocols like TCP that
+		 * wait for acks to really free up the data the extra memory
+		 * is even less.  On the positive side we run the destructors
+		 * on the sending CPU rather than on a potentially different
+		 * completing CPU, usually a good thing.
+		 *
+		 * Run the destructor before telling the DMA engine about the
+		 * packet to make sure it doesn't complete and get freed
+		 * prematurely.
+		 */
+		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+		struct sge_txq *tq = &txq->q;
+		int last_desc;
+
+		/*
+		 * If the Work Request header was an exact multiple of our TX
+		 * Descriptor length, then it's possible that the starting SGL
+		 * pointer lines up exactly with the end of our TX Descriptor
+		 * ring.  If that's the case, wrap around to the beginning
+		 * here ...
+		 */
+		if (unlikely((void *)sgl == (void *)tq->stat)) {
+			sgl = (void *)tq->desc;
+			end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
+		}
+
+		write_sgl(skb, tq, sgl, end, 0, addr);
+		skb_orphan(skb);
+
+		last_desc = tq->pidx + ndesc - 1;
+		if (last_desc >= tq->size)
+			last_desc -= tq->size;
+		tq->sdesc[last_desc].skb = skb;
+		tq->sdesc[last_desc].sgl = sgl;
+	}
+
+	/*
+	 * Advance our internal TX Queue state, tell the hardware about
+	 * the new TX descriptors and return success.
+	 */
+	txq_advance(&txq->q, ndesc);
+	dev->trans_start = jiffies;
+	ring_tx_db(adapter, &txq->q, ndesc);
+	return NETDEV_TX_OK;
+
+out_free:
+	/*
+	 * An error of some sort happened.  Free the TX skb and tell the
+	 * OS that we've "dealt" with the packet ...
+	 */
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	copy_frags - copy fragments from gather list into skb_shared_info
+ *	@skb: destination skb
+ *	@gl: source internal packet gather list
+ *	@offset: packet start offset in first page
+ *
+ *	Copy an internal packet gather list into a Linux skb_shared_info
+ *	structure.
+ */
+static inline void copy_frags(struct sk_buff *skb,
+			      const struct pkt_gl *gl,
+			      unsigned int offset)
+{
+	int i;
+
+	/* usually there's just one frag */
+	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
+			     gl->frags[0].offset + offset,
+			     gl->frags[0].size - offset);
+	skb_shinfo(skb)->nr_frags = gl->nfrags;
+	for (i = 1; i < gl->nfrags; i++)
+		__skb_fill_page_desc(skb, i, gl->frags[i].page,
+				     gl->frags[i].offset,
+				     gl->frags[i].size);
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+					 unsigned int skb_len,
+					 unsigned int pull_len)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		copy_frags(skb, gl, pull_len);
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+	}
+
+out:
+	return skb;
+}
+
+/**
+ *	t4vf_pktgl_free - free a packet gather list
+ *	@gl: the gather list
+ *
+ *	Releases the pages of a packet gather list.  We do not own the last
+ *	page on the list and do not free it.
+ */
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+	int frag;
+
+	frag = gl->nfrags - 1;
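+	/*
+	 * frag counts down from nfrags - 1, so only pages 0 .. nfrags - 2
+	 * are released; we do not own the final page.
+	 */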
+	while (frag--)
+		put_page(gl->frags[frag].page);
+}
+
+/**
+ *	do_gro - perform Generic Receive Offload ingress packet processing
+ *	@rxq: ingress RX Ethernet Queue
+ *	@gl: gather list for ingress packet
+ *	@pkt: CPL header for last packet fragment
+ *
+ *	Perform Generic Receive Offload (GRO) ingress packet processing.
+ *	We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+		   const struct cpl_rx_pkt *pkt)
+{
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
+	int ret;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(&rxq->rspq.napi);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return;
+	}
+
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
+	skb->data_len = skb->len;
+	skb->truesize += skb->data_len;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxq->rspq.idx);
+
+	if (pkt->vlan_ex) {
+		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+					be16_to_cpu(pkt->vlan));
+		rxq->stats.vlan_ex++;
+	}
+	ret = napi_gro_frags(&rxq->rspq.napi);
+
+	if (ret == GRO_HELD)
+		rxq->stats.lro_pkts++;
+	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+		rxq->stats.lro_merged++;
+	rxq->stats.pkts++;
+	rxq->stats.rx_cso++;
+}
+
+/**
+ *	t4vf_ethrx_handler - process an ingress ethernet packet
+ *	@rspq: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the RX_PKT message
+ *	@gl: the gather list of packet fragments
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
+		       const struct pkt_gl *gl)
+{
+	struct sk_buff *skb;
+	const struct cpl_rx_pkt *pkt = (void *)rsp;
+	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+		       (rspq->netdev->features & NETIF_F_RXCSUM);
+	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
+
+	/*
+	 * If this is a good TCP packet and we have Generic Receive Offload
+	 * enabled, handle the packet in the GRO path.
+	 */
+	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
+	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
+	    !pkt->ip_frag) {
+		do_gro(rxq, gl, pkt);
+		return 0;
+	}
+
+	/*
+	 * Convert the Packet Gather List into an skb.
+	 */
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
+	}
+	__skb_pull(skb, s->pktshift);
+	skb->protocol = eth_type_trans(skb, rspq->netdev);
+	skb_record_rx_queue(skb, rspq->idx);
+	rxq->stats.pkts++;
+
+	if (csum_ok && !pkt->err_vec &&
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
+		if (!pkt->ip_frag)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else {
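+			/*
+			 * This is an IP fragment, so the hardware couldn't
+			 * fully verify the TCP/UDP checksum; hand its raw
+			 * checksum to the stack as CHECKSUM_COMPLETE instead.
+			 */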
+			__sum16 c = (__force __sum16)pkt->csum;
+			skb->csum = csum_unfold(c);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+		rxq->stats.rx_cso++;
+	} else
+		skb_checksum_none_assert(skb);
+
+	if (pkt->vlan_ex) {
+		rxq->stats.vlan_ex++;
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+	}
+
+	netif_receive_skb(skb);
+
+	return 0;
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@rc: the response control descriptor
+ *	@rspq: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *rc,
+				   const struct sge_rspq *rspq)
+{
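+	/*
+	 * The Generation Bit in each response toggles every time the queue
+	 * wraps, so a response is new when its Generation Bit matches the
+	 * queue's current generation.
+	 */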
+	return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
+}
+
+/**
+ *	restore_rx_bufs - put back a packet's RX buffers
+ *	@gl: the packet gather list
+ *	@fl: the SGE Free List
+ *	@frags: how many fragments in @gl
+ *
+ *	Called when we find out that the current packet, @gl, can't be
+ *	processed right away for some reason.  This is a very rare event and
+ *	there's no effort to make this suspension/resumption process
+ *	particularly efficient.
+ *
+ *	We implement the suspension by putting all of the RX buffers associated
+ *	with the current packet back on the original Free List.  The buffers
+ *	have already been unmapped and are left unmapped; we mark them as
+ *	unmapped in order to prevent further unmapping attempts.  (Effectively
+ *	this function undoes the series of @unmap_rx_buf calls which were done
+ *	to create the current packet's gather list.)  This leaves us ready to
+ *	restart processing of the packet the next time we start processing the
+ *	RX Queue ...
+ */
+static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
+			    int frags)
+{
+	struct rx_sw_desc *sdesc;
+
+	while (frags--) {
+		if (fl->cidx == 0)
+			fl->cidx = fl->size - 1;
+		else
+			fl->cidx--;
+		sdesc = &fl->sdesc[fl->cidx];
+		sdesc->page = gl->frags[frags].page;
+		sdesc->dma_addr |= RX_UNMAPPED_BUF;
+		fl->avail++;
+	}
+}
+
+/**
+ *	rspq_next - advance to the next entry in a response queue
+ *	@rspq: the queue
+ *
+ *	Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *rspq)
+{
+	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
+	if (unlikely(++rspq->cidx == rspq->size)) {
+		rspq->cidx = 0;
+		rspq->gen ^= 1;
+		rspq->cur_desc = rspq->desc;
+	}
+}
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@rspq: the ingress response queue to process
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from a Scatter Gather Engine response queue up to
+ *	the supplied budget.  Responses include received packets as well as
+ *	control messages from firmware or hardware.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *rspq, int budget)
+{
+	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
+	int budget_left = budget;
+
+	while (likely(budget_left)) {
+		int ret, rsp_type;
+		const struct rsp_ctrl *rc;
+
+		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, rspq))
+			break;
+
+		/*
+		 * Figure out what kind of response we've received from the
+		 * SGE.
+		 */
+		dma_rmb();
+		rsp_type = RSPD_TYPE_G(rc->type_gen);
+		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
+			struct page_frag *fp;
+			struct pkt_gl gl;
+			const struct rx_sw_desc *sdesc;
+			u32 bufsz, frag;
+			u32 len = be32_to_cpu(rc->pldbuflen_qid);
+
+			/*
+			 * If we get a "new buffer" message from the SGE we
+			 * need to move on to the next Free List buffer.
+			 */
+			if (len & RSPD_NEWBUF_F) {
+				/*
+				 * We get one "new buffer" message when we
+				 * first start up a queue so we need to ignore
+				 * it when our offset into the buffer is 0.
+				 */
+				if (likely(rspq->offset > 0)) {
+					free_rx_bufs(rspq->adapter, &rxq->fl,
+						     1);
+					rspq->offset = 0;
+				}
+				len = RSPD_LEN_G(len);
+			}
+			gl.tot_len = len;
+
+			/*
+			 * Gather packet fragments.
+			 */
+			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
+				BUG_ON(frag >= MAX_SKB_FRAGS);
+				BUG_ON(rxq->fl.avail == 0);
+				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
+				bufsz = get_buf_size(adapter, sdesc);
+				fp->page = sdesc->page;
+				fp->offset = rspq->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
+				if (!len)
+					break;
+				unmap_rx_buf(rspq->adapter, &rxq->fl);
+			}
+			gl.nfrags = frag+1;
+
+			/*
+			 * Last buffer remains mapped so explicitly make it
+			 * coherent for CPU access and start preloading first
+			 * cache line ...
+			 */
+			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
+						get_buf_addr(sdesc),
+						fp->size, DMA_FROM_DEVICE);
+			gl.va = (page_address(gl.frags[0].page) +
+				 gl.frags[0].offset);
+			prefetch(gl.va);
+
+			/*
+			 * Hand the new ingress packet to the handler for
+			 * this Response Queue.
+			 */
+			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
+			if (likely(ret == 0))
+				rspq->offset += ALIGN(fp->size, s->fl_align);
+			else
+				restore_rx_bufs(&gl, &rxq->fl, frag);
+		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
+			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
+		} else {
+			WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
+			ret = 0;
+		}
+
+		if (unlikely(ret)) {
+			/*
+			 * Couldn't process descriptor, back off for recovery.
+			 * We use the SGE's last timer which has the longest
+			 * interrupt coalescing value ...
+			 */
+			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
+			rspq->next_intr_params =
+				QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
+			break;
+		}
+
+		rspq_next(rspq);
+		budget_left--;
+	}
+
+	/*
+	 * If this is a Response Queue with an associated Free List and
+	 * at least two Egress Queue units available in the Free List
+	 * for new buffer pointers, refill the Free List.
+	 */
+	if (rspq->offset >= 0 &&
+	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+		__refill_fl(rspq->adapter, &rxq->fl);
+	return budget - budget_left;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for RX processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.  This does not need any
+ *	locking or protection from interrupts as data interrupts are off at
+ *	this point and other adapter interrupts do not interfere (the latter
+ *	is not a concern at all with MSI-X as non-data interrupts then have
+ *	a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	unsigned int intr_params;
+	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
+	int work_done = process_responses(rspq, budget);
+	u32 val;
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
+		intr_params = rspq->next_intr_params;
+		rspq->next_intr_params = rspq->intr_params;
+	} else
+		intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
+
+	if (unlikely(work_done == 0))
+		rspq->unhandled_irqs++;
+
+	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!rspq->bar2_addr)) {
+		t4_write_reg(rspq->adapter,
+			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+			     val | INGRESSQID_V((u32)rspq->cntxt_id));
+	} else {
+		writel(val | INGRESSQID_V(rspq->bar2_qid),
+		       rspq->bar2_addr + SGE_UDB_GTS);
+		wmb();
+	}
+	return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_rspq *rspq = cookie;
+
+	napi_schedule(&rspq->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	struct sge_rspq *intrq = &s->intrq;
+	unsigned int work_done;
+	u32 val;
+
+	spin_lock(&adapter->sge.intrq_lock);
+	for (work_done = 0; ; work_done++) {
+		const struct rsp_ctrl *rc;
+		unsigned int qid, iq_idx;
+		struct sge_rspq *rspq;
+
+		/*
+		 * Grab the next response from the interrupt queue and bail
+		 * out if it's not a new response.
+		 */
+		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, intrq))
+			break;
+
+		/*
+		 * If the response isn't a forwarded interrupt message, issue an
+		 * error and go on to the next response message.  This should
+		 * never happen ...
+		 */
+		dma_rmb();
+		if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
+			dev_err(adapter->pdev_dev,
+				"Unexpected INTRQ response type %d\n",
+				RSPD_TYPE_G(rc->type_gen));
+			continue;
+		}
+
+		/*
+		 * Extract the Queue ID from the interrupt message and perform
+		 * sanity checking to make sure it really refers to one of our
+		 * Ingress Queues which is active and matches the queue's ID.
+		 * None of these error conditions should ever happen so we may
+		 * want to make them fatal and/or conditionalize them under
+		 * DEBUG.
+		 */
+		qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
+		iq_idx = IQ_IDX(s, qid);
+		if (unlikely(iq_idx >= MAX_INGQ)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d out of range\n", qid);
+			continue;
+		}
+		rspq = s->ingr_map[iq_idx];
+		if (unlikely(rspq == NULL)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d RSPQ=NULL\n", qid);
+			continue;
+		}
+		if (unlikely(rspq->abs_id != qid)) {
+			dev_err(adapter->pdev_dev,
+				"Ingress QID %d refers to RSPQ %d\n",
+				qid, rspq->abs_id);
+			continue;
+		}
+
+		/*
+		 * Schedule NAPI processing on the indicated Response Queue
+		 * and move on to the next entry in the Forwarded Interrupt
+		 * Queue.
+		 */
+		napi_schedule(&rspq->napi);
+		rspq_next(intrq);
+	}
+
+	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!intrq->bar2_addr)) {
+		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+			     val | INGRESSQID_V(intrq->cntxt_id));
+	} else {
+		writel(val | INGRESSQID_V(intrq->bar2_qid),
+		       intrq->bar2_addr + SGE_UDB_GTS);
+		wmb();
+	}
+
+	spin_unlock(&adapter->sge.intrq_lock);
+
+	return work_done;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events as they all use the same MSI vector.
+ */
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+{
+	struct adapter *adapter = cookie;
+
+	process_intrq(adapter);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	t4vf_intr_handler - select the top-level interrupt handler
+ *	@adapter: the adapter
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X or MSI).
+ */
+irq_handler_t t4vf_intr_handler(struct adapter *adapter)
+{
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+	if (adapter->flags & USING_MSIX)
+		return t4vf_sge_intr_msix;
+	else
+		return t4vf_intr_msi;
+}
+
+/**
+ *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
+ *	@data: the adapter
+ *
+ *	Runs periodically from a timer to perform maintenance of SGE RX queues.
+ *
+ *	Replenishes RX queues that have run out due to memory shortage.
+ *	Normally new RX buffers are added when existing ones are consumed but
+ *	when out of memory a queue can become empty.  We schedule NAPI to do
+ *	the actual refill.
+ */
+static void sge_rx_timer_cb(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *s = &adapter->sge;
+	unsigned int i;
+
+	/*
+	 * Scan the "Starving Free Lists" flag array looking for any Free
+	 * Lists in need of more free buffers.  If we find one and it's not
+	 * being actively polled, then bump its "starving" counter and attempt
+	 * to refill it.  If we're successful in adding enough buffers to push
+	 * the Free List over the starving threshold, then we can clear its
+	 * "starving" status.
+	 */
+	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+		unsigned long m;
+
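+		/*
+		 * Walk the set bits in this word of the "starving" bitmap;
+		 * "m &= m - 1" clears the lowest set bit on each iteration.
+		 */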
+		for (m = s->starving_fl[i]; m; m &= m - 1) {
+			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_fl *fl = s->egr_map[id];
+
+			clear_bit(id, s->starving_fl);
+			smp_mb__after_atomic();
+
+			/*
+			 * Since we are accessing fl without a lock there's a
+			 * small probability of a false positive where we
+			 * schedule napi but the FL is no longer starving.
+			 * No biggie.
+			 */
+			if (fl_starving(adapter, fl)) {
+				struct sge_eth_rxq *rxq;
+
+				rxq = container_of(fl, struct sge_eth_rxq, fl);
+				if (napi_reschedule(&rxq->rspq.napi))
+					fl->starving++;
+				else
+					set_bit(id, s->starving_fl);
+			}
+		}
+	}
+
+	/*
+	 * Reschedule the next scan for starving Free Lists ...
+	 */
+	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+/**
+ *	sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
+ *	@data: the adapter
+ *
+ *	Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ *	Reclaims completed Tx packets for the Ethernet queues.  Normally
+ *	packets are cleaned up by new Tx packets; this timer cleans up packets
+ *	when no new packets are being submitted.  This is essential for pktgen,
+ *	at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *s = &adapter->sge;
+	unsigned int i, budget;
+
+	budget = MAX_TIMER_TX_RECLAIM;
+	i = s->ethtxq_rover;
+	do {
+		struct sge_eth_txq *txq = &s->ethtxq[i];
+
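+		/*
+		 * Only reclaim if we can grab the TX Queue lock without
+		 * blocking; a busy queue will be cleaned up by its own
+		 * transmit path anyway.
+		 */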
+		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+			int avail = reclaimable(&txq->q);
+
+			if (avail > budget)
+				avail = budget;
+
+			free_tx_desc(adapter, &txq->q, avail, true);
+			txq->q.in_use -= avail;
+			__netif_tx_unlock(txq->txq);
+
+			budget -= avail;
+			if (!budget)
+				break;
+		}
+
+		i++;
+		if (i >= s->ethqsets)
+			i = 0;
+	} while (i != s->ethtxq_rover);
+	s->ethtxq_rover = i;
+
+	/*
+	 * If we found too many reclaimable packets, schedule a timer in the
+	 * near future to continue where we left off.  Otherwise the next timer
+	 * will be at its normal interval.
+	 */
+	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ *	bar2_address - return the BAR2 address for an SGE Queue's Registers
+ *	@adapter: the adapter
+ *	@qid: the SGE Queue ID
+ *	@qtype: the SGE Queue Type (Egress or Ingress)
+ *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ *	Returns the BAR2 address for the SGE Queue Registers associated with
+ *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
+ *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+				  unsigned int qid,
+				  enum t4_bar2_qtype qtype,
+				  unsigned int *pbar2_qid)
+{
+	u64 bar2_qoffset;
+	int ret;
+
+	ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+				  &bar2_qoffset, pbar2_qid);
+	if (ret)
+		return NULL;
+
+	return adapter->bar2 + bar2_qoffset;
+}
+
+/**
+ *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ *	@adapter: the adapter
+ *	@rspq: pointer to the new rxq's Response Queue to be filled in
+ *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ *	@dev: the network device associated with the new rspq
+ *	@intr_dest: MSI-X vector index (overridden in MSI mode)
+ *	@fl: pointer to the new rxq's Free List to be filled in
+ *	@hnd: the interrupt handler to invoke for the rspq
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+		       bool iqasynch, struct net_device *dev,
+		       int intr_dest,
+		       struct sge_fl *fl, rspq_handler_t hnd)
+{
+	struct sge *s = &adapter->sge;
+	struct port_info *pi = netdev_priv(dev);
+	struct fw_iq_cmd cmd, rpl;
+	int ret, iqandst, flsz = 0;
+
+	/*
+	 * If we're using MSI interrupts and we're not initializing the
+	 * Forwarded Interrupt Queue itself, then set up this queue for
+	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
+	 * the Forwarded Interrupt Queue must be set up before any other
+	 * ingress queue ...
+	 */
+	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+		iqandst = SGE_INTRDST_IQ;
+		intr_dest = adapter->sge.intrq.abs_id;
+	} else
+		iqandst = SGE_INTRDST_PCI;
+
+	/*
+	 * Allocate the hardware ring for the Response Queue.  The size needs
+	 * to be a multiple of 16 which includes the mandatory status entry
+	 * (regardless of whether the Status Page capabilities are enabled or
+	 * not).
+	 */
+	rspq->size = roundup(rspq->size, 16);
+	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
+				0, &rspq->phys_addr, NULL, 0);
+	if (!rspq->desc)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
+	 * be in t4vf_hw.c but there are so many parameters and dependencies
+	 * on our Linux SGE state that we would end up having to pass tons of
+	 * parameters.  We'll have to think about how this might be migrated
+	 * into OS-independent common code ...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
+					 FW_IQ_CMD_IQSTART_F |
+					 FW_LEN16(cmd));
+	cmd.type_to_iqandstindex =
+		cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+			    FW_IQ_CMD_IQASYNCH_V(iqasynch) |
+			    FW_IQ_CMD_VIID_V(pi->viid) |
+			    FW_IQ_CMD_IQANDST_V(iqandst) |
+			    FW_IQ_CMD_IQANUS_V(1) |
+			    FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
+			    FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
+	cmd.iqdroprss_to_iqesize =
+		cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
+			    FW_IQ_CMD_IQGTSMODE_F |
+			    FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
+			    FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
+	cmd.iqsize = cpu_to_be16(rspq->size);
+	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+	if (fl) {
+		enum chip_type chip =
+			CHELSIO_CHIP_VERSION(adapter->params.chip);
+		/*
+		 * Allocate the ring for the hardware free list (with space
+		 * for its status page) along with the associated software
+		 * descriptor ring.  The free list size needs to be a multiple
+		 * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+		 * (fl_starve_thres - 1).
+		 */
+		if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+			fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
+		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
+		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
+				      sizeof(__be64), sizeof(struct rx_sw_desc),
+				      &fl->addr, &fl->sdesc, s->stat_len);
+		if (!fl->desc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/*
+		 * Calculate the size of the hardware free list ring plus
+		 * Status Page (which the SGE will place after the end of the
+		 * free list ring) in Egress Queue Units.
+		 */
+		flsz = (fl->size / FL_PER_EQ_UNIT +
+			s->stat_len / EQ_UNIT);
+
+		/*
+		 * Fill in all the relevant firmware Ingress Queue Command
+		 * fields for the free list.
+		 */
+		cmd.iqns_to_fl0congen =
+			cpu_to_be32(
+				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
+				FW_IQ_CMD_FL0PACKEN_F |
+				FW_IQ_CMD_FL0PADEN_F);
+		cmd.fl0dcaen_to_fl0cidxfthresh =
+			cpu_to_be16(
+				FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+				FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+						     FETCHBURSTMAX_512B_X :
+						     FETCHBURSTMAX_256B_X));
+		cmd.fl0size = cpu_to_be16(flsz);
+		cmd.fl0addr = cpu_to_be64(fl->addr);
+	}
+
+	/*
+	 * Issue the firmware Ingress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret)
+		goto err;
+
+	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+	rspq->cur_desc = rspq->desc;
+	rspq->cidx = 0;
+	rspq->gen = 1;
+	rspq->next_intr_params = rspq->intr_params;
+	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+	rspq->bar2_addr = bar2_address(adapter,
+				       rspq->cntxt_id,
+				       T4_BAR2_QTYPE_INGRESS,
+				       &rspq->bar2_qid);
+	rspq->abs_id = be16_to_cpu(rpl.physiqid);
+	rspq->size--;			/* subtract status entry */
+	rspq->adapter = adapter;
+	rspq->netdev = dev;
+	rspq->handler = hnd;
+
+	/* set offset to -1 to distinguish ingress queues without FL */
+	rspq->offset = fl ? 0 : -1;
+
+	if (fl) {
+		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+		fl->avail = 0;
+		fl->pend_cred = 0;
+		fl->pidx = 0;
+		fl->cidx = 0;
+		fl->alloc_failed = 0;
+		fl->large_alloc_failed = 0;
+		fl->starving = 0;
+
+		/* Note, we must initialize the BAR2 Free List User Doorbell
+		 * information before refilling the Free List!
+		 */
+		fl->bar2_addr = bar2_address(adapter,
+					     fl->cntxt_id,
+					     T4_BAR2_QTYPE_EGRESS,
+					     &fl->bar2_qid);
+
+		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+	}
+
+	return 0;
+
+err:
+	/*
+	 * An error occurred.  Clean up our partial allocation state and
+	 * return the error.
+	 */
+	if (rspq->desc) {
+		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+				  rspq->desc, rspq->phys_addr);
+		rspq->desc = NULL;
+	}
+	if (fl && fl->desc) {
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+				  fl->desc, fl->addr);
+		fl->desc = NULL;
+	}
+	return ret;
+}
+
+/**
+ *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ *	@adapter: the adapter
+ *	@txq: pointer to the new txq to be filled in
+ *	@dev: the network device associated with the new txq
+ *	@devq: the network TX queue associated with the new txq
+ *	@iqid: the relative ingress queue ID to which events relating to
+ *		the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+			   struct net_device *dev, struct netdev_queue *devq,
+			   unsigned int iqid)
+{
+	struct sge *s = &adapter->sge;
+	int ret, nentries;
+	struct fw_eq_eth_cmd cmd, rpl;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * Calculate the size of the hardware TX Queue (including the Status
+	 * Page on the end of the TX Queue) in units of TX Descriptors.
+	 */
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+	/*
+	 * Allocate the hardware ring for the TX ring (with space for its
+	 * status page) along with the associated software descriptor ring.
+	 */
+	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+				 sizeof(struct tx_desc),
+				 sizeof(struct tx_sw_desc),
+				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the Egress Queue Command.  Note: As with the direct use of
+	 * the firmware Ingress Queue Command above in our RXQ allocation
+	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
+	 * have to see if there's some reasonable way to parameterize it
+	 * into the common code ...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
+					 FW_EQ_ETH_CMD_EQSTART_F |
+					 FW_LEN16(cmd));
+	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+				   FW_EQ_ETH_CMD_VIID_V(pi->viid));
+	cmd.fetchszm_to_iqid =
+		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
+			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
+			    FW_EQ_ETH_CMD_IQID_V(iqid));
+	cmd.dcaen_to_eqsize =
+		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+			    FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
+			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
+						SGE_CIDXFLUSHTHRESH_32) |
+			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	/*
+	 * Issue the firmware Egress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret) {
+		/*
+		 * The firmware Egress Queue Command failed for some reason.
+		 * Free up our partial allocation state and return the error.
+		 */
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	txq->q.in_use = 0;
+	txq->q.cidx = 0;
+	txq->q.pidx = 0;
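+	/* the Status Page immediately follows the last TX descriptor */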
+	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
+	txq->q.bar2_addr = bar2_address(adapter,
+					txq->q.cntxt_id,
+					T4_BAR2_QTYPE_EGRESS,
+					&txq->q.bar2_qid);
+	txq->q.abs_id =
+		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
+	txq->txq = devq;
+	txq->tso = 0;
+	txq->tx_cso = 0;
+	txq->vlan_ins = 0;
+	txq->q.stops = 0;
+	txq->q.restarts = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+	struct sge *s = &adapter->sge;
+
+	dma_free_coherent(adapter->pdev_dev,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
+			  tq->desc, tq->phys_addr);
+	tq->cntxt_id = 0;
+	tq->sdesc = NULL;
+	tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+			 struct sge_fl *fl)
+{
+	struct sge *s = &adapter->sge;
+	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+		     rspq->cntxt_id, flid, 0xffff);
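+	/*
+	 * rspq->size was decremented by one for the Status Entry when the
+	 * queue was set up, so add it back to get the ring's true size.
+	 */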
+	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+			  rspq->desc, rspq->phys_addr);
+	netif_napi_del(&rspq->napi);
+	rspq->netdev = NULL;
+	rspq->cntxt_id = 0;
+	rspq->abs_id = 0;
+	rspq->desc = NULL;
+
+	if (fl) {
+		free_rx_bufs(adapter, fl, fl->avail);
+		dma_free_coherent(adapter->pdev_dev,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
+				  fl->desc, fl->addr);
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		fl->cntxt_id = 0;
+		fl->desc = NULL;
+	}
+}
+
+/**
+ *	t4vf_free_sge_resources - free SGE resources
+ *	@adapter: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	struct sge_eth_rxq *rxq = s->ethrxq;
+	struct sge_eth_txq *txq = s->ethtxq;
+	struct sge_rspq *evtq = &s->fw_evtq;
+	struct sge_rspq *intrq = &s->intrq;
+	int qs;
+
+	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
+		if (rxq->rspq.desc)
+			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+		if (txq->q.desc) {
+			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+			kfree(txq->q.sdesc);
+			free_txq(adapter, &txq->q);
+		}
+	}
+	if (evtq->desc)
+		free_rspq_fl(adapter, evtq, NULL);
+	if (intrq->desc)
+		free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ *	t4vf_sge_start - enable SGE operation
+ *	@adapter: the adapter
+ *
+ *	Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+	adapter->sge.ethtxq_rover = 0;
+	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ *	t4vf_sge_stop - disable SGE operation
+ *	@adapter: the adapter
+ *
+ *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	this is effective only if measures have been taken to disable any HW
+ *	events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+}
+
+/**
+ *	t4vf_sge_init - initialize SGE
+ *	@adapter: the adapter
+ *
+ *	Performs SGE initialization needed every time after a chip reset.
+ *	We do not initialize any of the queue sets here, instead the driver
+ *	top-level must request those individually.  We also do not enable DMA
+ *	here, that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
+
+	/*
+	 * Start by vetting the basic SGE parameters which have been set up by
+	 * the Physical Function Driver.  Ideally we should be able to deal
+	 * with _any_ configuration.  Practice is different ...
+	 */
+	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl0, fl1);
+		return -EINVAL;
+	}
+	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now translate the adapter parameters into our internal forms.
+	 */
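+	/*
+	 * fl1, if non-zero, is the larger Free List buffer size; derive the
+	 * page allocation order needed for buffers of that size.
+	 */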
+	if (fl1)
+		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
+			? 128 : 64);
+	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.  (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end up doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+			       INGPADBOUNDARY_SHIFT_X);
+	if (is_t4(adapter->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
+
+	/* A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it.  This needs to be larger than the
+	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries.  (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 */
+	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+	case CHELSIO_T4:
+		s->fl_starve_thres =
+		   EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T5:
+		s->fl_starve_thres =
+		   EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T6:
+	default:
+		s->fl_starve_thres =
+		   T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	}
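+	/*
+	 * The threshold above is in units of 2 Free List pointers; convert
+	 * it to Free List pointers and add one for a little slack.
+	 */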
+	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
+
+	/*
+	 * Set up tasklet timers.
+	 */
+	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+	/*
+	 * Initialize Forwarded Interrupt Queue lock.
+	 */
+	spin_lock_init(&s->intrq_lock);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
new file mode 100644
index 0000000..88b8981
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -0,0 +1,334 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_COMMON_H__
+#define __T4VF_COMMON_H__
+
+#include "../cxgb4/t4fw_api.h"
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ *   V  = "4" for T4; "5" for T5, etc. or
+ *      = "a" for T4 FPGA; "b" for T5 FPGA, etc.
+ *   F  = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ *   PP = adapter product designation
+ */
+#define CHELSIO_T4		0x4
+#define CHELSIO_T5		0x5
+#define CHELSIO_T6		0x6
+
+enum chip_type {
+	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+	T4_FIRST_REV	= T4_A1,
+	T4_LAST_REV	= T4_A2,
+
+	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+	T5_FIRST_REV	= T5_A0,
+	T5_LAST_REV	= T5_A1,
+};
+
+/*
+ * The "len16" field of a Firmware Command Structure ...
+ */
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
+
+/*
+ * Per-VF statistics.
+ */
+struct t4vf_port_stats {
+	/*
+	 * TX statistics.
+	 */
+	u64 tx_bcast_bytes;		/* broadcast */
+	u64 tx_bcast_frames;
+	u64 tx_mcast_bytes;		/* multicast */
+	u64 tx_mcast_frames;
+	u64 tx_ucast_bytes;		/* unicast */
+	u64 tx_ucast_frames;
+	u64 tx_drop_frames;		/* TX dropped frames */
+	u64 tx_offload_bytes;		/* offload */
+	u64 tx_offload_frames;
+
+	/*
+	 * RX statistics.
+	 */
+	u64 rx_bcast_bytes;		/* broadcast */
+	u64 rx_bcast_frames;
+	u64 rx_mcast_bytes;		/* multicast */
+	u64 rx_mcast_frames;
+	u64 rx_ucast_bytes;
+	u64 rx_ucast_frames;		/* unicast */
+
+	u64 rx_err_frames;		/* RX error frames */
+};
+
+/*
+ * Per-"port" (Virtual Interface) link configuration ...
+ */
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+	unsigned char  link_ok;          /* link up? */
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+/*
+ * General device parameters ...
+ */
+struct dev_params {
+	u32 fwrev;			/* firmware version */
+	u32 tprev;			/* TP Microcode Version */
+};
+
+/*
+ * Scatter Gather Engine parameters.  These are almost all determined by the
+ * Physical Function Driver.  We just need to grab them to see within which
+ * environment we're playing ...
+ */
+struct sge_params {
+	u32 sge_control;		/* padding, boundaries, lengths, etc. */
+	u32 sge_control2;		/* T5: more of the same */
+	u32 sge_host_page_size;		/* PF0-7 page sizes */
+	u32 sge_egress_queues_per_page;	/* PF0-7 egress queues/page */
+	u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */
+	u32 sge_vf_hps;                 /* host page size for our vf */
+	u32 sge_vf_eq_qpp;		/* egress queues/page for our VF */
+	u32 sge_vf_iq_qpp;		/* ingress queues/page for our VF */
+	u32 sge_fl_buffer_size[16];	/* free list buffer sizes */
+	u32 sge_ingress_rx_threshold;	/* RX counter interrupt threshold[4] */
+	u32 sge_congestion_control;     /* congestion thresholds, etc. */
+	u32 sge_timer_value_0_and_1;	/* interrupt coalescing timer values */
+	u32 sge_timer_value_2_and_3;
+	u32 sge_timer_value_4_and_5;
+};
+
+/*
+ * Vital Product Data parameters.
+ */
+struct vpd_params {
+	u32 cclk;			/* Core Clock (KHz) */
+};
+
+/* Stores chip specific parameters */
+struct arch_specific_params {
+	u32 sge_fl_db;
+	u16 mps_tcam_size;
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+	unsigned int mode;		/* RSS mode */
+	union {
+	    struct {
+		unsigned int synmapen:1;	/* SYN Map Enable */
+		unsigned int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
+		unsigned int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
+		unsigned int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
+		unsigned int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
+		unsigned int ofdmapen:1;	/* Offload Map Enable */
+		unsigned int tnlmapen:1;	/* Tunnel Map Enable */
+		unsigned int tnlalllookup:1;	/* Tunnel All Lookup */
+		unsigned int hashtoeplitz:1;	/* use Toeplitz hash */
+	    } basicvirtual;
+	} u;
+};
+
+/*
+ * Virtual Interface RSS Configuration in host-native format.
+ */
+union rss_vi_config {
+    struct {
+	u16 defaultq;			/* Ingress Queue ID for !tnlalllookup */
+	unsigned int ip6fourtupen:1;	/* hash 4-tuple IPv6 ingress packets */
+	unsigned int ip6twotupen:1;	/* hash 2-tuple IPv6 ingress packets */
+	unsigned int ip4fourtupen:1;	/* hash 4-tuple IPv4 ingress packets */
+	unsigned int ip4twotupen:1;	/* hash 2-tuple IPv4 ingress packets */
+	int udpen;			/* hash 4-tuple UDP ingress packets */
+    } basicvirtual;
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+	unsigned int nvi;		/* N virtual interfaces */
+	unsigned int neq;		/* N egress Qs */
+	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
+	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
+	unsigned int niq;		/* N ingress Qs */
+	unsigned int tc;		/* PCI-E traffic class */
+	unsigned int pmask;		/* port access rights mask */
+	unsigned int nexactf;		/* N exact MPS filters */
+	unsigned int r_caps;		/* read capabilities */
+	unsigned int wx_caps;		/* write/execute capabilities */
+};
+
+/*
+ * Per-"adapter" (Virtual Function) parameters.
+ */
+struct adapter_params {
+	struct dev_params dev;		/* general device parameters */
+	struct sge_params sge;		/* Scatter Gather Engine */
+	struct vpd_params vpd;		/* Vital Product Data */
+	struct rss_params rss;		/* Receive Side Scaling */
+	struct vf_resources vfres;	/* Virtual Function Resource limits */
+	struct arch_specific_params arch; /* chip specific params */
+	enum chip_type chip;		/* chip code */
+	u8 nports;			/* # of Ethernet "ports" */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; iter++)
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+}
+
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+		(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
+{
+	return adapter->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
+					    unsigned int us)
+{
+	return (us * adapter->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+					    unsigned int ticks)
+{
+	return (ticks * 1000) / adapter->params.vpd.cclk;
+}
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+			       int size, void *rpl)
+{
+	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+				  int size, void *rpl)
+{
+	return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+#define CHELSIO_PCI_ID_VER(dev_id)  ((dev_id) >> 12)
+
+static inline int is_t4(enum chip_type chip)
+{
+	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
+}
+
+int t4vf_wait_dev_ready(struct adapter *);
+int t4vf_port_init(struct adapter *, int);
+
+int t4vf_fw_reset(struct adapter *);
+int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+			unsigned int qid,
+			enum t4_bar2_qtype qtype,
+			u64 *pbar2_qoffset,
+			unsigned int *pbar2_qid);
+
+int t4vf_get_sge_params(struct adapter *);
+int t4vf_get_vpd_params(struct adapter *);
+int t4vf_get_dev_params(struct adapter *);
+int t4vf_get_rss_glb_config(struct adapter *);
+int t4vf_get_vfres(struct adapter *);
+
+int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
+			    union rss_vi_config *);
+int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
+			     union rss_vi_config *);
+int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
+			  const u16 *, int);
+
+int t4vf_alloc_vi(struct adapter *, int);
+int t4vf_free_vi(struct adapter *, int);
+int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
+int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
+
+int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
+		    bool);
+int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
+			const u8 **, u16 *, u64 *, bool);
+int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
+int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
+int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
+
+int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
+		 unsigned int);
+int t4vf_eth_eq_free(struct adapter *, unsigned int);
+
+int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+int t4vf_prep_adapter(struct adapter *);
+
+#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
new file mode 100644
index 0000000..b516b12
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_DEFS_H__
+#define __T4VF_DEFS_H__
+
+#include "../cxgb4/t4_regs.h"
+
+/*
+ * The VF Register Map.
+ *
+ * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
+ * bus module (PL) and CPU Interface Module (CIM) components are mapped via
+ * the Slice to Module Map Table (see below) in the Physical Function Register
+ * Map.  The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
+ * and Offset registers in the PF Register Map.  The MBDATA base address is
+ * quite constrained as it determines the Mailbox Data addresses for both PFs
+ * and VFs, and therefore must fit in both the VF and PF Register Maps without
+ * overlapping other registers.
+ */
+#define T4VF_SGE_BASE_ADDR	0x0000
+#define T4VF_MPS_BASE_ADDR	0x0100
+#define T4VF_PL_BASE_ADDR	0x0200
+#define T4VF_MBDATA_BASE_ADDR	0x0240
+#define T4VF_CIM_BASE_ADDR	0x0300
+
+#define T4VF_REGMAP_START	0x0000
+#define T4VF_REGMAP_SIZE	0x0400
+
+/*
+ * There's no hardware limitation which requires that the addresses of the
+ * Mailbox Data in the fixed CIM PF map and the programmable VF map must
+ * match.  However, it's a useful convention ...
+ */
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
+#endif
+
+/*
+ * Virtual Function "Slice to Module Map Table" definitions.
+ *
+ * This table allows us to map subsets of the various module register sets
+ * into the T4VF Register Map.  Each table entry identifies the index of the
+ * module whose registers are being mapped, the offset within the module's
+ * register set that the mapping should start at, the limit of the mapping,
+ * and the offset within the T4VF Register Map to which the module's registers
+ * are being mapped.  All addresses and quantities are in terms of 32-bit
+ * words.  The "limit" value is also in terms of 32-bit words and is equal to
+ * the last address mapped in the T4VF Register Map (i.e. it's a "<="
+ * relation rather than a "<").
+ */
+#define T4VF_MOD_MAP(module, index, first, last) \
+	T4VF_MOD_MAP_##module##_INDEX  = (index), \
+	T4VF_MOD_MAP_##module##_FIRST  = (first), \
+	T4VF_MOD_MAP_##module##_LAST   = (last), \
+	T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
+	T4VF_MOD_MAP_##module##_BASE = \
+		(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
+	T4VF_MOD_MAP_##module##_LIMIT = \
+		(T4VF_##module##_BASE_ADDR/4 + (last)/4),
+
+#define SGE_VF_KDOORBELL 0x0
+#define SGE_VF_GTS 0x4
+#define MPS_VF_CTL 0x0
+#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define PL_VF_WHOAMI 0x0
+#define CIM_VF_EXT_MAILBOX_CTRL 0x0
+#define CIM_VF_EXT_MAILBOX_STATUS 0x4
+
+enum {
+    T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
+    T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
+    T4VF_MOD_MAP(PL,  3, PL_VF_WHOAMI, PL_VF_WHOAMI)
+    T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
+};
+
+/*
+ * There isn't a Slice to Module Map Table entry for the Mailbox Data
+ * registers, but it's convenient to use similar names as above.  There are 8
+ * little-endian 64-bit Mailbox Data registers.  Note that the "instances"
+ * value below is in terms of 32-bit words which matches the "word" addressing
+ * space we use above for the Slice to Module Map Space.
+ */
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
+
+#define T4VF_MBDATA_FIRST	0
+#define T4VF_MBDATA_LAST	((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
+
+#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
new file mode 100644
index 0000000..63dd5fd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1613 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	else
+		return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order
+ * (since the firmware data structures are specified in a big-endian layout).
+ */
+static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
+			 u32 mbox_data)
+{
+	for ( ; size; size -= 8, mbox_data += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
+}
+
+/*
+ * Dump contents of mailbox with a leading tag.
+ */
+static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+{
+	dev_err(adapter->pdev_dev,
+		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+}
+
+/**
+ *	t4vf_wr_mbox_core - send a command to FW through the mailbox
+ *	@adapter: the adapter
+ *	@cmd: the command to write
+ *	@size: command length in bytes
+ *	@rpl: where to optionally store the reply
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sends the given command to FW through the mailbox and waits for the
+ *	FW to execute the command.  If @rpl is not %NULL it is used to store
+ *	the FW's reply to the command.  The command and its optional reply
+ *	are of the same length.  FW can take up to 500 ms to respond.
+ *	@sleep_ok determines whether we may sleep while awaiting the response.
+ *	If sleeping is allowed we use progressive backoff otherwise we spin.
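+ *	When sleeping is allowed the backoff simply walks the delay[] table
+ *	below: successive waits of 1, 1, 3, 5, 10, 10, 20, 50 and then 100 ms
+ *	until the accumulated wait reaches FW_CMD_MAX_TIMEOUT.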
+ *
+ *	The return value is 0 on success or a negative errno on failure.  A
+ *	failure can happen either because we are not able to execute the
+ *	command or FW executes it but signals an error.  In the latter case
+ *	the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
+		      void *rpl, bool sleep_ok)
+{
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100
+	};
+
+	u32 v;
+	int i, ms, delay_idx;
+	const __be64 *p;
+	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
+	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+
+	/*
+	 * Commands must be multiples of 16 bytes in length and may not be
+	 * larger than the size of the Mailbox Data register array.
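+	 * With NUM_CIM_VF_MAILBOX_DATA_INSTANCES 32-bit Data registers that
+	 * works out to a maximum command size of 64 bytes.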
+	 */
+	if ((size % 16) != 0 ||
+	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+		return -EINVAL;
+
+	/*
+	 * Loop trying to get ownership of the mailbox.  Return an error
+	 * if we can't gain ownership.
+	 */
+	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
+	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
+	if (v != MBOX_OWNER_DRV)
+		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+
+	/*
+	 * Write the command array into the Mailbox Data register array and
+	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
+	 */
+	for (i = 0, p = cmd; i < size; i += 8)
+		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);         /* flush write */
+
+	t4_write_reg(adapter, mbox_ctl,
+		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
+	t4_read_reg(adapter, mbox_ctl);          /* flush write */
+
+	/*
+	 * Spin waiting for firmware to acknowledge processing our command.
+	 */
+	delay_idx = 0;
+	ms = delay[0];
+
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		/*
+		 * If we're the owner, see if this is the reply we wanted.
+		 */
+		v = t4_read_reg(adapter, mbox_ctl);
+		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+			/*
+			 * If the Message Valid bit isn't on, revoke ownership
+			 * of the mailbox and continue waiting for our reply.
+			 */
+			if ((v & MBMSGVALID_F) == 0) {
+				t4_write_reg(adapter, mbox_ctl,
+					     MBOWNER_V(MBOX_OWNER_NONE));
+				continue;
+			}
+
+			/*
+			 * We now have our reply.  Extract the command return
+			 * value, copy the reply back to our caller's buffer
+			 * (if specified) and revoke ownership of the mailbox.
+			 * We return the (negated) firmware command return
+			 * code (this depends on FW_SUCCESS == 0).
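+			 * For example, assuming the firmware's fw_retval
+			 * values mirror the standard errno numbers, an
+			 * FW_EINVAL reply would surface to the caller as
+			 * -EINVAL.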
+			 */
+
+			/* return value in low-order little-endian word */
+			v = t4_read_reg(adapter, mbox_data);
+			if (FW_CMD_RETVAL_G(v))
+				dump_mbox(adapter, "FW Error", mbox_data);
+
+			if (rpl) {
+				/* request bit in high-order BE word */
+				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
+					 & FW_CMD_REQUEST_F) == 0);
+				get_mbox_rpl(adapter, rpl, size, mbox_data);
+				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
+					 & FW_CMD_REQUEST_F) != 0);
+			}
+			t4_write_reg(adapter, mbox_ctl,
+				     MBOWNER_V(MBOX_OWNER_NONE));
+			return -FW_CMD_RETVAL_G(v);
+		}
+	}
+
+	/*
+	 * We timed out.  Return the error ...
+	 */
+	dump_mbox(adapter, "FW Timeout", mbox_data);
+	return -ETIMEDOUT;
+}
+
+/**
+ *	hash_mac_addr - return the hash value of a MAC address
+ *	@addr: the 48-bit Ethernet MAC address
+ *
+ *	Hashes a MAC address according to the hash function used by hardware
+ *	inexact (hash) address matching.
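+ *
+ *	As a purely illustrative example: for the address 00:00:00:00:00:01
+ *	the two 24-bit halves are a = 0x000000 and b = 0x000001, so after
+ *	a ^= b and the two fold-down shifts the function returns bucket 1.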
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+	a ^= b;
+	a ^= (a >> 12);
+	a ^= (a >> 6);
+	return a & 0x3f;
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: link capabilities
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void init_link_config(struct link_config *lc, unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/**
+ *	t4vf_port_init - initialize port hardware/software state
+ *	@adapter: the adapter
+ *	@pidx: the adapter port index
+ */
+int t4vf_port_init(struct adapter *adapter, int pidx)
+{
+	struct port_info *pi = adap2pinfo(adapter, pidx);
+	struct fw_vi_cmd vi_cmd, vi_rpl;
+	struct fw_port_cmd port_cmd, port_rpl;
+	int v;
+
+	/*
+	 * Execute a VI Read command to get our Virtual Interface information
+	 * like MAC address, etc.
+	 */
+	memset(&vi_cmd, 0, sizeof(vi_cmd));
+	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				       FW_CMD_REQUEST_F |
+				       FW_CMD_READ_F);
+	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
+	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+	if (v)
+		return v;
+
+	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
+	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
+	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
+
+	/*
+	 * If we don't have read access to our port information, we're done
+	 * now.  Otherwise, execute a PORT Read command to get it ...
+	 */
+	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+		return 0;
+
+	memset(&port_cmd, 0, sizeof(port_cmd));
+	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+					    FW_CMD_REQUEST_F |
+					    FW_CMD_READ_F |
+					    FW_PORT_CMD_PORTID_V(pi->port_id));
+	port_cmd.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+			    FW_LEN16(port_cmd));
+	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
+	if (v)
+		return v;
+
+	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+			FW_PORT_CMD_MDIOADDR_G(v) : -1;
+	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+	pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
+
+	return 0;
+}
+
+/**
+ *      t4vf_fw_reset - issue a reset to FW
+ *      @adapter: the adapter
+ *
+ *	Issues a reset command to FW.  For a Physical Function this would
+ *	result in the Firmware resetting all of its state.  For a Virtual
+ *	Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
+				      FW_CMD_WRITE_F);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_query_params - query FW or device parameters
+ *	@adapter: the adapter
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@vals: the parameter values
+ *
+ *	Reads the values of firmware or device parameters.  Up to 7 parameters
+ *	can be queried at once.
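+ *	Seven is as many 8-byte {mnemonic, value} pairs as fit in the 64-byte
+ *	mailbox after the 8-byte command header.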
+ */
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+			     const u32 *params, u32 *vals)
+{
+	int i, ret;
+	struct fw_params_cmd cmd, rpl;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_READ_F);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams].mnem), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+		p->mnem = htonl(*params++);
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0)
+		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+			*vals++ = be32_to_cpu(p->val);
+	return ret;
+}
+
+/**
+ *	t4vf_set_params - sets FW or device parameters
+ *	@adapter: the adapter
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@vals: the parameter values
+ *
+ *	Sets the values of firmware or device parameters.  Up to 7 parameters
+ *	can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+		    const u32 *params, const u32 *vals)
+{
+	int i;
+	struct fw_params_cmd cmd;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams]), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+		p->mnem = cpu_to_be32(*params++);
+		p->val = cpu_to_be32(*vals++);
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *	@adapter: the adapter
+ *	@qid: the Queue ID
+ *	@qtype: the Ingress or Egress type for @qid
+ *	@pbar2_qoffset: BAR2 Queue Offset
+ *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ *	Returns the BAR2 SGE Queue Registers information associated with the
+ *	indicated Absolute Queue ID.  These are passed back in return value
+ *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ *	This may return an error which indicates that BAR2 SGE Queue
+ *	registers aren't available.  If an error is not returned, then the
+ *	following values are returned:
+ *
+ *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ *	require the "Inferred Queue ID" ability may be used.  E.g. the
+ *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ *	then these "Inferred Queue ID" registers may not be used.
+ */
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+			unsigned int qid,
+			enum t4_bar2_qtype qtype,
+			u64 *pbar2_qoffset,
+			unsigned int *pbar2_qid)
+{
+	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+	u64 bar2_page_offset, bar2_qoffset;
+	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+	/* T4 doesn't support BAR2 SGE Queue registers.
+	 */
+	if (is_t4(adapter->params.chip))
+		return -EINVAL;
+
+	/* Get our SGE Page Size parameters.
+	 */
+	page_shift = adapter->params.sge.sge_vf_hps + 10;
+	page_size = 1 << page_shift;
+
+	/* Get the right Queues per Page parameters for our Queue.
+	 */
+	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+		     ? adapter->params.sge.sge_vf_eq_qpp
+		     : adapter->params.sge.sge_vf_iq_qpp);
+	qpp_mask = (1 << qpp_shift) - 1;
+
+	/* Calculate the basics of the BAR2 SGE Queue register area:
+	 *  o The BAR2 page the Queue registers will be in.
+	 *  o The BAR2 Queue ID.
+	 *  o The BAR2 Queue ID Offset into the BAR2 page.
+	 */
+	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
+	bar2_qid = qid & qpp_mask;
+	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
+	 * hardware will infer the Absolute Queue ID simply from the writes to
+	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
+	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+	 * from the BAR2 Page and BAR2 Queue ID.
+	 *
+	 * One important consequence of this is that some BAR2 SGE registers
+	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+	 * there.  But other registers synthesize the SGE Queue ID purely
+	 * from the writes to the registers -- the Write Combined Doorbell
+	 * Buffer is a good example.  These BAR2 SGE Registers are only
+	 * available for those BAR2 SGE Register areas where the SGE Absolute
+	 * Queue ID can be inferred from simple writes.
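+	 *
+	 * As a purely illustrative example: with a 4KB BAR2 page size
+	 * (page_shift == 12), 8 Egress Queues per page (qpp_shift == 3) and
+	 * a 128-byte SGE_UDB_SIZE, Absolute Queue ID 21 lands in BAR2 page 2
+	 * (offset 0x2000) with BAR2 Queue ID 5 and Queue ID Offset 640;
+	 * since 640 < 4096 the Queue ID can be inferred, so we return a BAR2
+	 * offset of 0x2280 and a BAR2 Queue ID of 0.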
+	 */
+	bar2_qoffset = bar2_page_offset;
+	bar2_qinferred = (bar2_qid_offset < page_size);
+	if (bar2_qinferred) {
+		bar2_qoffset += bar2_qid_offset;
+		bar2_qid = 0;
+	}
+
+	*pbar2_qoffset = bar2_qoffset;
+	*pbar2_qid = bar2_qid;
+	return 0;
+}
+
+/**
+ *	t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various core SGE parameters in the form of hardware SGE
+ *	register values.  The caller is responsible for decoding these as
+ *	needed.  The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
+	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
+	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
+	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
+	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
+	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
+	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
+	v = t4vf_query_params(adapter, 7, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_control = vals[0];
+	sge_params->sge_host_page_size = vals[1];
+	sge_params->sge_fl_buffer_size[0] = vals[2];
+	sge_params->sge_fl_buffer_size[1] = vals[3];
+	sge_params->sge_timer_value_0_and_1 = vals[4];
+	sge_params->sge_timer_value_2_and_3 = vals[5];
+	sge_params->sge_timer_value_4_and_5 = vals[6];
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
+	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+	 * SGE_CONTROL2 in order to determine how ingress packet data will be
+	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+	 * failure grabbing it we throw an error since we can't figure out the
+	 * right value.
+	 */
+	if (!is_t4(adapter->params.chip)) {
+		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
+		v = t4vf_query_params(adapter, 1, params, vals);
+		if (v != FW_SUCCESS) {
+			dev_err(adapter->pdev_dev,
+				"Unable to get SGE Control2; "
+				"probably old firmware.\n");
+			return v;
+		}
+		sge_params->sge_control2 = vals[0];
+	}
+
+	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
+	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_ingress_rx_threshold = vals[0];
+	sge_params->sge_congestion_control = vals[1];
+
+	/* For T5 and later we want to use the new BAR2 Doorbells.
+	 * Unfortunately, older firmware didn't allow the VF SGE Queues/Page
+	 * registers to be read.
+	 */
+	if (!is_t4(adapter->params.chip)) {
+		u32 whoami;
+		unsigned int pf, s_hps, s_qpp;
+
+		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+			     FW_PARAMS_PARAM_XYZ_V(
+				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
+		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+			     FW_PARAMS_PARAM_XYZ_V(
+				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
+		v = t4vf_query_params(adapter, 2, params, vals);
+		if (v != FW_SUCCESS) {
+			dev_warn(adapter->pdev_dev,
+				 "Unable to get VF SGE Queues/Page; "
+				 "probably old firmware.\n");
+			return v;
+		}
+		sge_params->sge_egress_queues_per_page = vals[0];
+		sge_params->sge_ingress_queues_per_page = vals[1];
+
+		/* We need the Queues/Page for our VF.  This is based on the
+		 * PF from which we're instantiated and is indexed in the
+		 * register we just read. Do it once here so other code in
+		 * the driver can just use it.
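+		 *
+		 * For example, assuming the usual 4-bit-per-PF layout of
+		 * these registers, a VF instantiated from PF2 shifts the
+		 * values read above right by an extra 8 bits before applying
+		 * the field mask.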
+		 */
+		whoami = t4_read_reg(adapter,
+				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+		s_hps = (HOSTPAGESIZEPF0_S +
+			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
+		sge_params->sge_vf_hps =
+			((sge_params->sge_host_page_size >> s_hps)
+			 & HOSTPAGESIZEPF0_M);
+
+		s_qpp = (QUEUESPERPAGEPF0_S +
+			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
+		sge_params->sge_vf_eq_qpp =
+			((sge_params->sge_egress_queues_per_page >> s_qpp)
+			 & QUEUESPERPAGEPF0_M);
+		sge_params->sge_vf_iq_qpp =
+			((sge_params->sge_ingress_queues_per_page >> s_qpp)
+			 & QUEUESPERPAGEPF0_M);
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_vpd_params - retrieve device VPD parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various device Vital Product Data parameters.  The parameters
+ *	are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+	struct vpd_params *vpd_params = &adapter->params.vpd;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v)
+		return v;
+	vpd_params->cclk = vals[0];
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_dev_params - retrieve device parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various device parameters.  The parameters are stored in
+ *	@adapter->params.dev.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+	struct dev_params *dev_params = &adapter->params.dev;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
+	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v)
+		return v;
+	dev_params->fwrev = vals[0];
+	dev_params->tprev = vals[1];
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ *	@adapter: the adapter
+ *
+ *	Retrieves global RSS mode and parameters with which we have to live
+ *	and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+	struct rss_params *rss = &adapter->params.rss;
+	struct fw_rss_glb_config_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute an RSS Global Configuration read command to retrieve
+	 * our RSS configuration.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+				      FW_CMD_REQUEST_F |
+				      FW_CMD_READ_F);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	/*
+	 * Translate the big-endian RSS Global Configuration into our
+	 * cpu-endian format based on the RSS mode.  We also do first level
+	 * filtering at this point to weed out modes which don't support
+	 * VF Drivers ...
+	 */
+	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
+			be32_to_cpu(rpl.u.manual.mode_pkd));
+	switch (rss->mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(
+				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+		rss->u.basicvirtual.synmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
+		rss->u.basicvirtual.syn4tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
+		rss->u.basicvirtual.syn2tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
+		rss->u.basicvirtual.syn4tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
+		rss->u.basicvirtual.syn2tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
+
+		rss->u.basicvirtual.ofdmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
+
+		rss->u.basicvirtual.tnlmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
+		rss->u.basicvirtual.tnlalllookup =
+			((word  & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
+
+		rss->u.basicvirtual.hashtoeplitz =
+			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
+
+		/* we need at least Tunnel Map Enable to be set */
+		if (!rss->u.basicvirtual.tnlmapen)
+			return -EINVAL;
+		break;
+	}
+
+	default:
+		/* all unknown/unsupported RSS modes result in an error */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_vfres - retrieve VF resource limits
+ *	@adapter: the adapter
+ *
+ *	Retrieves configured resource limits and capabilities for a virtual
+ *	function.  The results are stored in @adapter->vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct fw_pfvf_cmd cmd, rpl;
+	int v;
+	u32 word;
+
+	/*
+	 * Execute PFVF Read command to get VF resource limits; bail out early
+	 * with error on command failure.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_READ_F);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	/*
+	 * Extract VF resource limits and return success.
+	 */
+	word = be32_to_cpu(rpl.niqflint_niq);
+	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+	vfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+	word = be32_to_cpu(rpl.type_to_neq);
+	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
+	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+	word = be32_to_cpu(rpl.tc_to_nexactf);
+	vfres->tc = FW_PFVF_CMD_TC_G(word);
+	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
+	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+	return 0;
+}
+
+/**
+ *	t4vf_read_rss_vi_config - read a VI's RSS configuration
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface ID
+ *	@config: pointer to host-native VI RSS Configuration buffer
+ *
+ *	Reads the Virtual Interface's RSS configuration information and
+ *	translates it into CPU-native format.
+ */
+int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
+			    union rss_vi_config *config)
+{
+	struct fw_rss_vi_config_cmd cmd, rpl;
+	int v;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_READ_F |
+				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	switch (adapter->params.rss.mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
+
+		config->basicvirtual.ip6fourtupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
+		config->basicvirtual.ip6twotupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
+		config->basicvirtual.ip4fourtupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
+		config->basicvirtual.ip4twotupen =
+			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
+		config->basicvirtual.udpen =
+			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
+		config->basicvirtual.defaultq =
+			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_write_rss_vi_config - write a VI's RSS configuration
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface ID
+ *	@config: pointer to host-native VI RSS Configuration buffer
+ *
+ *	Write the Virtual Interface's RSS configuration information
+ *	(translating it into firmware-native format before writing).
+ */
+int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
+			     union rss_vi_config *config)
+{
+	struct fw_rss_vi_config_cmd cmd, rpl;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	switch (adapter->params.rss.mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = 0;
+
+		if (config->basicvirtual.ip6fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
+		if (config->basicvirtual.ip6twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
+		if (config->basicvirtual.ip4fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
+		if (config->basicvirtual.ip4twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
+		if (config->basicvirtual.udpen)
+			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
+		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
+				config->basicvirtual.defaultq);
+		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+}
+
+/**
+ *	t4vf_config_rss_range - configure a portion of the RSS mapping table
+ *	@adapter: the adapter
+ *	@viid: Virtual Interface of RSS Table Slice
+ *	@start: starting entry in the table to write
+ *	@n: how many table entries to write
+ *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
+ *	@nrspq: number of values in @rspq
+ *
+ *	Programs the selected part of the VI's RSS mapping table with the
+ *	provided values.  If @nrspq < @n the supplied values are used repeatedly
+ *	until the full table range is populated.
+ *
+ *	The caller must ensure the values in @rspq are in the range 0..1023.
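+ *
+ *	For example, writing an @n of 64 entries from an @rspq array holding
+ *	@nrspq == 8 Ingress Queue IDs fills the 64-entry slice with the eight
+ *	IDs repeated eight times, issued as two 32-entry firmware commands.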
+ */
+int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
+			  int start, int n, const u16 *rspq, int nrspq)
+{
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq+nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	/*
+	 * Initialize firmware command template to write the RSS table.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	/*
+	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
+	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
+	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
+	 * reserved.
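+	 *
+	 * For example, Ingress Queue IDs 7, 8 and 9 travel together in a
+	 * single word built from FW_RSS_IND_TBL_CMD_IQ0_V(7) |
+	 * FW_RSS_IND_TBL_CMD_IQ1_V(8) | FW_RSS_IND_TBL_CMD_IQ2_V(9).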
+	 */
+	while (n > 0) {
+		__be32 *qp = &cmd.iq0_to_iq2;
+		int nq = min(n, 32);
+		int ret;
+
+		/*
+		 * Set up the firmware RSS command header to send the next
+		 * "nq" Ingress Queue IDs to the firmware.
+		 */
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
+
+		/*
+		 * Advance the start index and remaining count for the next loop.
+		 */
+		start += nq;
+		n -= nq;
+
+		/*
+		 * While there are still Ingress Queue IDs to stuff into the
+		 * current firmware RSS command, retrieve them from the
+		 * Ingress Queue ID array and insert them into the command.
+		 */
+		while (nq > 0) {
+			/*
+			 * Grab up to the next 3 Ingress Queue IDs (wrapping
+			 * around the Ingress Queue ID array if necessary) and
+			 * insert them into the firmware RSS command at the
+			 * current 3-tuple position within the command.
+			 */
+			u16 qbuf[3];
+			u16 *qbp = qbuf;
+			int nqbuf = min(3, nq);
+
+			nq -= nqbuf;
+			qbuf[0] = qbuf[1] = qbuf[2] = 0;
+			while (nqbuf) {
+				nqbuf--;
+				*qbp++ = *rsp++;
+				if (rsp >= rsp_end)
+					rsp = rspq;
+			}
+			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
+					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
+					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
+		}
+
+		/*
+		 * Send this portion of the RSS table update to the firmware;
+		 * bail out on any errors.
+		 */
+		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ *	t4vf_alloc_vi - allocate a virtual interface on a port
+ *	@adapter: the adapter
+ *	@port_id: physical port associated with the VI
+ *
+ *	Allocate a new Virtual Interface and bind it to the indicated
+ *	physical port.  Return the new Virtual Interface Identifier on
+ *	success, or a [negative] error number on failure.
+ */
+int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+	struct fw_vi_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute a VI command to allocate Virtual Interface and return its
+	 * VIID.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_ALLOC_F);
+	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
+}
+
+/**
+ *	t4vf_free_vi -- free a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the virtual interface identifier
+ *
+ *	Free a previously allocated Virtual Interface.  Return an error on
+ *	failure.
+ */
+int t4vf_free_vi(struct adapter *adapter, int viid)
+{
+	struct fw_vi_cmd cmd;
+
+	/*
+	 * Execute a VI command to free the Virtual Interface.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_FREE_F);
+	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_enable_vi - enable/disable a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *
+ *	Enables/disables a virtual interface.
+ */
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
+		   bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_EXEC_F |
+				     FW_VI_ENABLE_CMD_VIID_V(viid));
+	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+				       FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_identify_port - identify a VI's port by blinking its LED
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@nblinks: how many times to blink LED at 2.5 Hz
+ *
+ *	Identifies a VI's port by blinking its LED.
+ */
+int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
+		       unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_EXEC_F |
+				     FW_VI_ENABLE_CMD_VIID_V(viid));
+	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
+				       FW_LEN16(cmd));
+	cmd.blinkdur = cpu_to_be16(nblinks);
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_set_rxmode - set Rx properties of a virtual interface
+ *	@adapter: the adapter
+ *	@viid: the VI id
+ *	@mtu: the new MTU or -1 for no change
+ *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ *		-1 no change
+ *
+ *	Sets Rx properties of a virtual interface.
+ */
+int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
+		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
+		    bool sleep_ok)
+{
+	struct fw_vi_rxmode_cmd cmd;
+
+	/* convert to FW values */
+	if (mtu < 0)
+		mtu = FW_VI_RXMODE_CMD_MTU_M;
+	if (promisc < 0)
+		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
+	if (all_multi < 0)
+		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
+	if (bcast < 0)
+		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
+	if (vlanex < 0)
+		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_VI_RXMODE_CMD_VIID_V(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	cmd.mtu_to_vlanexen =
+		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ *	t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface Identifier
+ *	@free: if true any existing filters for this VI id are first removed
+ *	@naddr: the number of MAC addresses to allocate filters for (issued to
+ *	the firmware in chunks of up to 7)
+ *	@addr: the MAC address(es)
+ *	@idx: where to store the index of each allocated filter
+ *	@hash: pointer to hash address filter bitmap
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Allocates an exact-match filter for each of the supplied addresses and
+ *	sets it to the corresponding address.  If @idx is not %NULL it should
+ *	have at least @naddr entries, each of which will be set to the index of
+ *	the filter allocated for the corresponding MAC address.  If a filter
+ *	could not be allocated for an address its index is set to 0xffff.
+ *	If @hash is not %NULL, addresses that fail to allocate an exact filter
+ *	are hashed and the corresponding bit is set in the hash filter bitmap
+ *	pointed at by @hash.
+ *
+ *	Returns a negative error number or the number of filters allocated.
+ */
+int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
+			unsigned int naddr, const u8 **addr, u16 *idx,
+			u64 *hash, bool sleep_ok)
+{
+	int offset, ret = 0;
+	unsigned nfilters = 0;
+	unsigned int rem = naddr;
+	struct fw_vi_mac_cmd cmd, rpl;
+	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
+
+	if (naddr > max_naddr)
+		return -EINVAL;
+
+	for (offset = 0; offset < naddr; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+					 ? rem
+					 : ARRAY_SIZE(cmd.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+					     FW_CMD_REQUEST_F |
+					     FW_CMD_WRITE_F |
+					     (free ? FW_CMD_EXEC_F : 0) |
+					     FW_VI_MAC_CMD_VIID_V(viid));
+		cmd.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+				    FW_CMD_LEN16_V(len16));
+
+		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx = cpu_to_be16(
+				FW_VI_MAC_CMD_VALID_F |
+				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+		}
+
+		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+					sleep_ok);
+		if (ret && ret != -ENOMEM)
+			break;
+
+		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_G(
+				be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset+i] =
+					(index >= max_naddr
+					 ? 0xffff
+					 : index);
+			if (index < max_naddr)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
+	}
+
+	/*
+	 * If there were no errors or we merely ran out of room in our MAC
+	 * address arena, return the number of filters actually written.
+	 */
+	if (ret == 0 || ret == -ENOMEM)
+		ret = nfilters;
+	return ret;
+}
+
+/**
+ *	t4vf_change_mac - modifies the exact-match filter for a MAC address
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface ID
+ *	@idx: index of existing filter for old value of MAC address, or -1
+ *	@addr: the new MAC address value
+ *	@persist: if idx < 0, the new MAC allocation should be persistent
+ *
+ *	Modifies an exact-match filter and sets it to the new MAC address.
+ *	Note that in general it is not possible to modify the value of a given
+ *	filter so the generic way to modify an address filter is to free the
+ *	one being used by the old address value and allocate a new filter for
+ *	the new address value.  @idx can be -1 if the address is a new
+ *	addition.
+ *
+ *	Returns a negative error number or the index of the filter with the new
+ *	MAC value.
+ */
+int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
+		    int idx, const u8 *addr, bool persist)
+{
+	int ret;
+	struct fw_vi_mac_cmd cmd, rpl;
+	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
+	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+					     u.exact[1]), 16);
+	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
+
+	/*
+	 * If this is a new allocation, determine whether it should be
+	 * persistent (across a "freemacs" operation) or not.
+	 */
+	if (idx < 0)
+		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_VI_MAC_CMD_VIID_V(viid));
+	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+				      FW_VI_MAC_CMD_IDX_V(idx));
+	memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0) {
+		p = &rpl.u.exact[0];
+		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+		if (ret >= max_mac_addr)
+			ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/**
+ *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
+ *	@adapter: the adapter
+ *	@viid: the Virtual Interface Identifier
+ *	@ucast: whether the hash filter should also match unicast addresses
+ *	@vec: the value to be written to the hash filter
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
+		       bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd cmd;
+	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+					     u.exact[0]), 16);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				     FW_CMD_REQUEST_F |
+				     FW_CMD_WRITE_F |
+				     FW_VI_ENABLE_CMD_VIID_V(viid));
+	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+					    FW_CMD_LEN16_V(len16));
+	cmd.u.hash.hashvec = cpu_to_be64(vec);
+	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ *	t4vf_get_port_stats - collect "port" statistics
+ *	@adapter: the adapter
+ *	@pidx: the port index
+ *	@s: the stats structure to fill
+ *
+ *	Collect statistics for the "port"'s Virtual Interface.
+ */
+int t4vf_get_port_stats(struct adapter *adapter, int pidx,
+			struct t4vf_port_stats *s)
+{
+	struct port_info *pi = adap2pinfo(adapter, pidx);
+	struct fw_vi_stats_vf fwstats;
+	unsigned int rem = VI_VF_NUM_STATS;
+	__be64 *fwsp = (__be64 *)&fwstats;
+
+	/*
+	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
+	 * commands.  We could use a Work Request and get all of them at once
+	 * but that's an asynchronous interface which is awkward to use.
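+	 *
+	 * With VI_VF_NUM_STATS (currently 16) and at most 6 statistics per
+	 * command, this works out to three mailbox commands (6 + 6 + 4).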
+	 */
+	while (rem) {
+		unsigned int ix = VI_VF_NUM_STATS - rem;
+		unsigned int nstats = min(6U, rem);
+		struct fw_vi_stats_cmd cmd, rpl;
+		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+			      sizeof(struct fw_vi_stats_ctl));
+		size_t len16 = DIV_ROUND_UP(len, 16);
+		int ret;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
+					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
+					     FW_CMD_REQUEST_F |
+					     FW_CMD_READ_F);
+		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+		cmd.u.ctl.nstats_ix =
+			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
+				    FW_VI_STATS_CMD_NSTATS_V(nstats));
+		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+		if (ret)
+			return ret;
+
+		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+		rem -= nstats;
+		fwsp += nstats;
+	}
+
+	/*
+	 * Translate firmware statistics into host native statistics.
+	 */
+	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
+	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
+	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
+	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
+	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
+	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
+
+	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
+	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
+	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
+	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+
+	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
+
+	return 0;
+}
+
+/**
+ *	t4vf_iq_free - free an ingress queue and its free lists
+ *	@adapter: the adapter
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue ID
+ *	@fl0id: FL0 queue ID or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue ID or 0xffff if no attached FL1
+ *
+ *	Frees an ingress queue and its associated free lists, if any.
+ */
+int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
+		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
+					 FW_LEN16(cmd));
+	cmd.type_to_iqandstindex =
+		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+
+	cmd.iqid = cpu_to_be16(iqid);
+	cmd.fl0id = cpu_to_be16(fl0id);
+	cmd.fl1id = cpu_to_be16(fl1id);
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_eth_eq_free - free an Ethernet egress queue
+ *	@adapter: the adapter
+ *	@eqid: egress queue ID
+ *
+ *	Frees an Ethernet egress queue.
+ */
+int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
+					 FW_LEN16(cmd));
+	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_handle_fw_rpl - process a firmware reply message
+ *	@adapter: the adapter
+ *	@rpl: start of the firmware message
+ *
+ *	Processes a firmware message, such as link state change messages.
+ */
+int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
+{
+	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
+	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
+
+	switch (opcode) {
+	case FW_PORT_CMD: {
+		/*
+		 * Link/module state change message.
+		 */
+		const struct fw_port_cmd *port_cmd =
+			(const struct fw_port_cmd *)rpl;
+		u32 stat, mod;
+		int action, port_id, link_ok, speed, fc, pidx;
+
+		/*
+		 * Extract various fields from port status change message.
+		 */
+		action = FW_PORT_CMD_ACTION_G(
+			be32_to_cpu(port_cmd->action_to_len16));
+		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+			dev_err(adapter->pdev_dev,
+				"Unknown firmware PORT reply action %x\n",
+				action);
+			break;
+		}
+
+		port_id = FW_PORT_CMD_PORTID_G(
+			be32_to_cpu(port_cmd->op_to_portid));
+
+		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+		speed = 0;
+		fc = 0;
+		if (stat & FW_PORT_CMD_RXPAUSE_F)
+			fc |= PAUSE_RX;
+		if (stat & FW_PORT_CMD_TXPAUSE_F)
+			fc |= PAUSE_TX;
+		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+			speed = 100;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+			speed = 1000;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+			speed = 10000;
+		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+			speed = 40000;
+
+		/*
+		 * Scan all of our "ports" (Virtual Interfaces) looking for
+		 * those bound to the physical port which has changed.  If
+		 * our recorded state doesn't match the current state,
+		 * signal that change to the OS code.
+		 */
+		for_each_port(adapter, pidx) {
+			struct port_info *pi = adap2pinfo(adapter, pidx);
+			struct link_config *lc;
+
+			if (pi->port_id != port_id)
+				continue;
+
+			lc = &pi->link_cfg;
+
+			mod = FW_PORT_CMD_MODTYPE_G(stat);
+			if (mod != pi->mod_type) {
+				pi->mod_type = mod;
+				t4vf_os_portmod_changed(adapter, pidx);
+			}
+
+			if (link_ok != lc->link_ok || speed != lc->speed ||
+			    fc != lc->fc) {
+				/* something changed */
+				lc->link_ok = link_ok;
+				lc->speed = speed;
+				lc->fc = fc;
+				lc->supported =
+					be16_to_cpu(port_cmd->u.info.pcap);
+				t4vf_os_link_changed(adapter, pidx, link_ok);
+			}
+		}
+		break;
+	}
+
+	default:
+		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
+			opcode);
+	}
+	return 0;
+}
+
+/**
+ *	t4vf_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *
+ *	Initialize adapter SW state, set up safe default parameter values in
+ *	case we can't reach the firmware, and record the chip type/revision
+ *	so the rest of the driver knows which adapter architecture it is
+ *	dealing with.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+	int err;
+	unsigned int chipid;
+
+	/* Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_wait_dev_ready(adapter);
+	if (err)
+		return err;
+
+	/* Default port and clock for debugging in case we can't reach
+	 * firmware.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.vfres.pmask = 1;
+	adapter->params.vpd.cclk = 50000;
+
+	adapter->params.chip = 0;
+	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
+	case CHELSIO_T4:
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+		adapter->params.arch.sge_fl_db = DBPRIO_F;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_CLS_SRAM_L_INSTANCES;
+		break;
+
+	case CHELSIO_T5:
+		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		break;
+
+	case CHELSIO_T6:
+		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+		adapter->params.arch.sge_fl_db = 0;
+		adapter->params.arch.mps_tcam_size =
+				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+		break;
+	}
+
+	return 0;
+}